# File: ModProp-main/lsnn/toolbox/file_saver_dumper_no_h5py.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
import numpy as np
import os
import pickle
import datetime
from collections import OrderedDict
import tensorflow as tf
## Functions to process tensorflow flags
def flag_to_dict(FLAG):
    # Assumes a TF 1.x version string like '1.X.Y'; flag_values_dict() exists from TF 1.5 on.
    if float(tf.__version__.split('.')[1]) >= 5:
        flag_dict = FLAG.flag_values_dict()
    else:
        flag_dict = FLAG.__flags
    return flag_dict
def get_storage_path_reference(script_file, FLAG, root, flags=True, comment=True):
    # Evaluate one flag once to force parsing: FLAGS is initialized lazily and can otherwise misbehave
key0 = list(dir(FLAG))[0]
getattr(FLAG,key0)
# SETUP THE SAVING FOLDER
script_name = os.path.basename(script_file)[:-3]
root_path = os.path.join(root,script_name)
# File reference for saving info
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M__%S_%f")
flag_dict = flag_to_dict(FLAG)
assert isinstance(flag_dict,dict)
random_key = str(np.random.randint(0,1000000)).zfill(6)
file_reference = time_stamp + '-' + random_key
if flags:
config = OrderedDict(sorted((flag_dict.items())))
string_list = [k + '_' + str(v) for k, v in config.items()]
file_reference = file_reference + '-' + '-'.join(string_list)
if comment:
file_reference = file_reference + '__' + flag_dict["comment"]
file_reference = file_reference[:240]
full_storage_path = os.path.join(root_path,file_reference)
return file_reference,full_storage_path, flag_dict
## JSON
class NumpyAwareEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyAwareEncoder, self).default(obj)
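# Minimal usage sketch for NumpyAwareEncoder (illustrative values, not from a real run):
# the stock json module rejects numpy scalars and arrays, so the encoder converts them first.
#
#   payload = {'acc': np.float32(0.93), 'steps': np.int64(1000), 'w': np.zeros(2)}
#   json.dumps(payload, cls=NumpyAwareEncoder)  # -> '{"acc": 0.93..., "steps": 1000, "w": [0.0, 0.0]}'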
## GENERAL
def save_file(obj, path, file_name, file_type='pickle'):
# Put the file type at the end if needed
if not(file_name.endswith('.' + file_type)):
file_name = file_name + '.' + file_type
# Make sure path is provided otherwise do not save
if path == '':
        print('WARNING: Saving \'{0}\' cancelled, no path given.'.format(file_name))
return False
if file_type == 'json':
assert os.path.exists(path), 'Directory {} does not exist'.format(path)
f = open(os.path.join(path, file_name), 'w')
json.dump(obj, f, indent=4, sort_keys=True, cls=NumpyAwareEncoder)
f.close()
elif file_type == 'pickle':
f = open(os.path.join(path, file_name), 'wb')
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
else:
        raise NotImplementedError('SAVING FAILED: Unknown format {}'.format(file_type))
return True
def load_file(path,file_name,file_type=None):
if file_type is None:
if file_name.endswith('.json'):
file_type = 'json'
elif file_name.endswith('.pickle'):
file_type = 'pickle'
else:
            raise ValueError('LOADING FAILED: if file_type is None, the file type must be part of the file name. Got {}'.format(file_name))
else:
# Put the file type at the end if needed
if not (file_name.endswith('.' + file_type)):
file_name = file_name + '.' + file_type
if path == '':
        print('WARNING: Loading \'{0}\' cancelled, no path given.'.format(file_name))
return False
if file_type == 'json':
f = open(os.path.join(path, file_name), 'r')
obj = json.load(f)
elif file_type == 'pickle':
f = open(os.path.join(path, file_name), 'rb')
obj = pickle.load(f)
else:
raise ValueError('LOADING FAILED: Not understanding file type: type requested {}, file name {}'.format(file_type,file_name))
return obj
def compute_or_load(function,path,file_name,file_type='pickle',verbose=True):
file_path = os.path.join(path, file_name + '.' + file_type)
if os.path.exists(file_path):
if verbose: print('File {} loaded'.format(file_name))
return load_file(path,file_name,file_type= file_type)
else:
obj = function()
save_file(obj, path, file_name, file_type=file_type)
if verbose: print('File {} saved'.format(file_name))
return obj
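# A small self-test, added here as a hedged usage sketch (assumes a writable temp
# directory; the object contents below are made up for illustration).
if __name__ == '__main__':
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    demo_obj = {'w': np.arange(3), 'lr': np.float64(1e-3)}
    save_file(demo_obj, tmp_dir, 'demo', file_type='json')    # serialized via NumpyAwareEncoder
    save_file(demo_obj, tmp_dir, 'demo', file_type='pickle')
    loaded = load_file(tmp_dir, 'demo.pickle')                # file type inferred from the name
    assert np.all(loaded['w'] == demo_obj['w'])
    # First call computes and saves, second call loads the cached result from disk.
    first = compute_or_load(lambda: {'x': 42}, tmp_dir, 'cached')
    second = compute_or_load(lambda: {'x': 42}, tmp_dir, 'cached')
    assert first == second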

# File: ModProp-main/lsnn/toolbox/rewiring_tools.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tensorflow as tf
import numpy as np
import numpy.random as rd
import numpy.linalg as la
import matplotlib.pyplot as plt
def balance_matrix_per_neuron(M):
M = M.copy()
n_in, n_out = M.shape
for k in range(n_out):
# Change only non zero synapses to keep as much zeros as possible
e_act = M[:, k] > 0
i_act = M[:, k] < 0
if np.sum(i_act) == 0:
M[:, k] = 0
            print('Warning: Neuron {} has no incoming synapses from inhibitory neurons. '
                  'Setting all incoming weights to 0 to avoid unbalanced behaviour.'.format(k))
if np.sum(e_act) == 0:
M[:, k] = 0
            print('Warning: Neuron {} has no incoming synapses from excitatory neurons. '
                  'Setting all incoming weights to 0 to avoid unbalanced behaviour.'.format(k))
s_e = M[e_act, k].sum()
s_i = M[i_act, k].sum()
# Add a small portion to compensate if the mean is not balanced
if s_e + s_i < 0:
M[e_act, k] += np.abs(s_e + s_i) / np.sum(e_act)
else:
M[i_act, k] -= np.abs(s_e + s_i) / np.sum(i_act)
sum_check = M[:, k].sum()
        assert sum_check ** 2 < 1e-5, 'Mismatch of column balancing for neuron {}: sum is {} with excitatory sum {} and inhibitory sum {}'.format(
            k, sum_check, s_e, s_i)
return M
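# Quick sanity sketch (illustrative, not part of the original file): after balancing,
# every column of a random signed matrix should sum to approximately zero, i.e.
# excitation and inhibition cancel for each post-synaptic neuron.
#
#   M_bal = balance_matrix_per_neuron(rd.randn(5, 4))
#   assert np.all(np.abs(M_bal.sum(axis=0)) < 1e-2)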
def max_eigen_value_on_unit_circle(w):
vals = np.abs(la.eig(w)[0])
factor = 1. / np.max(vals)
return w * factor, factor
def random_sparse_signed_matrix(neuron_sign, p=1., balance_zero_mean_per_neuron=True, n_out=None):
    '''
    Provide a good initialization for a matrix with restricted sign.
    This is a personal recipe.
    :param neuron_sign: vector with one entry of +1 or -1 per input neuron
    :param p: connection probability
    :param balance_zero_mean_per_neuron: if True, rescale so that each column sums to zero
    :param n_out: number of output neurons (defaults to the number of input neurons)
    :return: weight matrix, sign matrix, theta (magnitudes), connectivity mask
    '''
E = neuron_sign > 0
I = neuron_sign < 0
n = neuron_sign.__len__()
if n_out is None:
n_out = n
# Random numbers
is_con = rd.rand(n, n) < p
theta = np.abs(rd.randn(n, n))
theta = (2 * is_con - 1) * theta
sign = np.tile(np.expand_dims(neuron_sign, 1), (1, n))
w = lambda theta, sign: (theta) * (theta > 0) * sign
_w = w(theta, sign)
if (np.sum(I) > 0):
# Normalize a first time, but this is obsolete if the stabilization happens also on a single neuron basis
val_E = np.sum(_w[E, :])
val_I = - np.sum(_w[I, :])
assert val_I > 0 and val_E > 0, 'Sign error'
theta[I, :] *= val_E / val_I
_w = w(theta, sign)
if balance_zero_mean_per_neuron:
w_balanced = balance_matrix_per_neuron(_w)
theta[theta > 0] = np.abs(w_balanced[theta > 0])
_w = w(theta, sign)
            assert (_w[np.logical_not(is_con)] == 0).all(), 'Balancing the neurons produced a sign error'
else:
print("Warning: no inhibitory neurons detected, no balancing is performed")
# Normalize to scale the eigenvalues
_, factor = max_eigen_value_on_unit_circle(_w)
theta *= factor
_w = w(theta, sign)
assert (_w[E] >= 0).all(), 'Found negative excitatory weights'
    assert (_w[I] <= 0).all(), 'Found positive inhibitory weights'
    if n_out == n:
        return _w, sign, theta, is_con
elif n < n_out:
sel = np.random.choice(n, size=n_out)
else:
sel = np.arange(n_out)
theta = theta[:, sel]
sign = sign[:, sel]
is_con = is_con[:, sel]
return w(theta, sign), sign, theta, is_con
def test_random_sparse_signed_matrix():
# Define parameter
p = .33
p_e = .75
mean_E = .4
std_E = 0
n_in = 400
neuron_sign = rd.choice([1, -1], n_in, p=[p_e, 1 - p_e])
M1, M1_sign, M1_theta, M1_is_con = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=p,
balance_zero_mean_per_neuron=True)
s1, _ = la.eig(M1)
assert np.all(np.abs(M1[M1_is_con]) == M1_theta[M1_is_con])
assert np.all(np.sign(M1) == M1_sign * M1_is_con)
assert np.all(M1_is_con == (M1_theta > 0))
M2, _, _, _ = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=1., balance_zero_mean_per_neuron=True)
M2 = M2 * (rd.rand(n_in, n_in) < p)
s2, _ = la.eig(M2)
fig, ax_list = plt.subplots(2)
    ax_list[0].set_title('Random sign constrained with neuron-specific balance (p={:.3g})'.format(p))
ax_list[1].set_title('Random sign constrained, probability mask taken after scaling')
ax_list[0].scatter(s1.real, s1.imag)
ax_list[1].scatter(s2.real, s2.imag)
c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
ax_list[0].add_artist(c)
c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
ax_list[1].add_artist(c)
for ax in ax_list:
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
plt.show()
def sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(theta_list, ps, upper_bound_check=False):
with tf.name_scope('NBreconnectGenerator'):
theta_vals = [theta.read_value() for theta in theta_list]
# Compute size and probability of connections
nb_possible_connections_list = [tf.cast(tf.size(th), dtype=tf.float32) * p for th, p in zip(theta_list, ps)]
total_possible_connections = tf.reduce_sum(nb_possible_connections_list)
max_total_connections = tf.cast(total_possible_connections, dtype=tf.int32)
sampling_probs = [nb_possible_connections / total_possible_connections \
for nb_possible_connections in nb_possible_connections_list]
def nb_connected(theta_val):
is_con = tf.greater(theta_val, 0)
n_connected = tf.reduce_sum(tf.cast(is_con, tf.int32))
return n_connected
total_connected = tf.reduce_sum([nb_connected(theta) for theta in theta_vals])
if upper_bound_check:
assert_upper_bound_check = tf.Assert(tf.less_equal(total_connected, max_total_connections),
data=[max_total_connections, total_connected],
name='RewiringUpperBoundCheck')
else:
assert_upper_bound_check = tf.Assert(True,
data=[max_total_connections, total_connected],
name='SkippedRewiringUpperBoundCheck')
with tf.control_dependencies([assert_upper_bound_check]):
nb_reconnect = tf.maximum(0, max_total_connections - total_connected)
sample_split = tf.distributions.Categorical(probs=sampling_probs).sample(nb_reconnect)
is_class_i_list = [tf.equal(sample_split, i) for i in range(len(theta_list))]
counts = [tf.reduce_sum(tf.cast(is_class_i, dtype=tf.int32)) for is_class_i in is_class_i_list]
return counts
def compute_gradients_with_rewiring_variables(opt, loss, var_list):
rewiring_w_list = tf.get_collection('Rewiring/Weights')
rewiring_sign_list = tf.get_collection('Rewiring/Signs')
rewiring_var_list = tf.get_collection('Rewiring/Variables')
# generate the two sets of variables
grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
# compute the gradients of rewired variables (disconnected vars have non zero gradients to avoid irregularities for optimizers with momentum)
rewiring_gradient_list = tf.gradients(loss, rewiring_w_list)
rewiring_gradient_list = [g * s if g is not None else None for g, s in
zip(rewiring_gradient_list, rewiring_sign_list)]
rewiring_gradient_dict = dict([(v, g) for g, v in zip(rewiring_gradient_list, rewiring_var_list)])
# OP to apply all gradient descent updates
gathered_grads_and_vars = []
for (g, v) in grads_and_vars:
if v not in rewiring_var_list:
gathered_grads_and_vars.append((g, v))
else:
gathered_grads_and_vars.append((rewiring_gradient_dict[v], v))
return gathered_grads_and_vars
def get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities):
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th.read_value(), 0) for th in rewiring_var_list]
n_connected_list = [tf.reduce_sum(tf.cast(is_pos, dtype=tf.float32)) for is_pos in is_positive_theta_list]
size_list = [tf.size(is_pos) for is_pos in is_positive_theta_list]
init_n_connected_list = [tf.cast(size, dtype=tf.float32) * p for size, p in
zip(size_list, rewiring_connectivities)]
total_connected = tf.reduce_sum(n_connected_list)
limit_connected = tf.reduce_sum(init_n_connected_list)
check_connectivity = tf.Assert(total_connected <= limit_connected, [total_connected, limit_connected],
name='CheckRewiringConnectivityBound')
return check_connectivity
def rewiring_optimizer_wrapper(opt, loss, learning_rate, l1s, temperatures,
rewiring_connectivities, global_step=None,
var_list=None,
grads_and_vars=None):
if var_list is None:
var_list = tf.trainable_variables()
# Select the rewired variable in the given list of variable to train
rewiring_var_list = []
for v in tf.get_collection('Rewiring/Variables'):
if v in var_list:
rewiring_var_list.append(v)
if grads_and_vars is None:
grads_and_vars = compute_gradients_with_rewiring_variables(opt, loss, var_list)
else:
grads_and_vars = grads_and_vars
assert len(var_list) == len(grads_and_vars), 'Found {} elements in var_list and {} in grads_and_vars'.format(len(var_list),len(grads_and_vars))
for v, gv in zip(var_list, grads_and_vars):
assert v == gv[1]
if np.isscalar(l1s): l1s = [l1s for _ in range(len(rewiring_var_list))]
if np.isscalar(temperatures): temperatures = [temperatures for _ in range(len(rewiring_var_list))]
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th, 0) for th in rewiring_var_list]
with tf.control_dependencies(is_positive_theta_list):
check_connectivity = get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities)
with tf.control_dependencies([check_connectivity]):
gradient_check_list = [
tf.check_numerics(g, message='Found NaN or Inf in gradients with respect to the variable ' + v.name) for
(g, v) in grads_and_vars]
with tf.control_dependencies(gradient_check_list):
apply_gradients = opt.apply_gradients(grads_and_vars, global_step=global_step)
if len(rewiring_var_list) == 0:
        print('Warning: no variables to rewire were found by the rewiring optimizer wrapper')
return apply_gradients
with tf.control_dependencies([apply_gradients]):
        # This is to make sure that the algorithm does not reconnect synapses by mistake;
        # this can happen with optimizers like Adam
disconnection_guards = [tf.assign(var, tf.where(is_pos, var, tf.zeros_like(var))) for var, is_pos in
zip(rewiring_var_list, is_positive_theta_list)]
with tf.control_dependencies(disconnection_guards):
rewiring_var_value_list = [th.read_value() for th in rewiring_var_list]
mask_connected = lambda th: tf.cast(tf.greater(th, 0), tf.float32)
noise_update = lambda th: mask_connected(th) * tf.random_normal(shape=tf.shape(th))
apply_regularization = [tf.assign_add(th, - learning_rate * mask_connected(th_) * l1 \
+ tf.sqrt(2 * learning_rate * temp) * noise_update(th_))
for th, th_, l1, temp in
zip(rewiring_var_list, rewiring_var_value_list, l1s, temperatures)]
with tf.control_dependencies(apply_regularization):
number_of_rewired_connections = sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(
rewiring_var_list, rewiring_connectivities)
apply_rewiring = [rewiring(th, nb_reconnect=nb) for th, nb in
zip(rewiring_var_list, number_of_rewired_connections)]
with tf.control_dependencies(apply_rewiring):
train_step = tf.no_op('Train')
return train_step
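# Usage sketch for the wrapper (hypothetical sizes and hyper-parameters; TF1 graph mode).
# `weight_sampler` is defined just below and registers its variables in the
# 'Rewiring/*' collections that the wrapper relies on.
#
#   w, w_sign, th, _ = weight_sampler(n_in=100, n_out=100, p=0.1)
#   loss = tf.reduce_sum(w ** 2)  # stand-in for a real task loss built on top of `w`
#   opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
#   train_step = rewiring_optimizer_wrapper(opt, loss, learning_rate=0.01, l1s=1e-5,
#                                           temperatures=1e-8, rewiring_connectivities=0.1)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       sess.run(train_step)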
def weight_sampler(n_in, n_out, p, dtype=tf.float32, neuron_sign=None, w_scale=1., eager=False):
    '''
    Returns a weight matrix and its underlying variable and sign matrices needed for rewiring.
    :param n_in: number of input neurons
    :param n_out: number of output neurons
    :param p: initial connection probability
    :param dtype: data type of the TensorFlow variables
    :param neuron_sign: optional vector of +1/-1 signs, one per input neuron
    :param w_scale: scale factor applied to the initial weights
    :param eager: use eager variables instead of graph variables
    :return: weight tensor, sign matrix, theta variable, connectivity mask
    '''
if eager:
Variable = tf.contrib.eager.Variable
else:
Variable = tf.Variable
with tf.name_scope('SynapticSampler'):
nb_non_zero = int(n_in * n_out * p)
        # Generate the random connectivity mask
is_con_0 = np.zeros((n_in, n_out), dtype=bool)
ind_in = rd.choice(np.arange(n_in), size=nb_non_zero)
ind_out = rd.choice(np.arange(n_out), size=nb_non_zero)
is_con_0[ind_in, ind_out] = True
# Generate random signs
if neuron_sign is None:
theta_0 = np.abs(rd.randn(n_in, n_out) / np.sqrt(n_in)) # initial weight values
theta_0 = theta_0 * is_con_0
sign_0 = np.sign(rd.randn(n_in, n_out))
else:
assert np.size(neuron_sign) == n_in, 'Size of neuron_sign vector {}, for n_in {} expected'.format(
np.size(neuron_sign), n_in)
_, sign_0, theta_0, _ = random_sparse_signed_matrix(neuron_sign, n_out=n_out)
theta_0 *= is_con_0
# _, sign_0, theta_0, is_con_0 = random_sparse_signed_matrix(neuron_sign, p=p,
# balance_zero_mean_per_neuron=True, n_out=n_out)
# Define the tensorflow matrices
th = Variable(theta_0 * w_scale, dtype=dtype, name='theta')
w_sign = Variable(sign_0, dtype=dtype, trainable=False, name='sign')
is_connected = tf.greater(th, 0, name='mask')
w = tf.where(condition=is_connected, x=w_sign * th, y=tf.zeros((n_in, n_out), dtype=dtype), name='weight')
        # Add to collections so the rewiring wrapper function can fetch them
tf.add_to_collection('Rewiring/Variables', th)
tf.add_to_collection('Rewiring/Signs', w_sign)
tf.add_to_collection('Rewiring/Weights', w)
return w, w_sign, th, is_connected
def assert_connection_number(theta, targeted_number):
    '''
    Check during the TensorFlow simulation that the number of connections stays at the targeted value.
    :param theta: the rewiring variable
    :param targeted_number: expected number of positive (connected) entries
    :return: an Assert op
    '''
th = theta.read_value()
is_con = tf.greater(th, 0)
nb_is_con = tf.reduce_sum(tf.cast(is_con, tf.int32))
assert_is_con = tf.Assert(tf.equal(nb_is_con, targeted_number), data=[nb_is_con, targeted_number],
name='NumberOfConnectionCheck')
return assert_is_con
def rewiring(theta, target_nb_connection=None, nb_reconnect=None, epsilon=1e-12, check_zero_numbers=False):
    '''
    The rewiring operation to use after each iteration.
    :param theta: the rewiring variable holding connection magnitudes
    :param target_nb_connection: total number of connections to maintain (ignored if nb_reconnect is given)
    :return: a no-op that depends on the rewiring updates
    '''
with tf.name_scope('rewiring'):
th = theta.read_value()
is_con = tf.greater(th, 0)
reconnect_candidate_coord = tf.where(tf.logical_not(is_con), name='CandidateCoord')
n_candidates = tf.shape(reconnect_candidate_coord)[0]
if nb_reconnect is None:
n_connected = tf.reduce_sum(tf.cast(is_con, tf.int32))
nb_reconnect = target_nb_connection - n_connected
nb_reconnect = tf.clip_by_value(nb_reconnect, 0, n_candidates)
reconnect_sample_id = tf.random_shuffle(tf.range(n_candidates))[:nb_reconnect]
reconnect_sample_coord = tf.gather(reconnect_candidate_coord, reconnect_sample_id, name='SelectedCoord')
# Apply the rewiring
reconnect_vals = tf.fill(dims=[nb_reconnect], value=epsilon, name='InitValues')
reconnect_op = tf.scatter_nd_update(theta, reconnect_sample_coord, reconnect_vals, name='Reconnect')
with tf.control_dependencies([reconnect_op]):
if check_zero_numbers and target_nb_connection is not None:
connection_check = assert_connection_number(theta=theta, targeted_number=target_nb_connection)
with tf.control_dependencies([connection_check]):
return tf.no_op('Rewiring')
else:
return tf.no_op('Rewiring')
if __name__ == '__main__':
test_random_sparse_signed_matrix()

# File: ModProp-main/lsnn/toolbox/tensorflow_utils.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tensorflow as tf
import numpy.random as rd
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
def reduce_variance(v,axis=None):
m = tf.reduce_mean(v,axis=axis)
if axis is not None:
m = tf.expand_dims(m,axis=axis)
return tf.reduce_mean((v - m)**2,axis=axis)
def boolean_count(var,axis=-1):
v = tf.cast(var,dtype=tf.int32)
return tf.reduce_sum(v,axis=axis)
def variable_summaries(var,name=''):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope(name + 'Summary'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def tf_repeat(tensor,num, axis):
with tf.name_scope('Repeat'):
dims = len(tensor.get_shape())
dtype = tensor.dtype
        assert dtype in [tf.float32, tf.float16, tf.float64, tf.int32, tf.int64, tf.bool], 'Data type not understood: ' + str(dtype)
        # Generate a new trailing dimension and tile along it
tensor = tf.expand_dims(tensor,axis=dims)
exp = tf.ones(shape=np.concatenate([np.ones(dims,dtype=int),[num]]),dtype=dtype)
tensor_exp = tensor * exp
# Split and stack in the right dimension
splitted = tf.unstack(tensor_exp,axis=axis)
concatenated = tf.concat(splitted,axis=dims-1)
# permute to put back the axis where it should be
axis_permutation = np.arange(dims-1)
axis_permutation = np.insert(axis_permutation,axis,dims-1)
transposed = tf.transpose(concatenated,perm=axis_permutation)
return transposed
def tf_repeat_test():
a = np.arange(12).reshape(3,4)
axis_a = 1
num_a = 5
b = rd.randn(3)
axis_b = 0
num_b = 4
c = rd.randn(4,5,7,3,2)
axis_c = 1
num_c = 11
sess = tf.Session()
for tensor, axis, num in zip([a,b,c], [axis_a,axis_b,axis_c], [num_a,num_b,num_c]):
res_np = np.repeat(tensor, repeats=num, axis=axis)
res_tf = sess.run(tf_repeat(tf.constant(value=tensor,dtype=tf.float32),axis=axis,num=num))
assert np.mean((res_np - res_tf)**2) < 1e-6, 'Repeat mismatched between np and tf: \n np: {} \n tf: {}'.format(res_np,res_tf)
print('tf_repeat_test -> success')
def tf_downsample(tensor,new_size,axis):
with tf.name_scope('Downsample'):
dims = len(tensor.get_shape())
splitted = tf.split(tensor,num_or_size_splits=new_size,axis=axis)
stacked = tf.stack(splitted,axis=dims)
reduced = tf.reduce_mean(stacked,axis=axis)
permutation = np.arange(dims-1)
permutation = np.insert(permutation,axis,dims-1)
transposed = tf.transpose(reduced,perm=permutation)
return transposed
def tf_downsample_test():
a = np.array([1,2,1,2,4,6,4,6])
sol_a = np.array([1.5,5.])
axis_a = 0
num_a = 2
sol_c = rd.randn(4, 5, 7, 3, 2)
axis_c = 1
num_c = 5
c = np.repeat(sol_c,repeats=11,axis=axis_c)
sess = tf.Session()
for t_np,axis,num,sol in zip([a,c],[axis_a,axis_c],[num_a,num_c],[sol_a,sol_c]):
t = tf.constant(t_np,dtype=tf.float32)
t_ds = tf_downsample(t,new_size=num,axis=axis)
t_ds_np = sess.run(t_ds)
        assert np.sum((t_ds_np - sol)**2) < 1e-6, 'Failed test: mismatch between downsampled: \n arg: {} \n output: {} \n should be: {}'.format(t_np,t_ds_np,sol)
print('tf_downsample_test -> success')
def tf_roll(buffer, new_last_element=None, axis=0):
with tf.name_scope('roll'):
shp = buffer.get_shape()
l_shp = len(shp)
if shp[-1] == 0:
return buffer
# Permute the index to roll over the right index
perm = np.concatenate([[axis],np.arange(axis),np.arange(start=axis+1,stop=l_shp)])
buffer = tf.transpose(buffer, perm=perm)
# Add an element at the end of the buffer if requested, otherwise, add zero
if new_last_element is None:
shp = tf.shape(buffer)
new_last_element = tf.zeros(shape=shp[1:], dtype=buffer.dtype)
new_last_element = tf.expand_dims(new_last_element, axis=0)
new_buffer = tf.concat([buffer[1:], new_last_element], axis=0, name='rolled')
# Revert the index permutation
inv_perm = np.argsort(perm)
new_buffer = tf.transpose(new_buffer,perm=inv_perm)
new_buffer = tf.identity(new_buffer,name='Roll')
#new_buffer.set_shape(shp)
return new_buffer
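# Illustrative behaviour (made-up values): rolling drops the first element along the
# chosen axis and appends the new last element (or zeros when none is given).
#
#   buf = tf.constant([1., 2., 3.])
#   tf_roll(buf, new_last_element=tf.constant(9.))  # evaluates to [2., 3., 9.]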
def tf_tuple_of_placeholder(shape_named_tuple, dtype, default_named_tuple=None, name='TupleOfPlaceholder'):
    with tf.name_scope(name):
        placeholder_dict = OrderedDict({})
        if default_named_tuple is not None:
            default_dict = default_named_tuple._asdict()
            for k, v in default_dict.items():
                # tf.placeholder_with_default takes (input, shape, name); the dtype is inherited from the default value
                placeholder_dict[k] = tf.placeholder_with_default(v, v.get_shape(), name=k)
            placeholder_tuple = default_named_tuple.__class__(**placeholder_dict)
        else:
            shape_dict = shape_named_tuple._asdict()
            for k, v in shape_dict.items():
                placeholder_dict[k] = tf.placeholder(shape=v, dtype=dtype, name=k)
            placeholder_tuple = shape_named_tuple.__class__(**placeholder_dict)
        return placeholder_tuple
def tf_feeding_dict_of_placeholder_tuple(tuple_of_placeholder, tuple_of_values):
    feed_dict = {}
    for k, v in tuple_of_placeholder._asdict().items():
        feed_dict[v] = tuple_of_values._asdict()[k]
    return feed_dict
def moving_sum(tensor,n_steps):
with tf.name_scope('MovingSum'):
# Try to get the shape if int
try: n_batch = int(tensor.get_shape()[0])
except: n_batch = tf.shape(tensor)[0]
try: n_time = int(tensor.get_shape()[1])
except: n_time = tf.shape(tensor)[1]
try: n_neuron = int(tensor.get_shape()[2])
except: n_neuron = tf.shape(tensor)[2]
shp = tensor.get_shape()
assert len(shp) == 3, 'Shape tuple for time filtering should be of length 3, found {}'.format(shp)
t0 = tf.constant(0, dtype=tf.int32, name="time")
out = tf.TensorArray(dtype=tensor.dtype, size=n_time, element_shape=(n_batch,n_neuron))
buffer = tf.zeros(shape=(n_batch,n_steps,n_neuron),dtype=tensor.dtype)
def body(out, buffer, t):
x = tensor[:,t,:]
buffer = tf_roll(buffer, new_last_element=x, axis=1)
new_y = tf.reduce_sum(buffer,axis=1)
out = out.write(t, new_y)
return (out, buffer, t + 1)
def condition(out, buffer, t):
return t < n_time
out, _, _ = tf.while_loop(cond=condition, body=body, loop_vars=[out, buffer, t0])
out = out.stack()
out = tf.transpose(out, perm=[1, 0, 2])
return out
def exp_convolve(tensor, decay):
with tf.name_scope('ExpConvolve'):
assert tensor.dtype in [tf.float16,tf.float32,tf.float64]
tensor_time_major = tf.transpose(tensor, perm=[1, 0, 2])
initializer = tf.zeros_like(tensor_time_major[0])
filtered_tensor = tf.scan(lambda a, x: a * decay + (1-decay) * x,tensor_time_major,initializer=initializer)
filtered_tensor = tf.transpose(filtered_tensor,perm=[1,0,2])
return filtered_tensor
def discounted_return(reward,discount,axis=-1,boundary_value=0):
with tf.name_scope('DiscountedReturn'):
l_shp = len(reward.get_shape())
assert l_shp >= 1, 'Tensor must be rank 1 or higher'
axis = np.mod(axis,l_shp)
perm = np.arange(l_shp)
perm[0] = axis
perm[axis] = 0
t = tf.transpose(reward, perm=perm)
t = tf.reverse(tensor=t,axis=[0])
initializer = tf.ones_like(t[0]) * boundary_value
t = tf.scan(lambda a, x: a * discount + x,t,initializer=initializer)
t = tf.reverse(t,axis=[0])
t = tf.transpose(t,perm=perm)
return t
def tf_moving_sum_test():
sess = tf.Session()
def moving_sum_numpy(tensor,n_steps):
n_batch,n_time,n_neuron = tensor.shape
def zz(d):
z = np.zeros(shape=(n_batch,d,n_neuron),dtype=tensor.dtype)
return z
stacks = [np.concatenate([zz(d),tensor[:,:n_time-d,:]],axis=1) for d in range(n_steps)]
stacks = np.array(stacks)
return np.sum(np.array(stacks),axis=0)
def assert_quantitative_error(arr1,arr2):
err = np.mean((arr1 - arr2) ** 2)
if err > 1e-6:
plt.plot(arr1[0, :, :],color='blue')
plt.plot(arr2[0, :, :],color='green')
plt.show()
            raise ValueError('Mismatch of the smoothing with error {}'.format(err))
# quick test
a = np.array([0,1,2,4,1,2]).reshape((1,6,1))
n_a = 2
sol_a = np.array([0,1,3,6,5,3]).reshape((1,6,1))
# Check the numpy function
summed_np = moving_sum_numpy(a,n_a)
assert_quantitative_error(sol_a,summed_np)
# Check the tf function
summed_tf = sess.run(moving_sum(tf.constant(a),n_a))
assert_quantitative_error(sol_a,summed_tf)
T = 100
n_neuron = 10
n_batch=3
n_delay = 5
tensor = rd.randn(n_batch,T,n_neuron)
summed_np = moving_sum_numpy(tensor,n_delay)
summed_tf = sess.run(moving_sum(tf.constant(tensor,dtype=tf.float32),n_delay))
assert_quantitative_error(summed_np,summed_tf)
print('tf_moving_sum_test -> success')
def tf_exp_convolve_test():
sess = tf.Session()
def exp_convolve_numpy(tensor,decay):
n_batch,n_time,n_neuron = tensor.shape
out = np.zeros_like(tensor,dtype=float)
running = np.zeros_like(tensor[:,0,:],dtype=float)
for t in range(n_time):
out[:,t,:] = decay * running + (1-decay) * tensor[:,t,:]
running = out[:,t,:]
return out
def assert_quantitative_error(arr_np, arr_tf):
err = np.mean((arr_np - arr_tf) ** 2)
if err > 1e-6:
plt.plot(arr_np[0, :, :], color='blue', label='np')
plt.plot(arr_tf[0, :, :], color='green', label='tf')
plt.legend()
plt.show()
            raise ValueError('Mismatch of the smoothing with error {}'.format(err))
# quick test
a = np.array([0,1,2,4,1,2]).reshape((1,6,1))
decay_a = 0.5
# Check the numpy function
summed_np = exp_convolve_numpy(a,decay_a)
summed_tf = sess.run(exp_convolve(tf.constant(a,dtype=tf.float32),decay_a))
assert_quantitative_error(summed_np,summed_tf)
T = 100
n_neuron = 10
n_batch= 3
decay = .5
tensor = rd.randn(n_batch,T,n_neuron)
summed_np = exp_convolve_numpy(tensor,decay)
summed_tf = sess.run(exp_convolve(tf.constant(tensor,dtype=tf.float32),decay))
assert_quantitative_error(summed_np,summed_tf)
print('tf_exp_convolve_test -> success')
def tf_discounted_reward_test():
g = .8
a = [.1, 0, 1.]
a_return = [.1 + g**2,g,1.]
b = np.array([[1,2,3], a])
b_return = np.array([[1 + 2*g + 3*g**2, 2 + 3*g,3], a_return])
c = rd.rand(3,4,2)
c_return = np.zeros_like(c)
n_b,T,n = c.shape
for i in range(n_b):
tmp = np.zeros(n)
for t in range(T):
tmp = g * tmp + c[i,T-1-t]
c_return[i,T-1-t] = tmp
sess = tf.Session()
for t,t_return,axis in zip([a,b,c],[a_return,b_return,c_return],[-1,-1,1]):
tf_return = discounted_return(tf.constant(t,dtype=tf.float32),g,axis=axis)
np_return = sess.run(tf_return)
assert np.sum((np_return - np.array(t_return))**2) < 1e-6, 'Mismatch: \n tensor {} \n solution {} \n found {}'.format(t,t_return,np_return)
if __name__ == '__main__':
tf_repeat_test()
tf_downsample_test()
tf_moving_sum_test()
tf_exp_convolve_test()
    tf_discounted_reward_test()

# File: ModProp-main/lsnn/toolbox/matplotlib_extension.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'guillaume'
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import patches
from matplotlib import cm
from collections import OrderedDict
from matplotlib.colors import LinearSegmentedColormap
def raster_plot(ax,spikes,linewidth=0.8,**kwargs):
n_t,n_n = spikes.shape
event_times,event_ids = np.where(spikes)
max_spike = 10000
event_times = event_times[:max_spike]
event_ids = event_ids[:max_spike]
for n,t in zip(event_ids,event_times):
ax.vlines(t, n + 0., n + 1., linewidth=linewidth, **kwargs)
ax.set_ylim([0 + .5, n_n + .5])
ax.set_xlim([0, n_t])
ax.set_yticks([0, n_n])
def strip_right_top_axis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
def arrow_trajectory(ax,data,epsi=0,hdw=.03,lab='',fact=.8,color=(1.,1.,1.,1.),arrow_tick_steps=[],**kwargs):
fc = tuple(np.clip(np.array(color) * fact,0,1.))
ploted_lab = False
X = data[:-1,:]
dX = data[1:,:] - data[:-1,:]
t0 = 0
T = data.shape[0]-1
if epsi > 0:
while sum(dX[T-1]**2) / np.mean( np.sum(dX**2,axis=1)) < epsi: T = T-1
while sum(dX[t0]**2) / np.mean(np.sum(dX**2,axis=1)) < epsi: t0 = t0+1
ax.scatter(data[t0,0],data[t0,1],s=50,facecolor=fc,color=color,**kwargs)
for t in np.arange(t0+1,T):
x,y = X[t-1,:]
dx,dy = dX[t-1,:]
if t == T-1:
headwidth = hdw
head_length = hdw * 1.5
elif t in arrow_tick_steps:
headwidth = hdw
head_length = hdw * 0.15
else:
headwidth = 0.
head_length = 0.
if dx != 0 or dy != 0:
if ploted_lab:
p = patches.FancyArrow(x, y, dx, dy,facecolor=color,edgecolor=fc,head_width=headwidth,head_length=head_length,**kwargs)
else:
ploted_lab = True
p = patches.FancyArrow(x, y, dx, dy,facecolor=color,edgecolor=fc,head_width=headwidth,head_length=head_length,label=lab,**kwargs)
ax.add_patch(p)
def hide_bottom_axis(ax):
ax.spines['bottom'].set_visible(False)
ax.set_xticklabels([])
ax.get_xaxis().set_visible(False)

# File: ModProp-main/lsnn/toolbox/__init__.py
# (empty file)

# File: ModProp-main/lsnn/toolbox/tensorflow_einsums/test_bij_ki_to_bkj.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import numpy.random as rd
import tensorflow as tf
from lsnn.guillaume_toolbox import einsum_bij_ki_to_bkj
n_b = 2
i, j, k = 3, 4, 5
a = rd.rand(n_b, i, j)
b = rd.rand(k, i)
tf_a = tf.constant(a)
tf_b = tf.constant(b)
prod2 = einsum_bij_ki_to_bkj(tf_a,tf_b)
sess = tf.Session()
np_prod_1 = np.einsum('bij,ki->bkj',a,b)
np_prod_2 = sess.run(prod2)
assert (np_prod_1 == np_prod_2).all(), 'Mismatch'
print('Prod 1')
print(np_prod_1)
print('Prod 2')
print(np_prod_2)

# File: ModProp-main/lsnn/toolbox/tensorflow_einsums/test_bi_ijk_to_bjk.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy.random as rd
import tensorflow as tf
from lsnn.guillaume_toolbox import einsum_bi_ijk_to_bjk
a = rd.rand(2,3)
b = rd.rand(3,4,5)
tf_a = tf.constant(a,)
tf_b = tf.constant(b)
prod1 = tf.einsum('bi,ijk->bjk',tf_a,tf_b)
prod2 = einsum_bi_ijk_to_bjk(tf_a,tf_b)
sess = tf.Session()
np_prod_1 = sess.run(prod1)
np_prod_2 = sess.run(prod2)
assert (np_prod_1 == np_prod_2).all(), 'Mismatch'
print('Prod 1')
print(np_prod_1)
print('Prod 2')
print(np_prod_2)

# File: ModProp-main/lsnn/toolbox/tensorflow_einsums/test_bij_jk_to_bik.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy.random as rd
import tensorflow as tf
from lsnn.guillaume_toolbox import einsum_bij_jk_to_bik
a = rd.rand(2,3,4)
b = rd.rand(4,5)
tf_a = tf.constant(a,)
tf_b = tf.placeholder(shape=(4,None),dtype=tf.float64)
prod1 = tf.einsum('bij,jk->bik',tf_a,tf_b)
prod2 = einsum_bij_jk_to_bik(tf_a,tf_b)
print(tf_b.get_shape())
print(prod2.get_shape())
sess = tf.Session()
np_prod_1 = sess.run(prod1, feed_dict={tf_b:b})
np_prod_2 = sess.run(prod2, feed_dict={tf_b:b})
assert (np_prod_1 == np_prod_2).all(), 'Mismatch'
print('Prod 1')
print(np_prod_1)
print('Prod 2')
print(np_prod_2)

# File: ModProp-main/lsnn/toolbox/tensorflow_einsums/__init__.py
# (empty file)

# File: ModProp-main/lsnn/toolbox/tensorflow_einsums/einsum_re_written.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tensorflow as tf
def einsum_bi_ijk_to_bjk(a,b):
batch_size = tf.shape(a)[0]
shp_a = a.get_shape()
shp_b = b.get_shape()
b_ = tf.reshape(b,(int(shp_b[0]), int(shp_b[1]) * int(shp_b[2])))
ab_ = tf.matmul(a,b_)
ab = tf.reshape(ab_,(batch_size,int(shp_b[1]),int(shp_b[2])))
return ab
def einsum_bi_bij_to_bj(a,b):
with tf.name_scope('Einsum-Bi-Bij-Bj'):
a_ = tf.expand_dims(a,axis=1)
a_b = tf.matmul(a_,b)
ab = a_b[:,0,:]
return ab
def einsum_bi_bijk_to_bjk(a,b):
with tf.name_scope('Einsum-Bi-Bijk-Bjk'):
a_ = a[:,:,None,None]
a_b = a_ * b
return tf.reduce_sum(a_b,axis=1)
def einsum_bij_jk_to_bik(a,b):
try:
n_b = int(a.get_shape()[0])
except:
n_b = tf.shape(a)[0]
try:
n_i = int(a.get_shape()[1])
except:
n_i = tf.shape(a)[1]
try:
n_j = int(a.get_shape()[2])
except:
n_j = tf.shape(a)[2]
try:
n_k = int(b.get_shape()[1])
except:
n_k = tf.shape(b)[1]
a_ = tf.reshape(a,(n_b * n_i,n_j))
a_b = tf.matmul(a_,b)
ab = tf.reshape(a_b,(n_b,n_i,n_k))
return ab
def einsum_bij_ki_to_bkj(a,b):
# Write them as b k i j
a_ = tf.expand_dims(a,axis=1)
b_ = tf.expand_dims(b,axis=0)
b_ = tf.expand_dims(b_,axis=3)
ab = tf.reduce_sum(a_ * b_,axis=[2])
return ab

# File: ModProp-main/lsnn/toolbox/tensorflow_einsums/test_bi_bij_to_bj.py
"""
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy.random as rd
import tensorflow as tf
from lsnn.guillaume_toolbox import einsum_bi_bij_to_bj
a = rd.rand(2,3)
b = rd.rand(2,3,4)
tf_a = tf.constant(a)
tf_b = tf.constant(b)
prod1 = tf.einsum('bi,bij->bj',tf_a,tf_b)
prod2 = einsum_bi_bij_to_bj(tf_a,tf_b)
sess = tf.Session()
np_prod_1 = sess.run(prod1)
np_prod_2 = sess.run(prod2)
assert (np_prod_1 == np_prod_2).all(), 'Mismatch'
print('Prod 1')
print(np_prod_1)
print('Prod 2')
print(np_prod_2)

# File: query-selected-attention-main/test.py
import os
import torch
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
import util.util as util
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
train_dataset = create_dataset(util.copyconf(opt, phase="train"))
model = create_model(opt) # create a model given opt.model and other options
# create a webpage for viewing the results
web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
print('creating web directory', web_dir)
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
for i, data in enumerate(dataset):
model.set_input(data) # unpack data from data loader
if i == 0:
model.data_dependent_initialize()
model.setup(opt) # regular setup: load and print networks; create schedulers
model.parallelize()
if opt.eval:
model.eval()
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.test() # run inference
visuals = model.get_current_visuals() # get image results
img_path = model.get_image_paths() # get image paths
if i % 5 == 0: # save images to an HTML file
print('processing (%04d)-th image... %s' % (i, img_path))
save_images(webpage, visuals, img_path, width=opt.display_winsize)
webpage.save() # save the HTML
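    # Example invocation (dataset path and experiment name are placeholders that mirror
    # the defaults declared in options/base_options.py):
    #   python test.py --dataroot ./datasets/horse2zebra --name horse2zebra_qsattn_global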

# File: query-selected-attention-main/train.py
import time
import torch
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
model = create_model(opt) # create a model given opt.model and other options
print('The number of training images = %d' % dataset_size)
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
opt.visualizer = visualizer
total_iters = 0 # the total number of training iterations
optimize_time = 0.1
times = []
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
dataset.set_epoch(epoch)
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
batch_size = data["A"].size(0)
total_iters += batch_size
epoch_iter += batch_size
torch.cuda.synchronize()
optimize_start_time = time.time()
model.set_input(data) # unpack data from dataset and apply preprocessing
if epoch == opt.epoch_count and i == 0:
model.data_dependent_initialize()
model.setup(opt) # regular setup: load and print networks; create schedulers
model.parallelize()
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
torch.cuda.synchronize()
optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)
if opt.display_id is None or opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
print(opt.name) # it's useful to occasionally show the experiment name on console
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
model.update_learning_rate() # update learning rates at the end of every epoch.
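    # Example invocation (placeholders mirroring the defaults in options/):
    #   python train.py --dataroot ./datasets/horse2zebra --name horse2zebra_qsattn_global --gpu_ids 0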

# File: query-selected-attention-main/options/train_options.py
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
"""This class includes training options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
# visdom and HTML visualization parameters
parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
parser.add_argument('--display_id', type=int, default=None, help='window id of the web display. Default is random window id')
parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq')
        parser.add_argument('--save_by_iter', action='store_true', help='whether to save the model by iteration')
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')  # type=bool would parse any non-empty string, even "False", as True
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint')
# training parameters
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs with the initial learning rate')
parser.add_argument('--n_epochs_decay', type=int, default=200, help='number of epochs to linearly decay learning rate to zero')
parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
self.isTrain = True
return parser
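# Illustrative invocation (an assumption about the accompanying train.py entry
# point; flag values are examples only):
#   python train.py --dataroot ./datasets/horse2zebra --name horse2zebra_qsattn_global --gpu_ids 0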
| 3,799 | 83.444444 | 210 | py |
query-selected-attention | query-selected-attention-main/options/base_options.py | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self, cmd_line=None):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
self.cmd_line = None
if cmd_line is not None:
self.cmd_line = cmd_line.split()
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', default='./datasets/horse2zebra', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='horse2zebra_qsattn_global', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--easy_label', type=str, default='experiment_name', help='Interpretable name')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='qs', help='chooses which model to use.')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', choices=['basic', 'n_layers', 'pixel', 'patch', 'tilestylegan2', 'stylegan2'], help='specify discriminator architecture. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', choices=['resnet_9blocks', 'resnet_6blocks', 'unet_256', 'unet_128', 'stylegan2', 'smallstylegan2', 'resnet_cat'], help='specify generator architecture')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--normG', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for G')
parser.add_argument('--normD', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for D')
parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal'], help='network initialization')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', type=util.str2bool, nargs='?', const=True, default=True,
help='no dropout for the generator')
parser.add_argument('--no_antialias', action='store_true', help='if specified, use stride=2 convs instead of antialiased-downsampling (sad)')
parser.add_argument('--no_antialias_up', action='store_true', help='if specified, use [upconv(learned filter)] instead of [upconv(hard-coded [1,3,3,1] filter), conv]')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
if self.cmd_line is None:
opt, _ = parser.parse_known_args()
else:
opt, _ = parser.parse_known_args(self.cmd_line)
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
if self.cmd_line is None:
opt, _ = parser.parse_known_args() # parse again with new defaults
else:
opt, _ = parser.parse_known_args(self.cmd_line) # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
if self.cmd_line is None:
return parser.parse_args()
else:
return parser.parse_args(self.cmd_line)
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
try:
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
except PermissionError as error:
print("permission error {}".format(error))
pass
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
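# Worked example of the --suffix template handling in parse() above (illustrative,
# not from the original file): with --suffix {model}_{netG}, model 'qs' and netG
# 'resnet_9blocks', opt.suffix.format(**vars(opt)) turns the default name
# 'horse2zebra_qsattn_global' into 'horse2zebra_qsattn_global_qs_resnet_9blocks'.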
| 9,260 | 57.613924 | 287 | py |
query-selected-attention | query-selected-attention-main/options/__init__.py | """This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
| 136 | 67.5 | 135 | py |
query-selected-attention | query-selected-attention-main/options/test_options.py | from .base_options import BaseOptions
class TestOptions(BaseOptions):
"""This class includes test options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser) # define shared options
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm have different behavior during training and test.
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.add_argument('--num_test', type=int, default=20, help='how many test images to run')
# To avoid cropping, the load_size should be the same as crop_size
parser.set_defaults(load_size=parser.get_default('crop_size'))
self.isTrain = False
return parser
| 975 | 43.363636 | 104 | py |
query-selected-attention | query-selected-attention-main/models/base_model.py | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks_global
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
        Then, you need to define four lists:
            -- self.loss_names (str list): specify the training losses that you want to plot and save.
            -- self.model_names (str list): define networks used in our training.
            -- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def dict_grad_hook_factory(add_func=lambda x: x):
saved_dict = dict()
def hook_gen(name):
def grad_hook(grad):
saved_vals = add_func(grad)
saved_dict[name] = saved_vals
return grad_hook
return hook_gen, saved_dict
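    # Usage sketch for dict_grad_hook_factory above (illustrative; the tensor and
    # key names are made up):
    #   hook_gen, saved = BaseModel.dict_grad_hook_factory(lambda g: g.norm().item())
    #   t = torch.ones(3, requires_grad=True)
    #   t.register_hook(hook_gen('t'))
    #   (2.0 * t).sum().backward()  # afterwards saved == {'t': 2 * sqrt(3)}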
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks_global.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
def parallelize(self):
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))
def data_dependent_initialize(self):
pass
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
if self.opt.isTrain and self.opt.pretrained_name is not None:
load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
else:
load_dir = self.save_dir
load_path = os.path.join(load_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
# for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
# self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def generate_visuals_for_evaluation(self, data, mode):
return {}
| 11,231 | 42.366795 | 260 | py |
query-selected-attention | query-selected-attention-main/models/patchnce.py | from packaging import version
import torch
from torch import nn
class PatchNCELoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
self.mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool
def forward(self, feat_q, feat_k):
batchSize = feat_q.shape[0]
dim = feat_q.shape[1]
feat_k = feat_k.detach()
# pos logit
l_pos = torch.bmm(feat_q.view(batchSize, 1, -1), feat_k.view(batchSize, -1, 1))
l_pos = l_pos.view(batchSize, 1)
# neg logit -- current batch
# reshape features to batch size
feat_q = feat_q.view(self.opt.batch_size, -1, dim)
feat_k = feat_k.view(self.opt.batch_size, -1, dim)
npatches = feat_q.size(1)
l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1)) # b*np*np
        # diagonal entries are similarity between same features, and hence meaningless.
        # fill the diagonal with a large negative logit (-10.0), whose softmax weight (~exp(-10)) is effectively zero
diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :]
l_neg_curbatch.masked_fill_(diagonal, -10.0)
l_neg = l_neg_curbatch.view(-1, npatches)
out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T
loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long,
device=feat_q.device))
return loss
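# A minimal, self-contained sketch (not part of the original file) of how this
# loss is typically fed: L2-normalized query/key features stacked as
# (batch_size * num_patches, dim). The opt namespace only needs batch_size and
# nce_T here; both names match the options defined elsewhere in this repo.
if __name__ == '__main__':
    from types import SimpleNamespace
    opt = SimpleNamespace(batch_size=2, nce_T=0.07)
    criterion = PatchNCELoss(opt)
    feat_q = torch.randn(2 * 64, 256)  # 64 patches per image, 256-dim features
    feat_k = torch.randn(2 * 64, 256)
    loss = criterion(feat_q, feat_k)   # one cross-entropy term per patch
    print(loss.shape)                  # torch.Size([128])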
| 1,598 | 38 | 114 | py |
query-selected-attention | query-selected-attention-main/models/qs_model.py | import numpy as np
import torch
from .base_model import BaseModel
from . import networks_global, networks_local, networks_local_global
from .patchnce import PatchNCELoss
import util.util as util
class QSModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
        parser.add_argument('--QS_mode', type=str, default="global", choices=['global', 'local', 'local_global'])  # a list makes argparse validate exact values rather than substrings
parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss:GAN(G(X))')
parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
parser.add_argument('--nce_idt', type=util.str2bool, default=True, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map')
parser.add_argument('--netF_nc', type=int, default=256)
parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
parser.add_argument('--flip_equivariance',
type=util.str2bool, nargs='?', const=True, default=False,
help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")
parser.set_defaults(pool_size=0) # no image pooling
opt, _ = parser.parse_known_args()
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
# specify the training losses you want to print out.
# The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
self.visual_names = ['real_A', 'fake_B', 'real_B']
self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
if opt.nce_idt and self.isTrain:
self.loss_names += ['NCE_Y']
self.visual_names += ['idt_B']
if self.isTrain:
self.model_names = ['G', 'F', 'D']
else: # during test time, only load G
self.model_names = ['G']
if self.opt.QS_mode == 'global':
networks = networks_global
elif self.opt.QS_mode == 'local':
networks = networks_local
else:
networks = networks_local_global
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
if self.isTrain:
self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionNCE = []
for nce_layer in self.nce_layers:
self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def data_dependent_initialize(self):
"""
The feature network netF is defined in terms of the shape of the intermediate, extracted
features of the encoder portion of netG. Because of this, the weights of netF are
initialized at the first feedforward pass with some input images.
Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
"""
        bs_per_gpu = self.real_A.size(0) // max(len(self.opt.gpu_ids), 1)  # max(..., 1) avoids division by zero on CPU
self.real_A = self.real_A[:bs_per_gpu]
self.real_B = self.real_B[:bs_per_gpu]
self.forward() # compute fake images: G(A)
if self.opt.isTrain:
self.backward_D() # calculate gradients for D
            self.backward_G() # calculate gradients for G
if self.opt.lambda_NCE > 0.0:
self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
self.optimizers.append(self.optimizer_F)
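        # Note (explanatory, not in the original file): optimizer_F can only be
        # created here, after the first forward/backward pass, because the MLP
        # heads inside netF (see PatchSampleF.create_mlp) are built lazily on the
        # first call and expose no parameters before that.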
def optimize_parameters(self):
# forward
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
if self.opt.netF == 'mlp_sample':
self.optimizer_F.zero_grad()
        self.backward_G() # calculate gradients for G
        self.optimizer_G.step() # update G's weights
if self.opt.netF == 'mlp_sample':
self.optimizer_F.step()
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt else self.real_A
if self.opt.flip_equivariance:
self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
if self.flipped_for_equivariance:
self.real = torch.flip(self.real, [3])
self.fake = self.netG(self.real)
self.fake_B = self.fake[:self.real_A.size(0)]
if self.opt.nce_idt:
self.idt_B = self.fake[self.real_A.size(0):]
self.feat_k = self.netG(self.real_A, self.nce_layers, encode_only=True)
def backward_D(self):
if self.opt.lambda_GAN > 0.0:
"""Calculate GAN loss for the discriminator"""
fake = self.fake_B.detach()
# Fake; stop backprop to the generator by detaching fake_B
pred_fake = self.netD(fake)
self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
# Real
pred_real = self.netD(self.real_B)
loss_D_real_unweighted = self.criterionGAN(pred_real, True)
self.loss_D_real = loss_D_real_unweighted.mean()
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
else:
self.loss_D_real, self.loss_D_fake, self.loss_D = 0.0, 0.0, 0.0
def backward_G(self):
"""Calculate GAN and NCE loss for the generator"""
fake = self.fake_B
# First, G(A) should fake the discriminator
if self.opt.lambda_GAN > 0.0:
pred_fake = self.netD(fake)
self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
else:
self.loss_G_GAN = 0.0
if self.opt.lambda_NCE > 0.0:
self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B)
else:
self.loss_NCE = 0.0
if self.opt.nce_idt and self.opt.lambda_NCE > 0.0:
self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B)
loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5
else:
loss_NCE_both = self.loss_NCE
self.loss_G = self.loss_G_GAN + loss_NCE_both
self.loss_G.backward()
def calculate_NCE_loss(self, src, tgt):
n_layers = len(self.nce_layers)
feat_q = self.netG(tgt, self.nce_layers, encode_only=True)
if self.opt.flip_equivariance and self.flipped_for_equivariance:
feat_q = [torch.flip(fq, [3]) for fq in feat_q]
feat_k = self.netG(src, self.nce_layers, encode_only=True)
feat_k_pool, sample_ids, attn_mats = self.netF(feat_k, self.opt.num_patches, None, None)
feat_q_pool, _, _ = self.netF(feat_q, self.opt.num_patches, sample_ids, attn_mats)
total_nce_loss = 0.0
for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers):
loss = crit(f_q, f_k) * self.opt.lambda_NCE
total_nce_loss += loss.mean()
return total_nce_loss / n_layers
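# Flow summary for calculate_NCE_loss above (explanatory comments, not from the
# original file): the key pass through netF selects patch ids and, for the deeper
# layers, attention matrices; the query pass reuses exactly those ids and matrices,
# so each PatchNCELoss term compares query/key features taken from the same
# spatial locations under the same query-selected attention.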
| 9,580 | 47.145729 | 204 | py |
query-selected-attention | query-selected-attention-main/models/networks_local.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
    elif(filt_size == 7):
        a = np.array([1., 6., 15., 20., 15., 6., 1.])
    else:
        raise ValueError('get_filter: unsupported filt_size %d' % filt_size)
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
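# Worked example (explanatory, not from the original file): the rows above are
# binomial coefficients, so get_filter(3) takes the outer product of [1, 2, 1]
# with itself and normalizes it, yielding the classic 3x3 blur kernel
#   [[1, 2, 1],
#    [2, 4, 2],
#    [1, 2, 1]] / 16
# that Downsample/Upsample apply for anti-aliased resampling.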
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
    else:
        raise NotImplementedError('Pad type [%s] not recognized' % pad_type)
    return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
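# Worked example of the 'linear' policy above (illustrative numbers): with
# epoch_count=1, n_epochs=200 and n_epochs_decay=200, lambda_rule returns 1.0
# through epoch 200 and then decays linearly, roughly 0.5 around epoch 300 and
# about 0.005 at epoch 400, so the learning rate ends near zero at the final epoch.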
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == "patch":
net = PatchDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids,
initialize_weights=('stylegan2' not in netD))
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp', 'nonsaturating']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
bs = prediction.size(0)
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'nonsaturating':
if target_is_real:
loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
else:
loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
return loss
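# Minimal runnable sketch (not part of the original file; shapes are arbitrary):
# with 'lsgan', every patch logit of a PatchGAN output is regressed to the
# real/fake label via MSE.
if __name__ == '__main__':
    criterion = GANLoss('lsgan')
    pred = torch.randn(4, 1, 30, 30)   # dummy patch-level discriminator logits
    print(criterion(pred, True))       # MSE against an all-ones target of the same shape
    print(criterion(pred, False))      # MSE against an all-zeros target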
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float) -- the constant used in formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
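# In formula form (explanatory comment, not from the original file), the branch
# above computes
#   gp = lambda_gp * E_{x_hat}[ (||grad_{x_hat} D(x_hat)||_2 - constant)^2 ]
# where x_hat is real data, fake data, or a random interpolation of the two ('mixed').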
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
class PoolingF(nn.Module):
def __init__(self):
super(PoolingF, self).__init__()
model = [nn.AdaptiveMaxPool2d(1)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
return self.l2norm(self.model(x))
class ReshapeF(nn.Module):
def __init__(self):
super(ReshapeF, self).__init__()
model = [nn.AdaptiveAvgPool2d(4)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
x = self.model(x)
x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
return self.l2norm(x_reshape)
class StridedConvF(nn.Module):
def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
super().__init__()
# self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
# self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
self.l2_norm = Normalize(2)
self.mlps = {}
self.moving_averages = {}
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, x):
C, H = x.shape[1], x.shape[2]
n_down = int(np.rint(np.log2(H / 32)))
mlp = []
for i in range(n_down):
mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
mlp.append(nn.ReLU())
C = max(C // 2, 64)
mlp.append(nn.Conv2d(C, 64, 3))
mlp = nn.Sequential(*mlp)
init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
return mlp
def update_moving_average(self, key, x):
if key not in self.moving_averages:
self.moving_averages[key] = x.detach()
self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
def forward(self, x, use_instance_norm=False):
C, H = x.shape[1], x.shape[2]
key = '%d_%d' % (C, H)
if key not in self.mlps:
self.mlps[key] = self.create_mlp(x)
self.add_module("child_%s" % key, self.mlps[key])
mlp = self.mlps[key]
x = mlp(x)
self.update_moving_average(key, x)
x = x - self.moving_averages[key]
if use_instance_norm:
x = F.instance_norm(x)
return self.l2_norm(x)
class PatchSampleF(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleF, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None, attn_mats=None):
return_ids = []
return_feats = []
return_mats = []
k_s = 7 # kernel size in unfold
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, C, H, W = feat.shape[0], feat.shape[1], feat.shape[2], feat.shape[3]
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # B*HW*C
if num_patches > 0:
if feat_id < 3:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device) # random id in [0, HW]
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
attn_qs = torch.zeros(1).to(feat.device)
else:
feat_local = F.unfold(feat, kernel_size=k_s, stride=1, padding=3) # (B, ks*ks*C, L)
L = feat_local.shape[2]
if attn_mats is not None:
patch_id = patch_ids[feat_id]
attn_qs = attn_mats[feat_id]
else:
feat_k = feat_local.permute(0, 2, 1).reshape(B, L, k_s * k_s, C).flatten(0, 1) # (B*L, ks*ks, C)
feat_q = feat_reshape.reshape(B*L, C, 1)
dots_local = torch.bmm(feat_k, feat_q) # (B*L, ks*ks, 1)
attn_local = dots_local.softmax(dim=1)
attn_local = attn_local.reshape(B, L, -1) # (B, L, ks*ks)
prob = -torch.log(attn_local)
prob = torch.where(torch.isinf(prob), torch.full_like(prob, 0), prob)
entropy = torch.sum(torch.mul(attn_local, prob), dim=2)
_, index = torch.sort(entropy)
patch_id = index[:, :num_patches]
attn_qs = attn_local[torch.arange(B)[:, None], patch_id, :] # (B, n_p, ks*ks)
attn_qs = attn_qs.flatten(0, 1).unsqueeze(1) # (B*n_p, 1, ks*ks)
feat_v = feat_local[torch.arange(B)[:, None], :, patch_id].permute(0, 2, 1) # (B, n_p, ks*ks*C)
feat_v = feat_v.flatten(0, 1).view(B*num_patches, k_s*k_s, C)
feat_reshape = torch.bmm(attn_qs, feat_v) # (B*n_p, 1, C)
x_sample = feat_reshape.flatten(0, 1)
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
return_mats.append(attn_qs)
x_sample = self.l2norm(x_sample)
if num_patches == 0:
x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids, return_mats
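# Selection recipe used above, summarized (explanatory comments, not from the
# original file): for the deeper layers, every spatial location attends over its
# 7x7 unfolded neighborhood; the locations whose attention distribution has the
# lowest entropy (most concentrated, hence most query-relevant) are kept, and
# their attention weights re-aggregate the local features before the MLP projection.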
class G_Resnet(nn.Module):
def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
norm=None, nl_layer=None):
super(G_Resnet, self).__init__()
n_downsample = num_downs
pad_type = 'reflect'
self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
if nz == 0:
self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
else:
self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
def decode(self, content, style=None):
return self.dec(content, style)
def forward(self, image, style=None, nce_layers=[], encode_only=False):
content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
if encode_only:
return feats
else:
images_recon = self.decode(content, style)
if len(nce_layers) > 0:
return images_recon, feats
else:
return images_recon
##################################################################################
# Encoder and Decoders
##################################################################################
class E_adaIN(nn.Module):
def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
norm=None, nl_layer=None, vae=False):
# style encoder
super(E_adaIN, self).__init__()
self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
def forward(self, image):
style = self.enc_style(image)
return style
class StyleEncoder(nn.Module):
def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
super(StyleEncoder, self).__init__()
self.vae = vae
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
for i in range(n_downsample - 2):
self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
if self.vae:
self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
else:
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
if self.vae:
output = self.model(x)
output = output.view(x.size(0), -1)
output_mean = self.fc_mean(output)
output_var = self.fc_var(output)
return output_mean, output_var
else:
return self.model(x).view(x.size(0), -1)
class ContentEncoder(nn.Module):
def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
super(ContentEncoder, self).__init__()
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
# downsampling blocks
for i in range(n_downsample):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
# residual blocks
self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x, nce_layers=[], encode_only=False):
if len(nce_layers) > 0:
feat = x
feats = []
for layer_id, layer in enumerate(self.model):
feat = layer(feat)
if layer_id in nce_layers:
feats.append(feat)
if layer_id == nce_layers[-1] and encode_only:
return None, feats
return feat, feats
else:
return self.model(x), None
class Decoder_all(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder_all, self).__init__()
# AdaIN residual blocks
self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
self.n_blocks = 0
# upsampling blocks
for i in range(n_upsample):
block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
self.n_blocks += 1
dim //= 2
# use reflection padding in the last conv layer
setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
self.n_blocks += 1
def forward(self, x, y=None):
if y is not None:
output = self.resnet_block(cat_feature(x, y))
for n in range(self.n_blocks):
block = getattr(self, 'block_{:d}'.format(n))
if n > 0:
output = block(cat_feature(output, y))
else:
output = block(output)
return output
class Decoder(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
# upsampling blocks
for i in range(n_upsample):
if i == 0:
input_dim = dim + nz
else:
input_dim = dim
self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
self.model = nn.Sequential(*self.model)
def forward(self, x, y=None):
if y is not None:
return self.model(cat_feature(x, y))
else:
return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
y.size(0), y.size(1), x.size(2), x.size(3))
x_cat = torch.cat([x, y_expand], 1)
return x_cat
class ResBlock(nn.Module):
def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlock, self).__init__()
model = []
model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
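# Illustrative sketch (editor's addition, not from the original repo): with
# stride 1 and padding (kernel_size - 1) // 2, a Conv2dBlock preserves the
# spatial resolution, e.g.
#   >>> block = Conv2dBlock(3, 64, 7, 1, 3, norm='inst', activation='relu', pad_type='reflect')
#   >>> block(torch.randn(1, 3, 256, 256)).shape
#   torch.Size([1, 64, 256, 256])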
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
##################################################################################
# Normalization layers
##################################################################################
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
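# Illustrative sketch (editor's addition): unlike nn.LayerNorm, this variant
# normalizes each sample over all of its remaining dimensions (C, H and W
# together) and applies a single per-channel affine pair:
#   >>> ln = LayerNorm(64)
#   >>> ln(torch.randn(2, 64, 8, 8)).shape
#   torch.Size([2, 64, 8, 8])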
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
    We adapt Torch code and idea from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
                    return feats  # return intermediate features alone; stop at the last requested layer
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
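# Illustrative sketch (editor's addition): the generator doubles as a feature
# extractor when encode_only=True; the layer ids below are arbitrary examples,
# not the repo's configured nce_layers.
#   >>> G = ResnetGenerator(3, 3, ngf=64, n_blocks=9)
#   >>> fake = G(torch.randn(1, 3, 256, 256))          # standard translation pass
#   >>> feats = G(torch.randn(1, 3, 256, 256), layers=[0, 4, 8], encode_only=True)
#   >>> len(feats)
#   3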
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
if(no_antialias):
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
else:
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
if(no_antialias):
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
Downsample(ndf * nf_mult)]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
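# Illustrative sketch (editor's addition): with the default n_layers=3 the
# receptive field is 70x70 and a 256x256 input yields a 30x30 map of per-patch
# real/fake scores:
#   >>> D = NLayerDiscriminator(3, ndf=64, n_layers=3)
#   >>> D(torch.randn(1, 3, 256, 256)).shape
#   torch.Size([1, 1, 30, 30])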
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class PatchDiscriminator(NLayerDiscriminator):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
def forward(self, input):
B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
size = 16
Y = H // size
X = W // size
input = input.view(B, C, Y, size, X, size)
input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
return super().forward(input)
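# Editor's note (sketch): the view/permute above tiles each image into
# non-overlapping 16x16 crops, so a (B, C, 64, 64) input becomes a batch of
# (B * 4 * 4, C, 16, 16) patches that the parent NLayerDiscriminator (built
# here with n_layers=2) scores independently.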
class GroupedChannelNorm(nn.Module):
def __init__(self, num_groups):
super().__init__()
self.num_groups = num_groups
def forward(self, x):
shape = list(x.shape)
new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
x = x.view(*new_shape)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x_norm = (x - mean) / (std + 1e-7)
return x_norm.view(*shape)
| 61,828 | 42.480309 | 187 | py |
query-selected-attention | query-selected-attention-main/models/networks_global.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
    elif(filt_size == 7):
        a = np.array([1., 6., 15., 20., 15., 6., 1.])
    else:
        raise ValueError('Unsupported filter size: %d' % filt_size)
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
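# Illustrative sketch (editor's addition): the rows above are Pascal's-triangle
# binomial coefficients, so the outer product gives a normalized blur kernel:
#   >>> get_filter(3)
#   tensor([[0.0625, 0.1250, 0.0625],
#           [0.1250, 0.2500, 0.1250],
#           [0.0625, 0.1250, 0.0625]])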
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
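# Illustrative sketch (editor's addition): blur-then-subsample halves the
# spatial resolution while keeping the channel count (anti-aliased pooling):
#   >>> down = Downsample(channels=64, filt_size=3, stride=2)
#   >>> down(torch.randn(2, 64, 32, 32)).shape
#   torch.Size([2, 64, 16, 16])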
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
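# Illustrative sketch (editor's addition): the padded transposed convolution
# with the blur kernel doubles the spatial resolution:
#   >>> up = Upsample(channels=64)
#   >>> up(torch.randn(2, 64, 32, 32)).shape
#   torch.Size([2, 64, 64, 64])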
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
    else:
        raise NotImplementedError('Pad type [%s] not recognized' % pad_type)
    return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
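# Illustrative sketch (editor's addition):
#   >>> norm_layer = get_norm_layer('instance')
#   >>> norm_layer(64)
#   InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)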
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
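# Illustrative sketch (editor's addition): with the 'linear' policy and, say,
# n_epochs=100, n_epochs_decay=100, epoch_count=1, the multiplier stays at 1.0
# for the first 100 epochs and then decays linearly to zero:
#   lambda_rule(99)  -> 1.0
#   lambda_rule(150) -> 1.0 - 51 / 101 ~= 0.495
#   lambda_rule(200) -> 1.0 - 101 / 101 = 0.0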
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
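# Illustrative sketch (editor's addition): a CPU-only construction of the
# default ResNet generator (gpu_ids=[] skips the .to(gpu) call):
#   >>> netG = define_G(3, 3, 64, 'resnet_9blocks', norm='instance')
#   >>> netG(torch.randn(1, 3, 256, 256)).shape
#   torch.Size([1, 3, 256, 256])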
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
            with the parameter <n_layers_D> (default=3, as used in [basic] (PatchGAN)).
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == "patch":
net = PatchDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids,
initialize_weights=('stylegan2' not in netD))
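# Illustrative sketch (editor's addition):
#   >>> netD = define_D(3, 64, 'basic', norm='instance')
#   >>> netD(torch.randn(1, 3, 256, 256)).shape
#   torch.Size([1, 1, 30, 30])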
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid; vanilla GANs handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp', 'nonsaturating']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
bs = prediction.size(0)
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'nonsaturating':
if target_is_real:
loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
else:
loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
return loss
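# Illustrative sketch (editor's addition): the loss builds its own target
# tensor, so callers only pass the prediction map and a bool:
#   >>> criterion = GANLoss('lsgan')
#   >>> pred = torch.randn(4, 1, 30, 30)
#   >>> loss_D_real = criterion(pred, True)    # MSE against an all-ones map
#   >>> loss_D_fake = criterion(pred, False)   # MSE against an all-zeros map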
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
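# Illustrative sketch (editor's addition), assuming netD, real and fake already
# exist with matching shapes and that the penalty is used in the WGAN-GP mode:
#   >>> gp, grads = cal_gradient_penalty(netD, real, fake, device='cpu')
#   >>> gp.backward()   # add the penalty term to the discriminator update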
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
class PoolingF(nn.Module):
def __init__(self):
super(PoolingF, self).__init__()
model = [nn.AdaptiveMaxPool2d(1)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
return self.l2norm(self.model(x))
class ReshapeF(nn.Module):
def __init__(self):
super(ReshapeF, self).__init__()
model = [nn.AdaptiveAvgPool2d(4)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
x = self.model(x)
x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
return self.l2norm(x_reshape)
class StridedConvF(nn.Module):
def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
super().__init__()
# self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
# self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
self.l2_norm = Normalize(2)
self.mlps = {}
self.moving_averages = {}
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, x):
C, H = x.shape[1], x.shape[2]
n_down = int(np.rint(np.log2(H / 32)))
mlp = []
for i in range(n_down):
mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
mlp.append(nn.ReLU())
C = max(C // 2, 64)
mlp.append(nn.Conv2d(C, 64, 3))
mlp = nn.Sequential(*mlp)
init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
return mlp
def update_moving_average(self, key, x):
if key not in self.moving_averages:
self.moving_averages[key] = x.detach()
self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
def forward(self, x, use_instance_norm=False):
C, H = x.shape[1], x.shape[2]
key = '%d_%d' % (C, H)
if key not in self.mlps:
self.mlps[key] = self.create_mlp(x)
self.add_module("child_%s" % key, self.mlps[key])
mlp = self.mlps[key]
x = mlp(x)
self.update_moving_average(key, x)
x = x - self.moving_averages[key]
if use_instance_norm:
x = F.instance_norm(x)
return self.l2_norm(x)
class PatchSampleF(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleF, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None, attn_mats=None):
return_ids = []
return_feats = []
return_mats = []
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, C, H, W = feat.shape[0], feat.shape[1], feat.shape[2], feat.shape[3]
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # B*HW*C
if num_patches > 0:
if feat_id < 3:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device) # random id in [0, HW]
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
attn_qs = torch.zeros(1).to(feat.device)
else:
if attn_mats is not None:
attn_qs = attn_mats[feat_id]
else:
                        # global attention: each spatial position (query) attends
                        # over all positions (keys) of the same feature map
                        feat_q = feat_reshape
                        feat_k = feat_reshape.permute(0, 2, 1)
                        dots = torch.bmm(feat_q, feat_k)  # (B, HW, HW)
                        attn = dots.softmax(dim=2)
                        # per-query entropy of the attention distribution; zero out
                        # the infs that -log(0) produces so they cannot poison the sum
                        prob = -torch.log(attn)
                        prob = torch.where(torch.isinf(prob), torch.full_like(prob, 0), prob)
                        entropy = torch.sum(torch.mul(attn, prob), dim=2)
                        # keep the num_patches lowest-entropy (most selective) queries
                        _, index = torch.sort(entropy)
                        patch_id = index[:, :num_patches]
                        attn_qs = attn[torch.arange(B)[:, None], patch_id, :]  # (B, n_p, HW)
                    # aggregate content features with the selected attention rows
                    feat_reshape = torch.bmm(attn_qs, feat_reshape)  # (B, n_p, C)
                    x_sample = feat_reshape.flatten(0, 1)
                    patch_id = []
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
return_mats.append(attn_qs)
x_sample = self.l2norm(x_sample)
if num_patches == 0:
x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids, return_mats
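# Illustrative sketch (editor's addition): for the deeper layers (feat_id >= 3)
# the module ranks queries by attention entropy and keeps only the most
# selective ones; this query-selection step gives QS-Attn its name.
#   >>> sampler = PatchSampleF(use_mlp=False)
#   >>> feats = [torch.randn(1, 64, 32, 32)] * 4      # stand-in encoder features
#   >>> out_feats, ids, mats = sampler(feats, num_patches=64)
#   >>> out_feats[0].shape, out_feats[3].shape
#   (torch.Size([64, 64]), torch.Size([64, 64]))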
class G_Resnet(nn.Module):
def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
norm=None, nl_layer=None):
super(G_Resnet, self).__init__()
n_downsample = num_downs
pad_type = 'reflect'
self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
if nz == 0:
self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
else:
self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
def decode(self, content, style=None):
return self.dec(content, style)
def forward(self, image, style=None, nce_layers=[], encode_only=False):
content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
if encode_only:
return feats
else:
images_recon = self.decode(content, style)
if len(nce_layers) > 0:
return images_recon, feats
else:
return images_recon
##################################################################################
# Encoder and Decoders
##################################################################################
class E_adaIN(nn.Module):
def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
norm=None, nl_layer=None, vae=False):
# style encoder
super(E_adaIN, self).__init__()
self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
def forward(self, image):
style = self.enc_style(image)
return style
class StyleEncoder(nn.Module):
def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
super(StyleEncoder, self).__init__()
self.vae = vae
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
for i in range(n_downsample - 2):
self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
if self.vae:
self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
else:
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
if self.vae:
output = self.model(x)
output = output.view(x.size(0), -1)
output_mean = self.fc_mean(output)
output_var = self.fc_var(output)
return output_mean, output_var
else:
return self.model(x).view(x.size(0), -1)
class ContentEncoder(nn.Module):
def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
super(ContentEncoder, self).__init__()
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
# downsampling blocks
for i in range(n_downsample):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
# residual blocks
self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x, nce_layers=[], encode_only=False):
if len(nce_layers) > 0:
feat = x
feats = []
for layer_id, layer in enumerate(self.model):
feat = layer(feat)
if layer_id in nce_layers:
feats.append(feat)
if layer_id == nce_layers[-1] and encode_only:
return None, feats
return feat, feats
        else:
            return self.model(x), None
class Decoder_all(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder_all, self).__init__()
# AdaIN residual blocks
self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
self.n_blocks = 0
# upsampling blocks
for i in range(n_upsample):
block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
self.n_blocks += 1
dim //= 2
# use reflection padding in the last conv layer
setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
self.n_blocks += 1
def forward(self, x, y=None):
if y is not None:
output = self.resnet_block(cat_feature(x, y))
for n in range(self.n_blocks):
block = getattr(self, 'block_{:d}'.format(n))
if n > 0:
output = block(cat_feature(output, y))
else:
output = block(output)
return output
class Decoder(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
# upsampling blocks
for i in range(n_upsample):
if i == 0:
input_dim = dim + nz
else:
input_dim = dim
self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
self.model = nn.Sequential(*self.model)
def forward(self, x, y=None):
if y is not None:
return self.model(cat_feature(x, y))
else:
return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
y.size(0), y.size(1), x.size(2), x.size(3))
x_cat = torch.cat([x, y_expand], 1)
return x_cat
class ResBlock(nn.Module):
def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlock, self).__init__()
model = []
model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
##################################################################################
# Normalization layers
##################################################################################
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
    We adapt Torch code and idea from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
return feats # return intermediate features alone; stop in the last layers
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
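# A minimal shape sketch of the skip connection above: a non-outermost block
# returns its input concatenated with the processed features, so the channel
# count doubles. Illustrative helper only, not used elsewhere in this file.
def _unet_block_shape_sketch():
    inner = UnetSkipConnectionBlock(outer_nc=64, inner_nc=128, innermost=True)
    x = torch.randn(1, 64, 32, 32)
    out = inner(x)                        # torch.cat([x, upsample(downsample(x))], 1)
    return out.shape                      # -> (1, 128, 32, 32)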
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
if(no_antialias):
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
else:
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
if(no_antialias):
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
Downsample(ndf * nf_mult)]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
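# A minimal sketch of the PatchGAN output: the discriminator is fully
# convolutional, so it emits one logit per overlapping input patch rather than
# a single score per image. Illustrative helper only.
def _patchgan_output_sketch():
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    x = torch.randn(1, 3, 256, 256)
    return netD(x).shape                  # 1-channel prediction map, e.g. (1, 1, 30, 30)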
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class PatchDiscriminator(NLayerDiscriminator):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
def forward(self, input):
B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
size = 16
Y = H // size
X = W // size
input = input.view(B, C, Y, size, X, size)
input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
return super().forward(input)
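# A minimal sketch of the patch splitting above: the image is tiled into
# non-overlapping 16x16 patches (H and W must be divisible by 16), and each
# patch is scored independently by the inherited PatchGAN. Illustrative only.
def _patch_split_sketch():
    netD = PatchDiscriminator(input_nc=3)
    x = torch.randn(2, 3, 64, 64)         # -> 2 * (64 // 16) ** 2 = 32 patches
    return netD(x).shape                  # batch dimension becomes B * Y * X = 32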
class GroupedChannelNorm(nn.Module):
def __init__(self, num_groups):
super().__init__()
self.num_groups = num_groups
def forward(self, x):
shape = list(x.shape)
new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
x = x.view(*new_shape)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x_norm = (x - mean) / (std + 1e-7)
return x_norm.view(*shape)
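# A minimal sketch of GroupedChannelNorm: channels are split into groups and
# each group is normalized by its own mean and std, leaving the overall shape
# unchanged. Illustrative helper only.
def _grouped_channel_norm_sketch():
    gn = GroupedChannelNorm(num_groups=4)
    x = torch.randn(2, 16, 8, 8)          # 16 channels -> 4 groups of 4
    return gn(x).shape                    # -> (2, 16, 8, 8)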
| 61,118 | 42.19364 | 187 | py |
query-selected-attention | query-selected-attention-main/models/__init__.py | """This package contains modules related to objective functions, optimizations, and network architectures.
To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
You need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate loss, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
In the function <__init__>, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
Now you can use the model class by specifying flag '--model dummy'.
See our template model class 'template_model.py' for more details.
"""
import importlib
from models.base_model import BaseModel
def find_model_using_name(model_name):
"""Import the module "models/[model_name]_model.py".
    In the file, the class called DatasetNameModel() will
    be returned. It has to be a subclass of BaseModel,
    and the name matching is case-insensitive.
"""
model_filename = "models." + model_name + "_model"
modellib = importlib.import_module(model_filename)
model = None
target_model_name = model_name.replace('_', '') + 'model'
for name, cls in modellib.__dict__.items():
if name.lower() == target_model_name.lower() \
and issubclass(cls, BaseModel):
model = cls
if model is None:
print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
exit(0)
return model
def get_option_setter(model_name):
"""Return the static method <modify_commandline_options> of the model class."""
model_class = find_model_using_name(model_name)
return model_class.modify_commandline_options
def create_model(opt):
"""Create a model given the option.
    This function wraps the model class found by find_model_using_name.
    It is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from models import create_model
>>> model = create_model(opt)
"""
model = find_model_using_name(opt.model)
instance = model(opt)
print("model [%s] was created" % type(instance).__name__)
return instance
| 3,072 | 44.191176 | 250 | py |
query-selected-attention | query-selected-attention-main/models/networks_local_global.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
###############################################################################
# Helper Functions
###############################################################################
def get_filter(filt_size=3):
if(filt_size == 1):
a = np.array([1., ])
elif(filt_size == 2):
a = np.array([1., 1.])
elif(filt_size == 3):
a = np.array([1., 2., 1.])
elif(filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(filt_size == 5):
a = np.array([1., 4., 6., 4., 1.])
elif(filt_size == 6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(filt_size == 7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
return filt
class Downsample(nn.Module):
def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size == 1):
if(self.pad_off == 0):
return inp[:, :, ::self.stride, ::self.stride]
else:
return self.pad(inp)[:, :, ::self.stride, ::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
class Upsample2(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super().__init__()
self.factor = scale_factor
self.mode = mode
def forward(self, x):
return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
class Upsample(nn.Module):
def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
super(Upsample, self).__init__()
self.filt_size = filt_size
self.filt_odd = np.mod(filt_size, 2) == 1
self.pad_size = int((filt_size - 1) / 2)
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
filt = get_filter(filt_size=self.filt_size) * (stride**2)
self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
def forward(self, inp):
ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
if(self.filt_odd):
return ret_val
else:
return ret_val[:, :, :-1, :-1]
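# A minimal shape sketch of the antialiased resampling pair: Downsample blurs
# with a fixed binomial filter before striding, and Upsample applies the same
# filter through a transposed convolution, so the two halve and double the
# spatial size exactly. Illustrative helper only.
def _antialias_resampling_sketch():
    x = torch.randn(2, 64, 32, 32)
    down = Downsample(64)                 # filt_size=3, stride=2
    up = Upsample(64)                     # filt_size=4, stride=2
    y = down(x)                           # -> (2, 64, 16, 16)
    z = up(y)                             # -> (2, 64, 32, 32)
    return y.shape, z.shape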
def get_pad_layer(pad_type):
if(pad_type in ['refl', 'reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl', 'replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type == 'zero'):
PadLayer = nn.ZeroPad2d
else:
        raise NotImplementedError('Pad type [%s] not recognized' % pad_type)
    return PadLayer
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x):
return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
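# A minimal usage sketch: get_norm_layer returns a callable that only needs
# the channel count, which is how the generators below consume it.
# Illustrative helper only.
def _norm_layer_sketch():
    norm_layer = get_norm_layer('instance')   # functools.partial(nn.InstanceNorm2d, ...)
    layer = norm_layer(64)
    return layer(torch.randn(1, 64, 8, 8)).shape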
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
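# A minimal usage sketch with hypothetical option values; epoch_count,
# n_epochs, and n_epochs_decay mirror the fields read by lambda_rule above.
# Illustrative helper only.
def _scheduler_sketch():
    import argparse
    opt = argparse.Namespace(lr_policy='linear', epoch_count=1,
                             n_epochs=100, n_epochs_decay=100)
    optimizer = torch.optim.Adam([nn.Parameter(torch.zeros(1))], lr=2e-4)
    return get_scheduler(optimizer, opt)      # LambdaLR: flat, then linear decay to zero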
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if debug:
print(classname)
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# if not amp:
# net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training
if initialize_weights:
init_weights(net, init_type, init_gain=init_gain, debug=debug)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
elif netG == 'resnet_4blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'resnet_cat':
n_blocks = 8
net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))
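# A minimal usage sketch: on CPU (empty gpu_ids) with instance normalization,
# the ResNet generator maps an image to an image of the same size.
# Illustrative helper only.
def _define_g_sketch():
    netG = define_G(3, 3, 64, 'resnet_9blocks', norm='instance', gpu_ids=[])
    x = torch.randn(1, 3, 256, 256)
    return netG(x).shape                  # -> (1, 3, 256, 256)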
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
if netF == 'global_pool':
net = PoolingF()
elif netF == 'reshape':
net = ReshapeF()
elif netF == 'sample':
net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'mlp_sample':
net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
elif netF == 'strided_conv':
net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
else:
raise NotImplementedError('projection model name [%s] is not recognized' % netF)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
            with the parameter <n_layers_D> (default=3, as used in [basic] (PatchGAN)).
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == "patch":
net = PatchDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids,
initialize_weights=('stylegan2' not in netD))
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp', 'nonsaturating']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
bs = prediction.size(0)
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'nonsaturating':
if target_is_real:
loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
else:
loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
return loss
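# A minimal usage sketch: the criterion broadcasts a scalar real/fake label to
# the full prediction map, so PatchGAN outputs need no manual target tensors.
# Illustrative helper only.
def _gan_loss_sketch():
    criterion = GANLoss('lsgan')
    pred = torch.randn(4, 1, 30, 30)      # discriminator logits
    loss_real = criterion(pred, True)     # MSE against an all-ones map
    loss_fake = criterion(pred, False)    # MSE against an all-zeros map
    return loss_real, loss_fake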
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)                -- the constant used in formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
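# A minimal usage sketch with a stand-in one-layer discriminator (real call
# sites pass the networks defined below). Illustrative helper only.
def _gradient_penalty_sketch():
    netD = nn.Sequential(nn.Conv2d(3, 1, kernel_size=4, stride=2, padding=1))
    real = torch.randn(2, 3, 16, 16)
    fake = torch.randn(2, 3, 16, 16)
    gp, grads = cal_gradient_penalty(netD, real, fake, device='cpu')
    return gp                             # scalar penalty, weighted by lambda_gp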
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
class PoolingF(nn.Module):
def __init__(self):
super(PoolingF, self).__init__()
model = [nn.AdaptiveMaxPool2d(1)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
return self.l2norm(self.model(x))
class ReshapeF(nn.Module):
def __init__(self):
super(ReshapeF, self).__init__()
model = [nn.AdaptiveAvgPool2d(4)]
self.model = nn.Sequential(*model)
self.l2norm = Normalize(2)
def forward(self, x):
x = self.model(x)
x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
return self.l2norm(x_reshape)
class StridedConvF(nn.Module):
def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
super().__init__()
# self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
# self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
self.l2_norm = Normalize(2)
self.mlps = {}
self.moving_averages = {}
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, x):
C, H = x.shape[1], x.shape[2]
n_down = int(np.rint(np.log2(H / 32)))
mlp = []
for i in range(n_down):
mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
mlp.append(nn.ReLU())
C = max(C // 2, 64)
mlp.append(nn.Conv2d(C, 64, 3))
mlp = nn.Sequential(*mlp)
init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
return mlp
def update_moving_average(self, key, x):
if key not in self.moving_averages:
self.moving_averages[key] = x.detach()
self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001
def forward(self, x, use_instance_norm=False):
C, H = x.shape[1], x.shape[2]
key = '%d_%d' % (C, H)
if key not in self.mlps:
self.mlps[key] = self.create_mlp(x)
self.add_module("child_%s" % key, self.mlps[key])
mlp = self.mlps[key]
x = mlp(x)
self.update_moving_average(key, x)
x = x - self.moving_averages[key]
if use_instance_norm:
x = F.instance_norm(x)
return self.l2_norm(x)
class PatchSampleF(nn.Module):
def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
# potential issues: currently, we use the same patch_ids for multiple images in the batch
super(PatchSampleF, self).__init__()
self.l2norm = Normalize(2)
self.use_mlp = use_mlp
self.nc = nc # hard-coded
self.mlp_init = False
self.init_type = init_type
self.init_gain = init_gain
self.gpu_ids = gpu_ids
def create_mlp(self, feats):
for mlp_id, feat in enumerate(feats):
input_nc = feat.shape[1]
mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
mlp.cuda()
setattr(self, 'mlp_%d' % mlp_id, mlp)
init_net(self, self.init_type, self.init_gain, self.gpu_ids)
self.mlp_init = True
def forward(self, feats, num_patches=64, patch_ids=None, attn_mats=None):
return_ids = []
return_feats = []
return_mats = []
k_s = 7 # kernel size in unfold
if self.use_mlp and not self.mlp_init:
self.create_mlp(feats)
for feat_id, feat in enumerate(feats):
B, C, H, W = feat.shape[0], feat.shape[1], feat.shape[2], feat.shape[3]
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # B*HW*C
if num_patches > 0:
if feat_id < 3:
if patch_ids is not None:
patch_id = patch_ids[feat_id]
else:
patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device) # random id in [0, HW]
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
attn_qs = torch.zeros(1).to(feat.device)
else:
if attn_mats is not None:
attn_qs = attn_mats[feat_id]
else:
feat_local = F.unfold(feat, kernel_size=k_s, stride=1, padding=3) # (B, ks*ks*C, L)
L = feat_local.shape[2]
feat_k_local = feat_local.permute(0, 2, 1).reshape(B, L, k_s*k_s, C).flatten(0, 1) # (B*L, ks*ks, C)
feat_q_local = feat_reshape.reshape(B*L, C, 1)
dots_local = torch.bmm(feat_k_local, feat_q_local) # (B*L, ks*ks, 1)
attn_local = dots_local.softmax(dim=1)
attn_local = attn_local.reshape(B, L, -1) # (B, L, ks*ks)
prob = -torch.log(attn_local)
prob = torch.where(torch.isinf(prob), torch.full_like(prob, 0), prob)
entropy = torch.sum(torch.mul(attn_local, prob), dim=2)
_, index = torch.sort(entropy)
patch_id = index[:, :num_patches]
feat_q_global = feat_reshape
feat_k_global = feat_reshape.permute(0, 2, 1)
dots_global = torch.bmm(feat_q_global, feat_k_global) # (B, HW, HW)
attn_global = dots_global.softmax(dim=2)
attn_qs = attn_global[torch.arange(B)[:, None], patch_id, :]
feat_reshape = torch.bmm(attn_qs, feat_reshape) # (B, n_p, C)
x_sample = feat_reshape.flatten(0, 1)
patch_id = []
else:
x_sample = feat_reshape
patch_id = []
if self.use_mlp:
mlp = getattr(self, 'mlp_%d' % feat_id)
x_sample = mlp(x_sample)
return_ids.append(patch_id)
return_mats.append(attn_qs)
x_sample = self.l2norm(x_sample)
if num_patches == 0:
x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
return_feats.append(x_sample)
return return_feats, return_ids, return_mats
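# A minimal shape walk-through of the sampler: with four feature levels, the
# first three are sampled at random spatial indices, while the fourth takes
# the query-selection path above (per-pixel local attention entropy picks the
# queries, then global attention pools their features). Illustrative only;
# use_mlp is off so no CUDA is required.
def _patch_sample_sketch():
    sampler = PatchSampleF(use_mlp=False)
    feats = [torch.randn(1, 8, 8, 8) for _ in range(4)]
    out_feats, ids, mats = sampler(feats, num_patches=16)
    return [f.shape for f in out_feats]   # four tensors of shape (16, 8)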
class G_Resnet(nn.Module):
def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
norm=None, nl_layer=None):
super(G_Resnet, self).__init__()
n_downsample = num_downs
pad_type = 'reflect'
self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
if nz == 0:
self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
else:
self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
def decode(self, content, style=None):
return self.dec(content, style)
def forward(self, image, style=None, nce_layers=[], encode_only=False):
content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
if encode_only:
return feats
else:
images_recon = self.decode(content, style)
if len(nce_layers) > 0:
return images_recon, feats
else:
return images_recon
##################################################################################
# Encoder and Decoders
##################################################################################
class E_adaIN(nn.Module):
def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
norm=None, nl_layer=None, vae=False):
# style encoder
super(E_adaIN, self).__init__()
self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
def forward(self, image):
style = self.enc_style(image)
return style
class StyleEncoder(nn.Module):
def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
super(StyleEncoder, self).__init__()
self.vae = vae
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
for i in range(2):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
for i in range(n_downsample - 2):
self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
self.model += [nn.AdaptiveAvgPool2d(1)] # global average pooling
if self.vae:
self.fc_mean = nn.Linear(dim, style_dim) # , 1, 1, 0)
self.fc_var = nn.Linear(dim, style_dim) # , 1, 1, 0)
else:
self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x):
if self.vae:
output = self.model(x)
output = output.view(x.size(0), -1)
output_mean = self.fc_mean(output)
output_var = self.fc_var(output)
return output_mean, output_var
else:
return self.model(x).view(x.size(0), -1)
class ContentEncoder(nn.Module):
def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
super(ContentEncoder, self).__init__()
self.model = []
self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
# downsampling blocks
for i in range(n_downsample):
self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
dim *= 2
# residual blocks
self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
self.model = nn.Sequential(*self.model)
self.output_dim = dim
def forward(self, x, nce_layers=[], encode_only=False):
if len(nce_layers) > 0:
feat = x
feats = []
for layer_id, layer in enumerate(self.model):
feat = layer(feat)
if layer_id in nce_layers:
feats.append(feat)
if layer_id == nce_layers[-1] and encode_only:
return None, feats
return feat, feats
else:
return self.model(x), None
class Decoder_all(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder_all, self).__init__()
# AdaIN residual blocks
self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
self.n_blocks = 0
# upsampling blocks
for i in range(n_upsample):
block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
self.n_blocks += 1
dim //= 2
# use reflection padding in the last conv layer
setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
self.n_blocks += 1
def forward(self, x, y=None):
if y is not None:
output = self.resnet_block(cat_feature(x, y))
for n in range(self.n_blocks):
block = getattr(self, 'block_{:d}'.format(n))
if n > 0:
output = block(cat_feature(output, y))
else:
output = block(output)
return output
class Decoder(nn.Module):
def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
super(Decoder, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
# upsampling blocks
for i in range(n_upsample):
if i == 0:
input_dim = dim + nz
else:
input_dim = dim
self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
dim //= 2
# use reflection padding in the last conv layer
self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
self.model = nn.Sequential(*self.model)
def forward(self, x, y=None):
if y is not None:
return self.model(cat_feature(x, y))
else:
return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
y.size(0), y.size(1), x.size(2), x.size(3))
x_cat = torch.cat([x, y_expand], 1)
return x_cat
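# A minimal shape sketch: cat_feature broadcasts a per-image code over every
# spatial location before concatenating along channels, which is how the
# decoders above inject the style vector. Illustrative helper only.
def _cat_feature_sketch():
    x = torch.randn(2, 64, 32, 32)        # spatial feature map
    y = torch.randn(2, 8)                 # per-image latent code
    return cat_feature(x, y).shape        # -> (2, 72, 32, 32)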
class ResBlock(nn.Module):
def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
super(ResBlock, self).__init__()
model = []
model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv2dBlock(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', pad_type='zero'):
super(Conv2dBlock, self).__init__()
self.use_bias = True
# initialize padding
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, "Unsupported padding type: {}".format(pad_type)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
# initialize convolution
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'batch':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'inst':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'ln':
self.norm = LayerNorm(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, "Unsupported normalization: {}".format(norm)
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU()
elif activation == 'selu':
self.activation = nn.SELU(inplace=True)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, "Unsupported activation: {}".format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
##################################################################################
# Normalization layers
##################################################################################
class LayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, affine=True):
super(LayerNorm, self).__init__()
self.num_features = num_features
self.affine = affine
self.eps = eps
if self.affine:
self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
shape = [-1] + [1] * (x.dim() - 1)
mean = x.view(x.size(0), -1).mean(1).view(*shape)
std = x.view(x.size(0), -1).std(1).view(*shape)
x = (x - mean) / (std + self.eps)
if self.affine:
shape = [1, -1] + [1] * (x.dim() - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
    We adapt Torch code and idea from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.opt = opt
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if no_antialias_up:
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1, # output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, layers=[], encode_only=False):
if -1 in layers:
layers.append(len(self.model))
if len(layers) > 0:
feat = input
feats = []
for layer_id, layer in enumerate(self.model):
# print(layer_id, layer)
feat = layer(feat)
if layer_id in layers:
# print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
feats.append(feat)
else:
# print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
pass
if layer_id == layers[-1] and encode_only:
# print('encoder only return features')
return feats # return intermediate features alone; stop in the last layers
return feat, feats # return both output and intermediate features
else:
"""Standard forward"""
fake = self.model(input)
return fake
class ResnetDecoder(nn.Module):
"""Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based decoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetDecoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = []
n_downsampling = 2
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
if(no_antialias):
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
model += [Upsample(ngf * mult),
nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetEncoder(nn.Module):
"""Resnet-based encoder that consists of a few downsampling + several Resnet blocks
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
"""Construct a Resnet-based encoder
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetEncoder, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
if(no_antialias):
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True),
Downsample(ngf * mult * 2)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
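# Note (illustrative): the residual sum above requires conv_block(x) to preserve the
# input shape; the 3x3 convs use effective padding 1 (reflect/replicate pads, or p=1
# for 'zero'), so both spatial size and channel count stay unchanged.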
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
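# Construction sketch (illustrative): the recursion adds num_downs levels in total
# (1 innermost + (num_downs - 5) intermediate + 3 channel-reducing + 1 outermost), so
# num_downs should be >= 5 and the input size divisible by 2 ** num_downs, e.g.
#   netG = UnetGenerator(3, 3, num_downs=7, ngf=64)
#   out = netG(torch.randn(1, 3, 128, 128))  # 128 = 2**7, so the bottleneck is 1x1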
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
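# Note (illustrative): non-outermost blocks return torch.cat([x, self.model(x)], 1),
# so a submodule hands outer_nc + input_nc channels up to its parent; this is why the
# outermost and intermediate blocks build their upconv with inner_nc * 2 input channels.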
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
if(no_antialias):
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
else:
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
if(no_antialias):
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
Downsample(ndf * nf_mult)]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
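# Usage sketch (illustrative): the discriminator outputs a 1-channel map of per-patch
# scores rather than a single scalar, e.g.
#   netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
#   pred = netD(torch.randn(1, 3, 256, 256))  # roughly (1, 1, 30, 30)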
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
class PatchDiscriminator(NLayerDiscriminator):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)
def forward(self, input):
        B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
        size = 16
        Y = H // size
        X = W // size
        input = input.view(B, C, Y, size, X, size)  # split H and W into a grid of 16x16 patches
        input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)  # fold the patch grid into the batch dimension
return super().forward(input)
class GroupedChannelNorm(nn.Module):
def __init__(self, num_groups):
super().__init__()
self.num_groups = num_groups
def forward(self, x):
shape = list(x.shape)
new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
x = x.view(*new_shape)
mean = x.mean(dim=2, keepdim=True)
std = x.std(dim=2, keepdim=True)
x_norm = (x - mean) / (std + 1e-7)
return x_norm.view(*shape)
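# Sanity-check sketch (illustrative): channels are split into num_groups groups and
# normalized across the channels of each group at every spatial location, e.g.
#   gcn = GroupedChannelNorm(num_groups=4)
#   y = gcn(torch.randn(2, 8, 16, 16))  # 8 channels -> 4 groups of 2 channels each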
| 61,819 | 42.443429 | 187 | py |
query-selected-attention | query-selected-attention-main/util/image_pool.py | import random
import torch
class ImagePool():
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""Return an image from the pool.
Parameters:
images: the latest generated images from the generator
Returns images from the buffer.
        With probability 0.5, the buffer will return the input images unchanged.
        With probability 0.5, the buffer will return images previously stored in the buffer,
        and insert the current images into the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = torch.cat(return_images, 0) # collect all the images and return
return return_images
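if __name__ == '__main__':  # minimal usage sketch, not part of the original module
    pool = ImagePool(pool_size=50)
    fake_batch = torch.randn(4, 3, 8, 8)
    mixed = pool.query(fake_batch)
    assert mixed.shape == fake_batch.shape  # same batch size; contents may mix in history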
| 2,226 | 39.490909 | 140 | py |
query-selected-attention | query-selected-attention-main/util/html.py | import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
import os
class HTML:
"""This HTML class allows us to save images and write texts into a single HTML file.
It consists of functions such as <add_header> (add a text header to the HTML file),
<add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
"""
def __init__(self, web_dir, title, refresh=0):
"""Initialize the HTML classes
Parameters:
            web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
            title (str) -- the webpage name
            refresh (int) -- how often the website refreshes itself; if 0, no refreshing
"""
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.doc = dominate.document(title=title)
if refresh > 0:
with self.doc.head:
meta(http_equiv="refresh", content=str(refresh))
def get_image_dir(self):
"""Return the directory that stores images"""
return self.img_dir
def add_header(self, text):
"""Insert a header to the HTML file
Parameters:
text (str) -- the header text
"""
with self.doc:
h3(text)
def add_images(self, ims, txts, links, width=400):
"""add images to the HTML file
Parameters:
ims (str list) -- a list of image paths
txts (str list) -- a list of image names shown on the website
            links (str list) -- a list of hyperlinks; when you click an image, it will redirect you to a new page
"""
self.t = table(border=1, style="table-layout: fixed;") # Insert a table
self.doc.add(self.t)
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
img(style="width:%dpx" % width, src=os.path.join('images', im))
br()
p(txt)
def save(self):
"""save the current content to the HMTL file"""
html_file = '%s/index.html' % self.web_dir
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
if __name__ == '__main__': # we show an example usage here.
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims, txts, links = [], [], []
for n in range(4):
ims.append('image_%d.png' % n)
txts.append('text_%d' % n)
links.append('image_%d.png' % n)
html.add_images(ims, txts, links)
html.save()
| 3,223 | 36.057471 | 157 | py |
query-selected-attention | query-selected-attention-main/util/visualizer.py | import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s/%s.png' % (label, name)
os.makedirs(os.path.join(image_dir, label), exist_ok=True)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
        Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
if opt.display_id is None:
self.display_id = np.random.randint(100000) * 10 # just a random display id
else:
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>
import visdom
self.plot_data = {}
self.ncols = opt.display_ncols
if "tensorboard_base_url" not in os.environ:
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
else:
self.vis = visdom.Visdom(port=2004,
base_url=os.environ['tensorboard_base_url'] + '/visdom')
if not self.vis.check_connection():
self.create_visdom_connections()
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
            visuals (OrderedDict) -- dictionary of images to display or save
            epoch (int) -- the current epoch
            save_result (bool) -- if save the current results to an HTML file
"""
if self.display_id > 0: # show images in the browser using visdom
ncols = self.ncols
if ncols > 0: # show all the images in one visdom panel
ncols = min(ncols, len(visuals))
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h) # create a table css
# create a table of images.
title = self.name
label_html = ''
label_html_row = ''
images = []
idx = 0
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
try:
self.vis.images(images, ncols, 2, self.display_id + 1,
None, dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
except VisdomExceptionBase:
self.create_visdom_connections()
else: # show each image in a separate visdom panel;
idx = 1
try:
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
self.vis.image(
image_numpy.transpose([2, 0, 1]),
self.display_id + idx,
None,
dict(title=label)
)
idx += 1
except VisdomExceptionBase:
self.create_visdom_connections()
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
                for label, image_numpy in visuals.items():
                    image_numpy = util.tensor2im(image_numpy)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_losses(self, epoch, counter_ratio, losses):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
            counter_ratio (float) -- progress (percentage) in the current epoch, between 0 and 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
if len(losses) == 0:
return
plot_name = '_'.join(list(losses.keys()))
if plot_name not in self.plot_data:
self.plot_data[plot_name] = {'X': [], 'Y': [], 'legend': list(losses.keys())}
plot_data = self.plot_data[plot_name]
plot_id = list(self.plot_data.keys()).index(plot_name)
plot_data['X'].append(epoch + counter_ratio)
plot_data['Y'].append([losses[k] for k in plot_data['legend']])
try:
self.vis.line(
X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1),
Y=np.array(plot_data['Y']),
opts={
'title': self.name,
'legend': plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id - plot_id)
except VisdomExceptionBase:
self.create_visdom_connections()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
| 11,187 | 45.041152 | 139 | py |
query-selected-attention | query-selected-attention-main/util/util.py | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import importlib
import argparse
from argparse import Namespace
import torchvision
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def copyconf(default_opt, **kwargs):
conf = Namespace(**vars(default_opt))
for key in kwargs:
setattr(conf, key, kwargs[key])
return conf
def find_class_in_module(target_cls_name, module):
target_cls_name = target_cls_name.replace('_', '').lower()
clslib = importlib.import_module(module)
cls = None
for name, clsobj in clslib.__dict__.items():
if name.lower() == target_cls_name:
cls = clsobj
assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name)
return cls
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].clamp(-1.0, 1.0).cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
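# Example (illustrative): a tensor in [-1, 1] with shape (1, 3, H, W) maps to a uint8
# HxWx3 array, e.g. tensor2im(torch.zeros(1, 3, 4, 4)) gives a 4x4x3 array filled
# with 127 ((0 + 1) / 2 * 255, truncated to uint8).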
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio is None:
pass
elif aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
elif aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
def correct_resize_label(t, size):
device = t.device
t = t.detach().cpu()
resized = []
for i in range(t.size(0)):
one_t = t[i, :1]
one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0))
one_np = one_np[:, :, 0]
one_image = Image.fromarray(one_np).resize(size, Image.NEAREST)
resized_t = torch.from_numpy(np.array(one_image)).long()
resized.append(resized_t)
return torch.stack(resized, dim=0).to(device)
def correct_resize(t, size, mode=Image.BICUBIC):
device = t.device
t = t.detach().cpu()
resized = []
for i in range(t.size(0)):
one_t = t[i:i + 1]
one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC)
resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0
resized.append(resized_t)
return torch.stack(resized, dim=0).to(device)
| 5,135 | 29.754491 | 145 | py |
query-selected-attention | query-selected-attention-main/util/__init__.py | """This package includes a miscellaneous collection of useful helper functions."""
from util import *
| 102 | 33.333333 | 82 | py |
query-selected-attention | query-selected-attention-main/util/get_data.py | from __future__ import print_function
import os
import tarfile
import requests
from warnings import warn
from zipfile import ZipFile
from bs4 import BeautifulSoup
from os.path import abspath, isdir, join, basename
class GetData(object):
"""A Python script for downloading CycleGAN or pix2pix datasets.
Parameters:
technique (str) -- One of: 'cyclegan' or 'pix2pix'.
verbose (bool) -- If True, print additional information.
Examples:
>>> from util.get_data import GetData
>>> gd = GetData(technique='cyclegan')
>>> new_data_path = gd.get(save_path='./datasets') # options will be displayed.
Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh'
and 'scripts/download_cyclegan_model.sh'.
"""
def __init__(self, technique='cyclegan', verbose=True):
url_dict = {
'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
}
self.url = url_dict.get(technique.lower())
self._verbose = verbose
def _print(self, text):
if self._verbose:
print(text)
@staticmethod
def _get_options(r):
soup = BeautifulSoup(r.text, 'lxml')
options = [h.text for h in soup.find_all('a', href=True)
if h.text.endswith(('.zip', 'tar.gz'))]
return options
def _present_options(self):
r = requests.get(self.url)
options = self._get_options(r)
print('Options:\n')
for i, o in enumerate(options):
print("{0}: {1}".format(i, o))
choice = input("\nPlease enter the number of the "
"dataset above you wish to download:")
return options[int(choice)]
def _download_data(self, dataset_url, save_path):
if not isdir(save_path):
os.makedirs(save_path)
base = basename(dataset_url)
temp_save_path = join(save_path, base)
with open(temp_save_path, "wb") as f:
r = requests.get(dataset_url)
f.write(r.content)
if base.endswith('.tar.gz'):
obj = tarfile.open(temp_save_path)
elif base.endswith('.zip'):
obj = ZipFile(temp_save_path, 'r')
else:
raise ValueError("Unknown File Type: {0}.".format(base))
self._print("Unpacking Data...")
obj.extractall(save_path)
obj.close()
os.remove(temp_save_path)
def get(self, save_path, dataset=None):
"""
Download a dataset.
Parameters:
save_path (str) -- A directory to save the data to.
dataset (str) -- (optional). A specific dataset to download.
Note: this must include the file extension.
If None, options will be presented for you
to choose from.
Returns:
save_path_full (str) -- the absolute path to the downloaded data.
"""
if dataset is None:
selected_dataset = self._present_options()
else:
selected_dataset = dataset
save_path_full = join(save_path, selected_dataset.split('.')[0])
if isdir(save_path_full):
warn("\n'{0}' already exists. Voiding Download.".format(
save_path_full))
else:
self._print('Downloading Data...')
url = "{0}/{1}".format(self.url, selected_dataset)
self._download_data(url, save_path=save_path)
return abspath(save_path_full)
| 3,639 | 31.792793 | 90 | py |
query-selected-attention | query-selected-attention-main/datasets/combine_A_and_B.py | import os
import numpy as np
import cv2
import argparse
parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
args = parser.parse_args()
for arg in vars(args):
print('[%s] = ' % arg, getattr(args, arg))
splits = os.listdir(args.fold_A)
for sp in splits:
img_fold_A = os.path.join(args.fold_A, sp)
img_fold_B = os.path.join(args.fold_B, sp)
img_list = os.listdir(img_fold_A)
if args.use_AB:
img_list = [img_path for img_path in img_list if '_A.' in img_path]
num_imgs = min(args.num_imgs, len(img_list))
print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
img_fold_AB = os.path.join(args.fold_AB, sp)
if not os.path.isdir(img_fold_AB):
os.makedirs(img_fold_AB)
print('split = %s, number of images = %d' % (sp, num_imgs))
for n in range(num_imgs):
name_A = img_list[n]
path_A = os.path.join(img_fold_A, name_A)
if args.use_AB:
name_B = name_A.replace('_A.', '_B.')
else:
name_B = name_A
path_B = os.path.join(img_fold_B, name_B)
if os.path.isfile(path_A) and os.path.isfile(path_B):
name_AB = name_A
if args.use_AB:
name_AB = name_AB.replace('_A.', '.') # remove _A
path_AB = os.path.join(img_fold_AB, name_AB)
im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_AB = np.concatenate([im_A, im_B], 1)
cv2.imwrite(path_AB, im_AB)
| 2,208 | 44.081633 | 129 | py |
query-selected-attention | query-selected-attention-main/datasets/prepare_cityscapes_dataset.py | import os
import glob
from PIL import Image
help_msg = """
The dataset can be downloaded from https://cityscapes-dataset.com.
Please download the datasets [gtFine_trainvaltest.zip] and [leftImg8bit_trainvaltest.zip] and unzip them.
gtFine contains the semantic segmentations. Use --gtFine_dir to specify the path to the unzipped gtFine_trainvaltest directory.
leftImg8bit contains the dashcam photographs. Use --leftImg8bit_dir to specify the path to the unzipped leftImg8bit_trainvaltest directory.
The processed images will be placed at --output_dir.
Example usage:
python prepare_cityscapes_dataset.py --gtFine_dir ./gtFine/ --leftImg8bit_dir ./leftImg8bit --output_dir ./datasets/cityscapes/
"""
def load_resized_img(path):
return Image.open(path).convert('RGB').resize((256, 256))
def check_matching_pair(segmap_path, photo_path):
segmap_identifier = os.path.basename(segmap_path).replace('_gtFine_color', '')
photo_identifier = os.path.basename(photo_path).replace('_leftImg8bit', '')
assert segmap_identifier == photo_identifier, \
"[%s] and [%s] don't seem to be matching. Aborting." % (segmap_path, photo_path)
def process_cityscapes(gtFine_dir, leftImg8bit_dir, output_dir, phase):
save_phase = 'test' if phase == 'val' else 'train'
savedir = os.path.join(output_dir, save_phase)
os.makedirs(savedir, exist_ok=True)
os.makedirs(savedir + 'A', exist_ok=True)
os.makedirs(savedir + 'B', exist_ok=True)
print("Directory structure prepared at %s" % output_dir)
segmap_expr = os.path.join(gtFine_dir, phase) + "/*/*_color.png"
segmap_paths = glob.glob(segmap_expr)
segmap_paths = sorted(segmap_paths)
photo_expr = os.path.join(leftImg8bit_dir, phase) + "/*/*_leftImg8bit.png"
photo_paths = glob.glob(photo_expr)
photo_paths = sorted(photo_paths)
assert len(segmap_paths) == len(photo_paths), \
"%d images that match [%s], and %d images that match [%s]. Aborting." % (len(segmap_paths), segmap_expr, len(photo_paths), photo_expr)
for i, (segmap_path, photo_path) in enumerate(zip(segmap_paths, photo_paths)):
check_matching_pair(segmap_path, photo_path)
segmap = load_resized_img(segmap_path)
photo = load_resized_img(photo_path)
# data for pix2pix where the two images are placed side-by-side
sidebyside = Image.new('RGB', (512, 256))
sidebyside.paste(segmap, (256, 0))
sidebyside.paste(photo, (0, 0))
savepath = os.path.join(savedir, "%d.jpg" % i)
sidebyside.save(savepath, format='JPEG', subsampling=0, quality=100)
# data for cyclegan where the two images are stored at two distinct directories
savepath = os.path.join(savedir + 'A', "%d_A.jpg" % i)
photo.save(savepath, format='JPEG', subsampling=0, quality=100)
savepath = os.path.join(savedir + 'B', "%d_B.jpg" % i)
segmap.save(savepath, format='JPEG', subsampling=0, quality=100)
if i % (len(segmap_paths) // 10) == 0:
print("%d / %d: last image saved at %s, " % (i, len(segmap_paths), savepath))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gtFine_dir', type=str, required=True,
help='Path to the Cityscapes gtFine directory.')
parser.add_argument('--leftImg8bit_dir', type=str, required=True,
help='Path to the Cityscapes leftImg8bit_trainvaltest directory.')
parser.add_argument('--output_dir', type=str, required=True,
default='./datasets/cityscapes',
help='Directory the output images will be written to.')
opt = parser.parse_args()
print(help_msg)
print('Preparing Cityscapes Dataset for val phase')
process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "val")
print('Preparing Cityscapes Dataset for train phase')
process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "train")
print('Done')
| 4,040 | 43.406593 | 142 | py |
query-selected-attention | query-selected-attention-main/datasets/detect_cat_face.py | import cv2
import os
import glob
import argparse
def get_file_paths(folder):
image_file_paths = []
for root, dirs, filenames in os.walk(folder):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.endswith('.png') or filename.endswith('.jpg'):
image_file_paths.append(file_path)
break # prevent descending into subfolders
return image_file_paths
SF = 1.05
N = 3
def detect_cat(img_path, cat_cascade, output_dir, ratio=0.05, border_ratio=0.25):
print('processing {}'.format(img_path))
output_width = 286
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
H, W = img.shape[0], img.shape[1]
minH = int(H * ratio)
minW = int(W * ratio)
cats = cat_cascade.detectMultiScale(gray, scaleFactor=SF, minNeighbors=N, minSize=(minH, minW))
for cat_id, (x, y, w, h) in enumerate(cats):
x1 = max(0, x - w * border_ratio)
x2 = min(W, x + w * (1 + border_ratio))
y1 = max(0, y - h * border_ratio)
y2 = min(H, y + h * (1 + border_ratio))
img_crop = img[int(y1):int(y2), int(x1):int(x2)]
img_name = os.path.basename(img_path)
out_path = os.path.join(output_dir, img_name.replace('.jpg', '_cat%d.jpg' % cat_id))
print('write', out_path)
img_crop = cv2.resize(img_crop, (output_width, output_width), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(out_path, img_crop, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='detecting cat faces using opencv detector')
parser.add_argument('--input_dir', type=str, help='input image directory')
    parser.add_argument('--output_dir', type=str, help='which directory to store cropped cat faces')
parser.add_argument('--use_ext', action='store_true', help='if use haarcascade_frontalcatface_extended or not')
args = parser.parse_args()
    if args.use_ext:
        cat_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface_extended.xml')
    else:
        cat_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')
img_paths = get_file_paths(args.input_dir)
print('total number of images {} from {}'.format(len(img_paths), args.input_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
for img_path in img_paths:
detect_cat(img_path, cat_cascade, args.output_dir)
| 2,566 | 38.492308 | 115 | py |
query-selected-attention | query-selected-attention-main/datasets/make_dataset_aligned.py | import os
from PIL import Image
def get_file_paths(folder):
image_file_paths = []
for root, dirs, filenames in os.walk(folder):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.endswith('.png') or filename.endswith('.jpg'):
image_file_paths.append(file_path)
break # prevent descending into subfolders
return image_file_paths
def align_images(a_file_paths, b_file_paths, target_path):
if not os.path.exists(target_path):
os.makedirs(target_path)
for i in range(len(a_file_paths)):
img_a = Image.open(a_file_paths[i])
img_b = Image.open(b_file_paths[i])
assert(img_a.size == img_b.size)
aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1]))
aligned_image.paste(img_a, (0, 0))
aligned_image.paste(img_b, (img_a.size[0], 0))
aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset-path',
dest='dataset_path',
        help='Which folder to process (it should have subfolders testA, testB, trainA and trainB)'
)
args = parser.parse_args()
dataset_folder = args.dataset_path
print(dataset_folder)
test_a_path = os.path.join(dataset_folder, 'testA')
test_b_path = os.path.join(dataset_folder, 'testB')
test_a_file_paths = get_file_paths(test_a_path)
test_b_file_paths = get_file_paths(test_b_path)
assert(len(test_a_file_paths) == len(test_b_file_paths))
test_path = os.path.join(dataset_folder, 'test')
train_a_path = os.path.join(dataset_folder, 'trainA')
train_b_path = os.path.join(dataset_folder, 'trainB')
train_a_file_paths = get_file_paths(train_a_path)
train_b_file_paths = get_file_paths(train_b_path)
assert(len(train_a_file_paths) == len(train_b_file_paths))
train_path = os.path.join(dataset_folder, 'train')
align_images(test_a_file_paths, test_b_file_paths, test_path)
align_images(train_a_file_paths, train_b_file_paths, train_path)
| 2,257 | 34.28125 | 97 | py |
query-selected-attention | query-selected-attention-main/data/base_dataset.py | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
self.current_epoch = 0
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'fixsize' in opt.preprocess:
transform_list.append(transforms.Resize(params["size"], method))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
if "gta2cityscapes" in opt.dataroot:
osize[0] = opt.load_size // 2
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
elif 'scale_shortside' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, opt.crop_size, method)))
if 'zoom' in opt.preprocess:
if params is None:
transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method)))
else:
transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method, factor=params["scale_factor"])))
if 'crop' in opt.preprocess:
if params is None or 'crop_pos' not in params:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if 'patch' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __patch(img, params['patch_index'], opt.crop_size)))
if 'trim' in opt.preprocess:
transform_list.append(transforms.Lambda(lambda img: __trim(img, opt.crop_size)))
# if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
if not opt.no_flip:
if params is None or 'flip' not in params:
transform_list.append(transforms.RandomHorizontalFlip())
elif 'flip' in params:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if convert:
transform_list += [transforms.ToTensor()]
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
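# Usage sketch (illustrative): to apply identical random crops/flips to a paired
# A/B image, draw the parameters once with get_params and reuse them, e.g.
#   params = get_params(opt, A_img.size)
#   A = get_transform(opt, params)(A_img)
#   B = get_transform(opt, params)(B_img)  # same crop position and flip as A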
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
return img.resize((w, h), method)
def __random_zoom(img, target_width, crop_width, method=Image.BICUBIC, factor=None):
if factor is None:
zoom_level = np.random.uniform(0.8, 1.0, size=[2])
else:
zoom_level = (factor[0], factor[1])
iw, ih = img.size
zoomw = max(crop_width, iw * zoom_level[0])
zoomh = max(crop_width, ih * zoom_level[1])
img = img.resize((int(round(zoomw)), int(round(zoomh))), method)
return img
def __scale_shortside(img, target_width, crop_width, method=Image.BICUBIC):
ow, oh = img.size
shortside = min(ow, oh)
if shortside >= target_width:
return img
else:
scale = target_width / shortside
return img.resize((round(ow * scale), round(oh * scale)), method)
def __trim(img, trim_width):
ow, oh = img.size
if ow > trim_width:
xstart = np.random.randint(ow - trim_width)
xend = xstart + trim_width
else:
xstart = 0
xend = ow
if oh > trim_width:
ystart = np.random.randint(oh - trim_width)
yend = ystart + trim_width
else:
ystart = 0
yend = oh
return img.crop((xstart, ystart, xend, yend))
def __scale_width(img, target_width, crop_width, method=Image.BICUBIC):
ow, oh = img.size
if ow == target_width and oh >= crop_width:
return img
w = target_width
h = int(max(target_width * oh / ow, crop_width))
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __patch(img, index, size):
    ow, oh = img.size
    nw, nh = ow // size, oh // size  # number of whole patches per row / column
    roomx = ow - nw * size  # leftover pixels usable as a random global offset
    roomy = oh - nh * size
    startx = np.random.randint(int(roomx) + 1)
    starty = np.random.randint(int(roomy) + 1)
    index = index % (nw * nh)  # wrap the index onto the nw x nh patch grid
    ix = index // nh
    iy = index % nh
    gridx = startx + ix * size
    gridy = starty + iy * size
    return img.crop((gridx, gridy, gridx + size, gridy + size))
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
| 8,026 | 33.748918 | 153 | py |
query-selected-attention | query-selected-attention-main/data/unaligned_dataset.py | import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import util.util as util
class UnalignedDataset(BaseDataset):
"""
This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
if opt.phase == "test" and not os.path.exists(self.dir_A) \
and os.path.exists(os.path.join(opt.dataroot, "valA")):
self.dir_A = os.path.join(opt.dataroot, "valA")
self.dir_B = os.path.join(opt.dataroot, "valB")
self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
self.A_size = len(self.A_paths) # get the size of dataset A
self.B_size = len(self.B_paths) # get the size of dataset B
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
"""
        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
        if self.opt.serial_batches:   # use a fixed pairing of A and B images
index_B = index % self.B_size
else: # randomize the index for domain B to avoid fixed pairs.
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = Image.open(A_path).convert('RGB')
B_img = Image.open(B_path).convert('RGB')
# Apply image transformation
# For FastCUT mode, if in finetuning phase (learning rate is decaying),
# do not perform resize-crop data augmentation of CycleGAN.
# print('current_epoch', self.current_epoch)
is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs
modified_opt = util.copyconf(self.opt, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size)
transform = get_transform(modified_opt)
A = transform(A_img)
B = transform(B_img)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
def __len__(self):
"""Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
"""
return max(self.A_size, self.B_size)
| 3,582 | 43.7875 | 122 | py |
query-selected-attention | query-selected-attention-main/data/image_folder.py | """A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
| 1,941 | 27.985075 | 122 | py |
query-selected-attention | query-selected-attention-main/data/__init__.py | """This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
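# Example (illustrative): find_dataset_using_name('unaligned') imports
# data/unaligned_dataset.py and returns the UnalignedDataset class; the lookup matches
# the lowercased class name against the mode string with underscores stripped.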
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
print("dataset [%s] was created" % type(self.dataset).__name__)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batch_size,
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads),
drop_last=True
)
def set_epoch(self, epoch):
self.dataset.current_epoch = epoch
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
return min(len(self.dataset), self.opt.max_dataset_size)
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if i * self.opt.batch_size >= self.opt.max_dataset_size:
break
yield data
| 3,667 | 36.050505 | 176 | py |
query-selected-attention | query-selected-attention-main/data/template_dataset.py | """Dataset class template
This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <Dataset_mode>Dataset.py
You need to implement the following functions:
-- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
-- <__init__>: Initialize this dataset class.
-- <__getitem__>: Return a data point and its metadata information.
-- <__len__>: Return the number of images.
"""
from data.base_dataset import BaseDataset, get_transform
# from data.image_folder import make_dataset
# from PIL import Image
class TemplateDataset(BaseDataset):
"""A template dataset class for you to implement custom datasets."""
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
A few things can be done here.
- save the options (have been done in BaseDataset)
- get image paths and meta information of the dataset.
- define the image transformation.
"""
# save the option and dataset root
BaseDataset.__init__(self, opt)
# get the image paths of your dataset;
self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
# define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
self.transform = get_transform(opt)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index -- a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
Step 1: get a random image path: e.g., path = self.image_paths[index]
Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
        Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
Step 4: return a data point as a dictionary.
"""
path = 'temp' # needs to be a string
data_A = None # needs to be a tensor
data_B = None # needs to be a tensor
return {'data_A': data_A, 'data_B': data_B, 'path': path}
def __len__(self):
"""Return the total number of images."""
return len(self.image_paths)
| 3,506 | 45.144737 | 156 | py |
query-selected-attention | query-selected-attention-main/data/single_dataset.py | from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
class SingleDataset(BaseDataset):
"""This dataset class can load a set of images specified by the path --dataroot /path/to/data.
    It can be used for generating CycleGAN results only for one side with the model option '--model test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.transform = get_transform(opt, grayscale=(input_nc == 1))
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A and A_paths
A(tensor) - - an image in one domain
A_paths(str) - - the path of the image
"""
A_path = self.A_paths[index]
A_img = Image.open(A_path).convert('RGB')
A = self.transform(A_img)
return {'A': A, 'A_paths': A_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.A_paths)
| 1,495 | 35.487805 | 105 | py |
minhashcuda | minhashcuda-master/test.py | from time import time
import unittest
from datasketch import WeightedMinHashGenerator, WeightedMinHash
import libMHCUDA
import numpy
from scipy.sparse import csr_matrix
from scipy.stats import gamma, uniform
class MHCUDATests(unittest.TestCase):
def test_calc_tiny(self):
v1 = [1, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 6, 7, 8, 0, 0, 0, 0, 0, 0, 9, 10, 4]
v2 = [2, 0, 0, 0, 4, 3, 8, 0, 0, 0, 0, 4, 7, 10, 0, 0, 0, 0, 0, 0, 9, 0, 0]
bgen = WeightedMinHashGenerator(len(v1))
gen = libMHCUDA.minhash_cuda_init(len(v1), 128, devices=1, verbosity=2)
libMHCUDA.minhash_cuda_assign_vars(gen, bgen.rs, bgen.ln_cs, bgen.betas)
m = csr_matrix(numpy.array([v1, v2], dtype=numpy.float32))
hashes = libMHCUDA.minhash_cuda_calc(gen, m)
libMHCUDA.minhash_cuda_fini(gen)
self.assertEqual(hashes.shape, (2, 128, 2))
true_hashes = numpy.array([bgen.minhash(v1).hashvalues,
bgen.minhash(v2).hashvalues], dtype=numpy.uint32)
self.assertEqual(true_hashes.shape, (2, 128, 2))
try:
self.assertTrue((hashes == true_hashes).all())
except AssertionError as e:
print("---- TRUE ----")
print(true_hashes)
print("---- FALSE ----")
print(hashes)
raise e from None
def _test_calc_big(self, devices):
numpy.random.seed(0)
data = numpy.random.randint(0, 100, (6400, 130))
mask = numpy.random.randint(0, 5, data.shape)
data *= (mask >= 4)
del mask
bgen = WeightedMinHashGenerator(data.shape[-1])
gen = libMHCUDA.minhash_cuda_init(data.shape[-1], 128, devices=devices, verbosity=2)
libMHCUDA.minhash_cuda_assign_vars(gen, bgen.rs, bgen.ln_cs, bgen.betas)
m = csr_matrix(data, dtype=numpy.float32)
print(m.nnz / (m.shape[0] * m.shape[1]))
ts = time()
hashes = libMHCUDA.minhash_cuda_calc(gen, m)
print("libMHCUDA:", time() - ts)
libMHCUDA.minhash_cuda_fini(gen)
self.assertEqual(hashes.shape, (len(data), 128, 2))
ts = time()
true_hashes = numpy.array([bgen.minhash(line).hashvalues for line in data],
dtype=numpy.uint32)
print("datasketch:", time() - ts)
self.assertEqual(true_hashes.shape, (len(data), 128, 2))
try:
self.assertTrue((hashes == true_hashes).all())
except AssertionError as e:
for r in range(hashes.shape[0]):
if (hashes[r] != true_hashes[r]).any():
print("first invalid row:", r)
print(hashes[r])
print(true_hashes[r])
break
raise e from None
def test_calc_big(self):
self._test_calc_big(1)
def test_calc_big_2gpus(self):
self._test_calc_big(3)
def test_random_vars(self):
gen = libMHCUDA.minhash_cuda_init(1000, 128, devices=1, verbosity=2)
rs, ln_cs, betas = libMHCUDA.minhash_cuda_retrieve_vars(gen)
libMHCUDA.minhash_cuda_fini(gen)
self.assertEqual(rs.shape, (128, 1000))
self.assertEqual(ln_cs.shape, (128, 1000))
self.assertEqual(betas.shape, (128, 1000))
cs = numpy.exp(ln_cs)
a, loc, scale = gamma.fit(rs)
self.assertTrue(1.97 < a < 2.03)
self.assertTrue(-0.01 < loc < 0.01)
self.assertTrue(0.98 < scale < 1.02)
a, loc, scale = gamma.fit(cs)
self.assertTrue(1.97 < a < 2.03)
self.assertTrue(-0.01 < loc < 0.01)
self.assertTrue(0.98 < scale < 1.02)
bmin, bmax = uniform.fit(betas)
self.assertTrue(0 <= bmin < 0.001)
self.assertTrue(0.999 <= bmax <= 1)
def test_integration(self):
numpy.random.seed(1)
data = numpy.random.randint(0, 100, (6400, 130))
mask = numpy.random.randint(0, 5, data.shape)
data *= (mask >= 4)
del mask
gen = libMHCUDA.minhash_cuda_init(data.shape[-1], 128, seed=1, verbosity=1)
m = csr_matrix(data, dtype=numpy.float32)
print(m.nnz / (m.shape[0] * m.shape[1]))
hashes = libMHCUDA.minhash_cuda_calc(gen, m)
libMHCUDA.minhash_cuda_fini(gen)
self.assertEqual(hashes.shape, (len(data), 128, 2))
h1 = WeightedMinHash(0, hashes[0])
h2 = WeightedMinHash(0, hashes[1])
cudamh = h1.jaccard(h2)
print(cudamh)
truemh = numpy.amin(data[:2], axis=0).sum() / numpy.amax(data[:2], axis=0).sum()
print(truemh)
self.assertTrue(abs(truemh - cudamh) < 0.005)
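        # For non-negative vectors a and b, the generalized (weighted) Jaccard
        # similarity is J(a, b) = sum_i min(a_i, b_i) / sum_i max(a_i, b_i);
        # `truemh` computes it exactly, and the MinHash estimate `cudamh`
        # converges to it as the sample size (128 here) grows.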
def test_slice(self):
numpy.random.seed(0)
data = numpy.random.randint(0, 100, (6400, 130))
mask = numpy.random.randint(0, 5, data.shape)
data *= (mask >= 4)
del mask
gen = libMHCUDA.minhash_cuda_init(data.shape[-1], 128, verbosity=2)
m = csr_matrix(data, dtype=numpy.float32)
hashes = libMHCUDA.minhash_cuda_calc(gen, m)
hashes2 = libMHCUDA.minhash_cuda_calc(
gen, m, row_start=3200, row_finish=4800)
libMHCUDA.minhash_cuda_fini(gen)
self.assertTrue((hashes[3200:4800] == hashes2).all())
def test_backwards(self):
v1 = [1, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 6, 7, 8, 0, 0, 0, 0, 0, 0, 9, 10, 4]
v2 = [2, 0, 0, 0, 4, 3, 8, 0, 0, 0, 0, 4, 7, 10, 0, 0, 0, 0, 0, 0, 9, 0, 0]
gen = libMHCUDA.minhash_cuda_init(len(v1), 128, devices=1, verbosity=2)
rs, ln_cs, betas = libMHCUDA.minhash_cuda_retrieve_vars(gen)
bgen = WeightedMinHashGenerator.__new__(WeightedMinHashGenerator)
bgen.dim = len(v1)
bgen.rs = rs
bgen.ln_cs = ln_cs
bgen.betas = betas
bgen.sample_size = 128
bgen.seed = None
m = csr_matrix(numpy.array([v1, v2], dtype=numpy.float32))
hashes = libMHCUDA.minhash_cuda_calc(gen, m)
libMHCUDA.minhash_cuda_fini(gen)
self.assertEqual(hashes.shape, (2, 128, 2))
true_hashes = numpy.array([bgen.minhash(v1).hashvalues,
bgen.minhash(v2).hashvalues], dtype=numpy.uint32)
self.assertEqual(true_hashes.shape, (2, 128, 2))
try:
self.assertTrue((hashes == true_hashes).all())
except AssertionError as e:
print("---- TRUE ----")
print(true_hashes)
print("---- FALSE ----")
print(hashes)
raise e from None
def test_deferred(self):
v1 = [1, 0, 0, 0, 3, 4, 5, 0, 0, 0, 0, 6, 7, 8, 0, 0, 0, 0, 0, 0, 9, 10, 4]
v2 = [2, 0, 0, 0, 4, 3, 8, 0, 0, 0, 0, 4, 7, 10, 0, 0, 0, 0, 0, 0, 9, 0, 0]
gen = libMHCUDA.minhash_cuda_init(len(v1), 128, devices=1, verbosity=2)
vars = libMHCUDA.minhash_cuda_retrieve_vars(gen)
libMHCUDA.minhash_cuda_fini(gen)
gen = libMHCUDA.minhash_cuda_init(
len(v1), 128, devices=1, deferred=True, verbosity=2)
libMHCUDA.minhash_cuda_assign_vars(gen, *vars)
bgen = WeightedMinHashGenerator.__new__(WeightedMinHashGenerator)
bgen.dim = len(v1)
bgen.rs, bgen.ln_cs, bgen.betas = vars
bgen.sample_size = 128
bgen.seed = None
m = csr_matrix(numpy.array([v1, v2], dtype=numpy.float32))
hashes = libMHCUDA.minhash_cuda_calc(gen, m)
libMHCUDA.minhash_cuda_fini(gen)
self.assertEqual(hashes.shape, (2, 128, 2))
true_hashes = numpy.array([bgen.minhash(v1).hashvalues,
bgen.minhash(v2).hashvalues], dtype=numpy.uint32)
self.assertEqual(true_hashes.shape, (2, 128, 2))
try:
self.assertTrue((hashes == true_hashes).all())
except AssertionError as e:
print("---- TRUE ----")
print(true_hashes)
print("---- FALSE ----")
print(hashes)
raise e from None
def test_float(self):
v1 = [
0, 1.0497366, 0.8494359, 0.66231006, 0.66231006, 0.8494359,
0, 0.66231006, 0.33652836, 0, 0, 0.5359344,
0.8494359, 0.66231006, 1.0497366, 0.33652836, 0.66231006, 0.8494359,
0.6800841, 0.33652836]
gen = libMHCUDA.minhash_cuda_init(len(v1), 128, devices=1, seed=7, verbosity=2)
vars = libMHCUDA.minhash_cuda_retrieve_vars(gen)
bgen = WeightedMinHashGenerator.__new__(WeightedMinHashGenerator)
bgen.dim = len(v1)
bgen.rs, bgen.ln_cs, bgen.betas = vars
bgen.sample_size = 128
bgen.seed = None
m = csr_matrix(numpy.array(v1, dtype=numpy.float32))
hashes = libMHCUDA.minhash_cuda_calc(gen, m).astype(numpy.int32)
libMHCUDA.minhash_cuda_fini(gen)
self.assertEqual(hashes.shape, (1, 128, 2))
true_hashes = numpy.array([bgen.minhash(v1).hashvalues], dtype=numpy.int32)
self.assertEqual(true_hashes.shape, (1, 128, 2))
try:
self.assertTrue((hashes == true_hashes).all())
except AssertionError as e:
print("---- TRUE ----")
print(true_hashes)
print("---- FALSE ----")
print(hashes)
raise e from None
def test_split(self):
def run_test(v):
            # k is the total vector length; the parts are concatenated into a
            # single row so the assertions below see exactly one sample
            k = sum(len(part) for part in v)
            bgen = WeightedMinHashGenerator(k)
            gen = libMHCUDA.minhash_cuda_init(k, 128, devices=4, verbosity=2)
            libMHCUDA.minhash_cuda_assign_vars(gen, bgen.rs, bgen.ln_cs, bgen.betas)
            m = csr_matrix(numpy.array([sum(v, [])], dtype=numpy.float32))
hashes = None
try:
hashes = libMHCUDA.minhash_cuda_calc(gen, m)
finally:
self.assertIsNotNone(hashes)
self.assertEqual(hashes.shape, (1, 128, 2))
libMHCUDA.minhash_cuda_fini(gen)
# here we try to break minhashcuda with unbalanced partitions
run_test([[2], [1], [1], [1]])
run_test([[1] * 50, [1], [1], [1]])
run_test([[1], [1] * 50, [1], [1]])
run_test([[1], [1], [1] * 50, [1]])
run_test([[1], [1], [1], [1] * 50])
run_test([[1] * 3, [1] * 10, [1] * 5, [1] * 2])
if __name__ == "__main__":
unittest.main()
| 10,279 | 42.375527 | 92 | py |
minhashcuda | minhashcuda-master/setup.py | from multiprocessing import cpu_count
import os
from setuptools import setup
from setuptools.command.build_py import build_py
from setuptools.dist import Distribution
from shutil import copyfile
from subprocess import check_call
import sys
import sysconfig
with open(os.path.join(os.path.dirname(__file__), "README.md")) as f:
long_description = f.read()
def get_python_library():
"""Get path to the python library associated with the current python
interpreter."""
cfgvar = sysconfig.get_config_var
libname = cfgvar("LDLIBRARY")
python_library = os.path.join(
cfgvar("LIBDIR") + (cfgvar("multiarchsubdir") or ""),
libname)
if os.path.exists(python_library):
return python_library
for root, dirnames, filenames in os.walk(cfgvar("base")):
for filename in filenames:
if filename == libname:
return os.path.join(root, filename)
raise FileNotFoundError(libname)
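# Illustrative only (paths vary by platform and build): on a typical Linux
# CPython 3.6 this resolves to something like
# /usr/lib/x86_64-linux-gnu/libpython3.6m.so.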
class CMakeBuild(build_py):
SHLIBEXT = "dylib" if sys.platform == "darwin" else "so"
def run(self):
if not self.dry_run:
self._build()
super(CMakeBuild, self).run()
def get_outputs(self, *args, **kwargs):
outputs = super(CMakeBuild, self).get_outputs(*args, **kwargs)
outputs.extend(self._shared_lib)
return outputs
def _build(self, builddir=None):
syspaths = sysconfig.get_paths()
check_call(("cmake", "-DCMAKE_BUILD_TYPE=Release",
"-DCUDA_TOOLKIT_ROOT_DIR=%s" % os.getenv(
"CUDA_TOOLKIT_ROOT_DIR",
"must_export_CUDA_TOOLKIT_ROOT_DIR"),
"-DPYTHON_DEFAULT_EXECUTABLE=python3",
"-DPYTHON_INCLUDE_DIRS=" + syspaths["include"],
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-DPYTHON_LIBRARY=" + get_python_library(),
"."))
check_call(("make", "-j%d" % cpu_count()))
self.mkpath(self.build_lib)
shlib = "libMHCUDA." + self.SHLIBEXT
dest = os.path.join(self.build_lib, shlib)
copyfile(shlib, dest)
self._shared_lib = [dest]
class BinaryDistribution(Distribution):
"""Distribution which always forces a binary package with platform name"""
def has_ext_modules(self):
return True
def is_pure(self):
return False
setup(
name="libMHCUDA",
description="Accelerated Weighted MinHash-ing on GPU",
long_description=long_description,
long_description_content_type="text/markdown",
version="2.1.1",
license="Apache Software License",
author="Vadim Markovtsev",
author_email="[email protected]",
url="https://github.com/src-d/minhashcuda",
download_url="https://github.com/src-d/minhashcuda",
py_modules=["libMHCUDA"],
install_requires=["numpy"],
distclass=BinaryDistribution,
cmdclass={"build_py": CMakeBuild},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Topic :: Scientific/Engineering :: Information Analysis",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6"
]
)
# python3 setup.py bdist_wheel
# auditwheel repair -w dist dist/*
# twine upload dist/*manylinux*
| 3,487 | 33.534653 | 78 | py |
biosbias | biosbias-master/download_bios.py | from pebble import ProcessPool, ProcessExpired
import os
from argparse import ArgumentParser
from multiprocessing import cpu_count
import time
import gzip
import json
import requests
import sys
import pickle as pkl
from warcio.archiveiterator import ArchiveIterator
import re
MAX_PAGE_LEN = 100 * 1000
MAX_LINE_LEN = 1000
MIN_LENGTH = 150
MAX_PRECEED = 40
# PREFIXES = {'Col', 'Councillor', 'Dr', 'Lecturer', 'Maj', 'Mr', 'Mrs', 'Ms', 'Prof', 'Professor', 'Professsor'} # change back to set()
COMMON_CRAWL_URL = 'https://commoncrawl.s3.amazonaws.com/'
parser = ArgumentParser()
parser.add_argument('wetpaths',
help='common_crawl date like 2017-43 (see http://commoncrawl.org/the-data/get-started/ ) *or* a path to a -wet.paths file')
# parser.add_argument("-w", "--wetpaths", dest="wetpaths",
# help="read paths from FILE", metavar="FILE")
parser.add_argument("-o", "--output", dest="output",
help="write bios to OUT.pkl", metavar="OUT")
parser.add_argument("-r", "--retries", dest="retries", type=int, default=2, help="number of retries per path")
parser.add_argument("-p", "--parallel", dest="parallel", type=int, default=0,
help="number of parallel threads", metavar="N")
args = parser.parse_args()
with open("freq_titles.json", "r") as f:
freq_titles = json.load(f)
lower_freq_titles = {t.lower(): normalized for t, normalized in freq_titles.items()}
def re_escape_title(title):
if title.isupper():
return re.escape(title)
return "".join(f"[{c}{c.lower()}]" if c.isupper() else re.escape(c) for c in title)
freq_titles_regex = re.compile(
r"\b|".join(
re_escape_title(title) for title in sorted(freq_titles)
) + r"\b"
)
bad_regex = re.compile(r"\b(?:I|you|your|me|my|mine|myself|our|ours|us|we|ourselves)\b", flags=re.I)
m_regex = re.compile(r"\b(?:mr|his|he|him|himself)\b", flags=re.I)
f_regex = re.compile(r"\b(?:mrs|ms|hers|she|her|herself)\b", flags=re.I)
sentence_end_regex = re.compile(r"\. +[A-Z]")
def infer_gender(bio):
if re.search(bad_regex, bio):
return None
m_count = bool(re.search(m_regex, bio))
f_count = bool(re.search(f_regex, bio))
if f_count > 0 and m_count == 0:
return "F"
if m_count > 0 and f_count == 0:
return "M"
acceptable_prefixes = "adjunct|artist|assistant|associate|attorney|author|bio|biography|brief bio|brief biography|biographical sketch|br|brother|chancellor|chaplain|chapln|col|colonel|councillor|currently|description|director|doctor|dr|experience|facilitator|father|fr|gov|governor|host|image|instructor|lecturer|madam|madame|maj|miss|missus|mister|mme|monsieur|monsignor|mr|mrs|ms|msgr|note|now|pastor|plaintiff|pres|presenter|president|prince|principal|prof|professionally|professor|profile|rabbi|reader|rep|representative|respondent|rev|reverend|reviewer|rev|saint|sen|senator|senor|senora|senorita|sgt|sir|sister|speaker|sr|sra|srta|st|the hon|the honorable|today"
lname_strip_regex = re.compile(r"^[^a-z]*(?:\b(?:[a-z]|"+ acceptable_prefixes +r")\b[^a-z]*)*", re.I)
lname_kill_regex = re.compile(r"^(?:about|abstract|additionally|although|and|but|by|comments|example|he|plot|review|she|source|story|summary|synopsis|the|there|today|when|while|yes)\b", re.I)
rname_regex = re.compile(r"(?:[\b(,\. ]+(?:\(eae\)|[a-z]|ab|abpp|aia|ao|apn|aprn|arnp|asid|asla|ba|bs|bsn|ca|cbe|ccrn|cde|cdn|cdw|ceo|cfo|cipd|clt|cnm|cnp|cpa|cpnp|crnp|csat|cso|cssd|dc|dds|djb|dmd|dnp|e\-?ryt[\- \d]*|edd|esq|faan|facs|faia|fca|fnp|fnp-bc|fnp-c|frcs|ii|iii|iv|jd|jg|jr|lac|ladc|lcpc|lcsw|ld|ldn|licsw|ll|llm|llp|lmft|lmhc|lmt|lp|lpc|ma|mba|mc|md|mfa|mft|mlc|mms|mn|mpas|mph|ms|msn|mw|ncarb|nd|np|np-c|pa-c|pa\-c|ph|phd|pla|pm|psy|psyd|ra|rcyt[\- \d]*|rd|rdn|riba|rla|rn|rn\-bc|ryt|sr)[\b\., )]*)*$", re.I)
name_regex = re.compile(r"^([A-Z][a-zâêîôûŵŷäëïöüẅÿàèìòùẁỳáéíóúẃý]+(?:\-[A-Z][a-zâêîôûŵŷäëïöüẅÿàèìòùẁỳáéíóúẃý]+)*)( +[A-Z](?:\.|[a-zâêîôûŵŷäëïöüẅÿàèìòùẁỳáéíóúẃý]*))?((?: van)? +(?:Mc|De|O')?[A-Z][a-zâêîôûŵŷäëïöüẅÿàèìòùẁỳáéíóúẃý]+(?:\-[A-Z][a-zâêîôûŵŷäëïöüẅÿàèìòùẁỳáéíóúẃý]+)*)$")
def extract_name(name):
name = name[lname_strip_regex.match(name).end():]
if lname_kill_regex.match(name):
return None
name = name[:rname_regex.search(name).start()]
match = name_regex.match(name)
if not match:
return None
return tuple(g.strip().replace(".", "") if g else "" for g in match.groups())
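# Illustrative example (hypothetical input): lname_strip_regex drops the
# honorific, rname_regex drops trailing credentials, and name_regex splits
# first/middle/last, so e.g.
#   extract_name("Dr. Jane Q. Doe, MD") -> ("Jane", "Q", "Doe")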
def log(text):
try:
if not text.endswith("\n"):
text += "\n"
with open(log_fname, "a") as f:
f.write(text)
except Exception as e:
print("*** Unable to log!")
print(e)
def extract_bios_from_page(page, URI, max_preceed=MAX_PRECEED, min_len=MIN_LENGTH):
if "the" not in page:
return [] # probably not English
ISA = " is a "
ISAN = " is an "
n = len(ISA) + max_preceed
matches = []
failures = []
for line_str in page.split('\n'):
if len(line_str) >= min_len:
if ISA in line_str[:n]:
a = line_str.index(ISA)
b = a + len(ISA)
elif ISAN in line_str[:n + 1]:
a = line_str.index(ISAN)
b = a + len(ISAN)
else:
continue
m = re.match(freq_titles_regex, line_str[b:]) # is an architect
if not m: # try is an American architect (1 word before title) also catches boston-based architect
c = line_str.find(" ", b)
if c == -1 or line_str[c - 1] == ',': # avoid 'is a performer, architect blah ...'
continue
m = re.match(freq_titles_regex, line_str[c + 1:])
if not m:
continue
end = c + 1 + m.end()
else:
end = b + m.end()
if m:
# if "→" in line_str:
# weird = line_str.index("→")
# if weird<end:
# continue
# line_str = line_str[:weird]
title = m.group()
if title.lower() not in lower_freq_titles:
print(f"!!!!!! Strange title: '{title}'")
continue
g = infer_gender(line_str)
if not g or line_str[end:].startswith(",") or line_str[end:].startswith(" and "): # avoid 'is an architect and' or 'is an architect, blah'
# maybe add: or line_str[end:].startswith("/") # avoid 'is an architect/designer...'
continue
if line_str.find("\t", end)!=-1:
line_str = line_str[:line_str.find("\t", end)]
if len(line_str) > MAX_LINE_LEN:
continue
m2 = re.search(sentence_end_regex, line_str[end:])
if not m2:
continue
start_pos = end + m2.start() + 1
body = line_str[start_pos:].strip()
if len(body) < min_len:
continue
name = extract_name(line_str[:a])
if not name:
continue
matches.append(
{"raw": line_str, "name": name, "raw_title": title, "gender": g, "start_pos": start_pos,
"title": lower_freq_titles[title.lower()], "URI": URI})
return matches
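# Illustrative use (hypothetical, assuming "architect" is a key in
# freq_titles.json): a page line such as
#   "Jane Q. Doe is an architect based in Boston. She has designed ..."
# would yield one record with name ("Jane", "Q", "Doe"), raw_title
# "architect" and gender "F", with start_pos pointing just past the first
# sentence -- provided the line and the trailing bio pass the length checks.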
def dedup_exact(people):
seen = set()
return [p for p in people if not (p["raw"] in seen or seen.add(p["raw"]))]
def bios_from_wet_url(url, verbose=False):
try:
time0 = time.time()
log("TRYING "+url)
r = requests.get(url, stream=True)
assert r.status_code == 200, f"*** Got status code {r.status_code} != 200"
if verbose:
print(f"Status code {r.status_code} for {url}")
it = ArchiveIterator(fileobj=r.raw)
it.__next__()
ans = dedup_exact([bio for record in it for bio in
extract_bios_from_page(record.content_stream().read().decode()[:MAX_PAGE_LEN], record.rec_headers.get_header('WARC-Target-URI'))])
log(f"DONE {url} {time.time()-time0:.1f} seconds")
return ans
except Exception as e:
print(f"*** Exception in {url}:", file=sys.stderr)
print(f"*** {e}", file=sys.stderr)
print(f"***", file=sys.stderr)
print("", file=sys.stderr)
return None
def chunks(arr, n):
n = max(n, 1)
m = len(arr)
return [arr[(m * i) // n:(m * (i + 1)) // n] for i in range(n)]
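# e.g. chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4, 5]]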
def process_urls(paths, n_processes, prefix=COMMON_CRAWL_URL, max_failures=100, num_progress_reports=50):
print(f"Using {n_processes} parallel processes")
failed_paths = []
bios = []
time0 = time.time()
path_name = (paths[0] + '///').split('/')[1]
num_progress_reports = max(1, min(num_progress_reports, len(paths) // n_processes))
done = 0
pool = ProcessPool(n_processes)
for i, paths_chunk in enumerate(chunks(paths, num_progress_reports)):
ans = pool.map(bios_from_wet_url, [prefix + path for path in paths_chunk], timeout=1200)
iterator = ans.result()
for p in paths_chunk + ["done"]:
try:
a = next(iterator)
assert p != "done"
if a is not None:
bios += [dict(path=p, **b) for b in a]
continue
except StopIteration:
assert p == "done"
break
except Exception as error:
print("--------------------\n"*10 + f"function raised {error}")
failed_paths.append(p)
done += len(paths_chunk)
pct = (i + 1) / num_progress_reports
eta = (time.time() - time0) * (1 / pct - 1) / 60 / 60
print(
f"{eta:.1f} hours left, {done:,}/{len(paths):,} done ({pct:.0%}),",
f"{int(len(bios)/pct):,} estimated bios, {path_name}"
)
if len(failed_paths) > 0:
print(f" {len(failed_paths):,} failed paths")
if len(failed_paths) > max_failures:
break
pool.close()
return dedup_exact(bios), failed_paths # dedup_exact is new!
if __name__ == "__main__":
if not args.wetpaths.endswith("wet.paths"):
assert re.match(r"^[0-9]+-[0-9]+$",
args.wetpaths), "Expecting wetpaths to be either xxxx-xx or to end with wet.paths"
url = COMMON_CRAWL_URL + "crawl-data/CC-MAIN-{}/wet.paths.gz".format(args.wetpaths)
r = requests.get(url, stream=True)
assert r.status_code == 200
r.raw.decode_content = True # just in case transport encoding was applied
gzip_file = gzip.GzipFile(fileobj=r.raw)
paths = [line.decode().strip() for line in gzip_file]
print("Got {:,} urls from {}".format(len(paths), url))
path_root = "CC-MAIN-{}-".format(args.wetpaths)
else:
with open(args.wetpaths, "r") as f:
paths = [x.strip() for x in f.readlines()]
path_root = args.wetpaths.replace("wet.paths", "")
if args.parallel == 0:
c = cpu_count()
args.parallel = c-c//10
output_fname = args.output or (path_root + "bios.pkl")
assert output_fname.endswith("bios.pkl"), "Output filename must end with 'bios.pkl'"
log_fname = output_fname.replace("bios.pkl", "log.txt")
try:
os.remove(log_fname)
except:
pass
bios, failed_paths = process_urls(paths, n_processes=args.parallel)
if len(failed_paths) < len(paths) / 10:
for i in range(args.retries):
if not failed_paths:
break
print("\n" * 5)
print(f"*** Retry #{i+1} with {len(failed_paths)} failures")
more_bios, failed_paths = process_urls(failed_paths, n_processes=args.parallel)
bios += more_bios
with open(output_fname, "wb") as f:
print(f"Wrote {len(bios):,} bios to {output_fname}")
pkl.dump(bios, f)
if len(failed_paths) > 0:
log("\nFailed paths:\n" + "\n".join(failed_paths))
print(f"*** Wrote {len(failed_paths):,} failures to {log_fname}")
| 12,315 | 39.916944 | 668 | py |
biosbias | biosbias-master/preprocess.py | import random, glob, re
import pickle as pkl
from argparse import ArgumentParser
titles_to_ignore = {'real_estate_broker', 'landscape_architect', 'massage_therapist', 'magician', 'acupuncturist'} # close but not enough data on these titles :-(
def save_pkl(obj, filename):
with open(filename, "wb") as f:
pkl.dump(obj, f)
def load_pkl(filename, verbose=True):
if verbose:
print(f"Loading '{filename}'")
with open(filename, "rb") as f:
return pkl.load(f)
def process(p, replacement="_"):
bio = p["raw"][p["start_pos"]:].strip()
names = p["name"]
assert len(names)==3
regExp = r"\b(?:[Hh]e|[Ss]he|[Hh]er|[Hh]is|[Hh]im|[Hh]ers|[Hh]imself|[Hh]erself|[Mm][Rr]|[Mm][Rr][sS]|[Mm][Ss]|"
regExp += "|".join([re.escape(n) for n in names if len(n)>0]) + r")\b"
bio = re.sub(regExp, replacement, bio)
p["bio"]=bio
def group_by(l, func):
ans = {}
for i in l:
k = func(i)
if k not in ans:
ans[k] = [i]
else:
ans[k].append(i)
return ans
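# e.g. group_by([1, 2, 3, 4], lambda n: n % 2) -> {1: [1, 3], 0: [2, 4]}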
def dedup_middle(bios): # remove triples where the middle name is a prefix of another middle name, so {Mary Lynn Doe, Mary L Doe, Mary Doe} => {Mary Lynn Doe}, but {Mary L Doe, Mary I Doe} => {Mary L Doe, Mary I Doe}
trips = group_by(bios, lambda b: (b["title"], b["name"][0], b["name"][2]))
for k in trips:
to_remove = set()
if len(k)==1:
continue
for b1 in trips[k]:
for b2 in trips[k]:
if b1 is not b2 and b1["name"][1].startswith(b2["name"][1]):
to_remove.add(b2["name"][1])
if to_remove:
trips[k] = [b for b in trips[k] if b["name"][1] not in to_remove]
return [b for v in trips.values() for b in v]
def dedup(people):
by_name_title = group_by(people, lambda b: (b["name"], b["title"]))
ans = [sorted(ps, key = lambda p: (-len(p["raw"]), p["raw"], p["path"]))[0] for ps in by_name_title.values()]
return ans
def main(paths, output_filename):
all_people = [p for path in paths for p in load_pkl(path) if p["title"] not in titles_to_ignore]
people = dedup_middle(dedup(all_people))
print(f"{len(people):,}/{len(all_people):,} 'different' name+titles ({len(people)/len(all_people):.1%})")
print("Processing bios...")
for p in people:
process(p)
save_pkl(people, output_filename)
print(f"Wrote {len(people):,} bios to '{output_filename}'")
#if len(peoples)>1: # show overlaps
# name_titles = [{(p["name"][0], p["name"][1], p["title"]) for p in people} for people in peoples]
# for path1, nts1 in zip(args.paths, name_titles):
# for path2, nts2 in zip(args.paths, name_titles):
# if path1<path2:
# overlap2 = sum([nt in nts2 for nt in nts1])/len(nts1) + sum([nt in nts1 for nt in nts2])/len(nts2)
# print(f"{overlap2/2:.1%} overlap between {path1:20} and {path2:20}")
# output = dedup([p for ps in peoples for p in ps])
#else:
# assert len(peoples)==1
# output = peoples[0]
if __name__ == '__main__':
parser = ArgumentParser(description='Dedup bios by name + title and add name field to records.')
parser.add_argument('paths', nargs='+', help='Path of bios .pkl file(s)', metavar="PATH")
parser.add_argument("-o", "--output", dest="output", help="write bios to OUT.pkl", metavar="OUT", default="BIOS.pkl")
args = parser.parse_args()
main(args.paths, args.output)
| 3,535 | 40.116279 | 216 | py |
aircraftnoise | aircraftnoise-master/classifier/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/testing/convnettester.py | import tensorflow as tf
import sys
import os
import numpy as np
'''
The object that encapsulates the cross-validation testing procedure and status
'''
class ConvNetTester(object):
'''
The object constructor
'''
# net: the model object we are training
# server: the server object to use for all operating system
# interactions
# trainer: the object which trains the model
def __init__(self, net, server, trainer):
# keep pre-constructed objects
self.net = net
self.server = server
self.trainer = trainer
def show_progress(self, accuracy, precision, recall, f1,
folds, tries, tri, cv_accuracy_sum, cv_precision_sum,
cv_recall_sum, cv_f1_sum, fold_accuracy_sum, fold_precision_sum,
fold_recall_sum, fold_f1_sum, fold):
self.server.log("****************************************", "warning")
        progress = (fold + 1.0*tri/tries)/folds
self.server.log("", "warning")
self.server.log("Progress: " + str(progress), "warning")
self.server.log("Fold: " + str(fold+1) + " of " + str(folds), "warning")
self.server.log("Trial: " + str(tri) + " of " + str(tries), "warning")
tri_accuracy = fold_accuracy_sum/max(tri,1)
cv_accuracy = cv_accuracy_sum/max(fold,1)
self.server.log("", "warning")
self.server.log("This Accuracy: " + str(accuracy), "warning")
self.server.log("Fold Accuracy: " + str(tri_accuracy), "warning")
self.server.log("Overall Accuracy: " + str(cv_accuracy), "warning")
tri_precision = fold_precision_sum/max(tri,1)
cv_precision = cv_precision_sum/max(fold,1)
self.server.log("", "warning")
self.server.log("This Precision: " + str(precision), "warning")
self.server.log("Fold Precision: " + str(tri_precision), "warning")
self.server.log("Overall Precision: " + str(cv_precision), "warning")
tri_recall = fold_recall_sum/max(tri,1)
cv_recall = cv_recall_sum/max(fold,1)
self.server.log("", "warning")
self.server.log("This Recall: " + str(recall), "warning")
self.server.log("Fold Recall: " + str(tri_recall), "warning")
self.server.log("Overall Recall: " + str(cv_recall), "warning")
tri_f1 = fold_f1_sum/max(tri,1)
cv_f1 = cv_f1_sum/max(fold,1)
self.server.log("", "warning")
self.server.log("This F1: " + str(f1), "warning")
self.server.log("Fold F1: " + str(tri_f1), "warning")
self.server.log("Overall F1: " + str(cv_f1), "warning")
self.server.log("", "warning")
self.server.log("****************************************", "warning")
def run_cross_validation(self, folds, trials_per_fold):
cv_accuracy_sum = 0
cv_precision_sum = 0
cv_recall_sum = 0
cv_f1_sum = 0
for fold in range(0, folds):
# Set adapter to correct train/test sets
self.server.adapter.set_fold(fold)
# Initialize accumulators to zero
fold_accuracy_sum = 0
fold_precision_sum = 0
fold_recall_sum = 0
fold_f1_sum = 0
tri = 0
while tri < trials_per_fold:
accuracy, precision, recall, f1 = self.trainer.train()
if not np.isnan(f1):
tri = tri + 1
fold_accuracy_sum = fold_accuracy_sum + accuracy
fold_precision_sum = fold_precision_sum + precision
fold_recall_sum = fold_recall_sum + recall
fold_f1_sum = fold_f1_sum + f1
else:
accuracy = 0
self.server.log("DUD", "warning")
self.show_progress(accuracy, precision, recall, f1,
folds, trials_per_fold, tri, cv_accuracy_sum,
cv_precision_sum, cv_recall_sum, cv_f1_sum,
fold_accuracy_sum, fold_precision_sum,
fold_recall_sum, fold_f1_sum, fold)
# Accumulate over full cross validation
cv_accuracy_sum = cv_accuracy_sum + fold_accuracy_sum/trials_per_fold
cv_precision_sum = cv_precision_sum + fold_precision_sum/trials_per_fold
cv_recall_sum = cv_recall_sum + fold_recall_sum/trials_per_fold
cv_f1_sum = cv_f1_sum + fold_f1_sum/trials_per_fold
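        # After the loop, cv_*_sum / folds gives the final cross-validated
        # metrics; each fold contributes the mean over its trials.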
| 4,580 | 44.356436 | 88 | py |
aircraftnoise | aircraftnoise-master/classifier/testing/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/adapters/macadapter.py | from __future__ import division
import numpy as np
import nibabel as nib
import os
from collections import OrderedDict
import sys
# Default batch size (deprecated)
DEF_BATCH_SIZE = 20
class MACAdapter(object):
def __init__(self, input_dir, dim, folds=None):
# store dimensionality
self.dim = dim
self.folds = folds
# Set directory locations for all os interactions
input_dir = os.path.abspath(input_dir)
if (self.folds == None):
self.train_dir = os.path.join(input_dir, "training")
self.val_dir = os.path.join(input_dir, "validation")
self.test_dir = os.path.join(input_dir, "testing")
# Check to make sure input directory is valid
self._check_io()
# Load entire dataset into memory (it's plenty small)
self.trX = np.load(os.path.join(self.train_dir, 'trX.npy'))
self.trL = np.load(os.path.join(self.train_dir, 'trY.npy'))
self.trY = np.stack((self.trL[:,1],np.subtract(1,self.trL[:,1])), axis=1)
self.teX = np.load(os.path.join(self.test_dir, 'teX.npy'))
self.teL = np.load(os.path.join(self.test_dir, 'teY.npy'))
self.teY = np.stack((self.teL[:,1],np.subtract(1,self.teL[:,1])), axis=1)
self.vaX = np.load(os.path.join(self.val_dir, 'vaX.npy'))
self.vaL = np.load(os.path.join(self.val_dir, 'vaY.npy'))
self.vaY = np.stack((self.vaL[:,1],np.subtract(1,self.vaL[:,1])), axis=1)
# get and store sizes
self.tr_size = self.trX.shape[0]
self.va_size = self.vaX.shape[0]
self.te_size = self.teX.shape[0]
self.second_dim = self.trX.shape[1]
else:
self.lst = [[np.load(os.path.join(input_dir,str(fold) + "X.npy")),
np.load(os.path.join(input_dir,str(fold) + "Y.npy"))]
for fold in range(0,self.folds)]
self.second_dim = self.lst[0][0].shape[1]
self.fold = None
self.trX = None
self.trY = None
self.vaX = None
self.vaY = None
def _check_io(self):
# Ensure training directory has the correct structure
if not os.path.exists(self.train_dir):
print "Training directory: %s does not exist - ERROR"%self.train_dir
sys.exit(0)
if not os.path.exists(self.val_dir):
print "Validation directory: %s does not exist - ERROR"%self.val_dir
sys.exit(0)
if not os.path.exists(self.test_dir):
print "Testing directory: %s does not exist - ERROR"%self.test_dir
sys.exit(0)
def set_fold(self, fold):
self.fold = fold
self.trX = np.zeros((0, self.second_dim))
self.trL = np.zeros((0,2))
self.teX = np.zeros((0, self.second_dim))
self.teY = np.zeros((0,2))
self.vaX = np.zeros((0, self.second_dim))
self.vaL = np.zeros((0,2))
for i in range(0, self.folds):
if i == fold:
self.vaX = np.concatenate((self.vaX, self.lst[i][0]), axis=0)
self.vaL = np.concatenate((self.vaL, self.lst[i][1]), axis=0)
else:
self.trX = np.concatenate((self.trX, self.lst[i][0]), axis=0)
self.trL = np.concatenate((self.trL, self.lst[i][1]), axis=0)
self.trY = np.stack((self.trL[:,1],np.subtract(1,self.trL[:,1])), axis=1)
self.vaY = np.stack((self.vaL[:,1],np.subtract(1,self.vaL[:,1])), axis=1)
self.tr_size = self.trX.shape[0]
self.va_size = self.vaX.shape[0]
self.te_size = 0
def _get_rand_array(self, length, mx):
return np.random.randint(0, high=mx, size=length)
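    # Note: numpy.random.randint draws indices *with* replacement, so a
    # training batch may contain duplicate examples.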
# This is real slow for large batches...
def get_batch(self, size=DEF_BATCH_SIZE, collection = None):
if collection == None or collection == "training":
size = min(size, self.tr_size)
arr = self._get_rand_array(size, self.tr_size)
X = np.take(self.trX, arr, axis=0).reshape([size, self.second_dim, 1])
Y = np.take(self.trY, arr, axis=0)
if collection == "validation":
# validation always returns full validation set
X = self.vaX.reshape([self.va_size, self.second_dim, 1])
Y = self.vaY
if collection == "testing":
# testing always returns the full testing set
X = self.teX.reshape([self.te_size, self.second_dim, 1])
Y = self.teY
durs = X[:,-1]
x = np.reshape(X[:,0:-1], [-1,37,self.dim,1])
return x, durs, Y
def get_ids(self):
return self.trL[:,0], self.vaL[:,0], self.teL[:,0]
| 4,760 | 37.088 | 85 | py |
aircraftnoise | aircraftnoise-master/classifier/adapters/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/training/convnettrainer.py | import tensorflow as tf
import sys
import os
import numpy as np
'''
The object that encapsulates the training procedure and status
'''
class ConvNetTrainer(object):
'''
The object constructor
'''
# net: the model object we are training
# server: the server object to use for all operating system
# interactions
# epochs: number of epochs for training
# steps_per_epoch: number of training steps per epoch of training
# optimizer: string name of the appropriate optimizer
# opt_kwargs: keyword arguments to accompany the optimizer
# batch_size: The size of each training batch to request
# display_step: Log a status every multiple of display step
# keep_prob: The dropout probability to use during training
def __init__(self, net, server, epochs, steps_per_epoch,
optimizer = 'Adam',
opt_kwargs = {},
keep_prob = 1.0,
batch_size = 10,
display_step = 10):
# keep pre-constructed objects
self.net = net
self.server = server
self.keep_prob = keep_prob
self.batch_size = batch_size
self.display_step = display_step
# parameters for training process
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
# variable that holds the global step of training
self.global_step = tf.Variable(0)
self.optimizer = self._get_optimizer(optimizer, opt_kwargs)
'''
Configure and return the appropriate optimizer for training
'''
# opt_type: the string name of the optimizer to use (i.e. "Adam")
# opt_kwargs: the keyword arguments to accompany the particular optimizer
def _get_optimizer(self, opt_type, opt_kwargs):
if opt_type == 'Adam':
# get learning rate from kwargs
lr = opt_kwargs.pop("learning_rate", 0.2)
# Used to be exponential decay -- keeping the misnomer for now
self.variable_learning_rate = tf.constant(lr, dtype=tf.float32)
# Define optimizer objective
optimizer = tf.train.AdamOptimizer(learning_rate=self.variable_learning_rate).minimize(self.net.cost)
return optimizer
else:
print "Only Adam optimizer is currently supported - Exiting"
sys.exit(0)
'''
The function that runs the training process for the network
most execution time is spent here
'''
# restore_model: The path (relative) to a model checkpoint. If not None,
# the training starts with these weights
def train(self, restore_model = None, save_model = None):
# define the operation to initialize all variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# set model weights to saved values if called for
if restore_model is not None:
restore_abs = os.path.abspath(restore_model)
self.server.log("restoring from: " + restore_abs, "info")
self.net.restore(sess, restore_abs)
# make prediction with initial weights
val_x, val_dur, val_y = self.server.get_validation_batch(10000)
val_preds, accuracy = sess.run((self.net.predictor, self.net.accuracy),
feed_dict={
self.net.x: val_x,
self.net.durs: val_dur,
self.net.y: val_y,
self.net.keep_prob: 1.0
})
self.server.log("Weights initialized")
self.server.log("Initial validation accuracy: " + str(accuracy), "info")
# log the beginning of training
self.server.log("Entering training loop")
            # Only save models better than 95% validation accuracy
max_accuracy = 0.95
# Iterate over all epochs
for epoch in range(0, self.epochs):
# Get batch for this epoch
batch_x, batch_dur, batch_y = self.server.get_training_batch(self.batch_size)
for step in range((epoch*self.steps_per_epoch),
(epoch+1)*self.steps_per_epoch):
# Run optimization step
_, loss, lr, = sess.run((self.optimizer, self.net.w_ce,
self.variable_learning_rate),
feed_dict = {
self.net.x: batch_x,
self.net.durs: batch_dur,
self.net.y: batch_y,
self.net.keep_prob: self.keep_prob
})
# Print step number if called for
if step % self.display_step == 0:
self.server.log("Step: " + str(step))
# Run prediction to get stats to display
val_x, val_dur, val_y = self.server.get_validation_batch(570)
val_preds, v_accuracy, v_f1 = sess.run((self.net.predictor, self.net.accuracy, self.net.f1),
feed_dict={
self.net.x: val_x,
self.net.durs: val_dur,
self.net.y: val_y,
self.net.keep_prob: 1.0
})
# Run prediction to get stats to display
t_accuracy, t_f1 = sess.run((self.net.accuracy, self.net.f1),
feed_dict={
self.net.x: batch_x,
self.net.durs: batch_dur,
self.net.y: batch_y,
self.net.keep_prob: 1.0
})
# log epoch
self.server.log("Step: " + str(step), "info")
self.server.log("Validation accuracy: " + str(v_accuracy), "info")
self.server.log("Validation f1: " + str(v_f1), "info")
self.server.log("Training accuracy: " + str(t_accuracy), 'info')
self.server.log("Training f1: " + str(t_f1), "info")
if ((v_accuracy > max_accuracy) and (save_model is not None)):
max_accuracy = v_accuracy
self.server.save_weights(self.net, step, sess, custom_name=(str(v_accuracy) + save_model))
# Run prediction after each training epoch
val_preds, v_accuracy, v_f1 = sess.run((self.net.predictor, self.net.accuracy, self.net.f1),
feed_dict={
self.net.x: val_x,
self.net.durs: val_dur,
self.net.y: val_y,
self.net.keep_prob: 1.0
})
# Run prediction after each training epoch
t_accuracy, t_f1 = sess.run((self.net.accuracy, self.net.f1),
feed_dict={
self.net.x: batch_x,
self.net.durs: batch_dur,
self.net.y: batch_y,
self.net.keep_prob: 1.0
})
# log epoch
self.server.log("End of epoch " + str(epoch), "info")
self.server.log("Validation accuracy: " + str(v_accuracy), "info")
self.server.log("Validation f1: " + str(v_f1), "info")
self.server.log("Training accuracy: " + str(t_accuracy), 'info')
self.server.log("Training f1: " + str(t_f1), "info")
_, accuracy, precision, recall, f1 = sess.run((self.net.predictor, self.net.accuracy, self.net.precision, self.net.recall, self.net.f1),
feed_dict={
self.net.x: val_x,
self.net.durs: val_dur,
self.net.y: val_y,
self.net.keep_prob: 1.0
})
if (save_model is not None):
self.server.save_weights(self.net, step, sess, custom_name=save_model)
return accuracy, precision, recall, f1
| 9,534 | 47.156566 | 148 | py |
aircraftnoise | aircraftnoise-master/classifier/training/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/models/layers.py | from __future__ import print_function, division, absolute_import, unicode_literals
import tensorflow as tf
'''
Functions to initialize variables
'''
def weight_variable(shape, stddev):
return tf.Variable(tf.truncated_normal(shape, stddev=stddev))
def bias_variable(shape):
return tf.Variable(tf.constant(0.1,shape=shape))
def softmax(mp):
exponentials = tf.exp(mp)
sums = tf.reduce_sum(exponentials, 1, keep_dims=True)
return tf.div(exponentials, sums)
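# Note: this direct exp/normalize form can overflow for large logits. A
# numerically stable variant (an equivalent reformulation, not part of the
# original file) subtracts the row-wise max before exponentiating:
#
# def stable_softmax(mp):
#     shifted = mp - tf.reduce_max(mp, 1, keep_dims=True)
#     exponentials = tf.exp(shifted)
#     return tf.div(exponentials, tf.reduce_sum(exponentials, 1, keep_dims=True))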
| 479 | 23 | 82 | py |
aircraftnoise | aircraftnoise-master/classifier/models/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/models/convnet.py | from __future__ import division
from collections import OrderedDict
import numpy as np
from math import ceil, floor
from models.layers import *
'''
The function that defines the set of computations that takes the input x
to the set of logits predicted for each event
'''
def build_convnet(x, durs, csize=3, ksize=2, dim=10):
x_shape = tf.shape(x)
batch_size = x_shape[0]
height = 37
width = dim
# Variables for first convolution
w1 = weight_variable([csize, csize, 1, 4], stddev=np.sqrt(2 / (csize**2 * 4)))
b1 = bias_variable([4])
# Variables for second convolution
w2 = weight_variable([csize, csize, 4, 8], stddev=np.sqrt(2 / (csize**2 * 8)))
b2 = bias_variable([8])
# First convolution and pooling
conv1 = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='VALID')
h_conv1 = tf.nn.relu(conv1 + b1)
height = height - csize + 1
width = width - csize + 1
pool1 = tf.nn.max_pool(h_conv1, ksize=[1, ksize, ksize, 1], strides=[1, ksize, ksize, 1], padding='VALID')
height = ceil(float((height - (ksize - 1))) / float(ksize))
width = ceil(float((width - (ksize - 1))) / float(ksize))
# Second convolution and pooling
conv2 = tf.nn.conv2d(pool1, w2, strides=[1, 1, 1, 1], padding='VALID')
h_conv2 = tf.nn.relu(conv2 + b2)
height = height - csize + 1
width = width - csize + 1
pool2 = tf.nn.max_pool(h_conv2, ksize=[1, ksize, ksize, 1], strides=[1, ksize, ksize, 1], padding='VALID')
height = int(ceil(float((height - (ksize - 1))) / float(ksize)))
width = int(ceil(float((width - (ksize - 1))) / float(ksize)))
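    # Worked example (assuming the defaults csize=3, ksize=2 with dim=37, as
    # the scripts in this repo use): 37x37 -> conv1 35x35 -> pool1 17x17 ->
    # conv2 15x15 -> pool2 7x7, so the flattened feature vector below has
    # 7*7*8 = 392 entries plus one appended duration value.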
# Flat classifier input with duration
flattened_features = tf.concat((tf.reshape(pool2, [batch_size, height*width*8]), durs), axis=1)
# First dense layer
s1 = tf.layers.dense(flattened_features, 40, activation=tf.nn.relu)
# Second dense layer
s2 = tf.layers.dense(s1, 15, activation=tf.nn.relu)
# Third dense layer
s3 = tf.layers.dense(s2, 2, activation=tf.nn.relu)
return s3
'''
The object encapsulating the operations of the network
'''
class ConvNet(object):
'''
The constructor for the Network
'''
def __init__(self, dim):
tf.reset_default_graph()
self.dim = dim
# the placeholder for the input
self.x = tf.placeholder(tf.float32, shape=[None, 37, self.dim, 1], name="input")
# the placeholder for the durations
self.durs = tf.placeholder(tf.float32, shape=[None, 1], name="duration")
# the placeholder for the output
self.y = tf.placeholder(tf.float32, shape=[None, 2], name="labels")
# the placeholder for the dropout keep probability
self.keep_prob = tf.placeholder(tf.float32)
# build network, return outputs, variables, and offset
self.out = build_convnet(self.x, self.durs, dim=self.dim)
# define cost computation
self.cost = self._get_cost(self.out)
# define computations for showing accuracy of the network
self.predictor = softmax(self.out)
self.correct = tf.equal(tf.argmax(self.predictor, 1),
tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct, tf.float32))
# Utitlity tensors for computing the f1 score
sums = tf.argmax(self.predictor, 1) + tf.argmax(self.y, 1)
difs = tf.argmax(self.predictor, 1) - tf.argmax(self.y, 1)
# Compute f1 score
self.true_pos = tf.reduce_sum(tf.cast(tf.equal(sums, 2), tf.int32))
self.false_pos = tf.reduce_sum(tf.cast(tf.equal(difs, 1), tf.int32))
self.false_neg = tf.reduce_sum(tf.cast(tf.equal(difs, -1), tf.int32))
self.precision = self.true_pos/(self.true_pos + self.false_pos)
self.recall = self.true_pos/(self.true_pos + self.false_neg)
self.f1 = 2*self.precision*self.recall/(self.precision + self.recall)
# A function to test the validity of the computation graph
def test_shp(self):
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
x_dummy = np.zeros([200,37,self.dim,1])
dur_dummy = np.zeros([200,1])
tmp = sess.run((self.out), feed_dict= {
self.x: x_dummy,
self.durs: dur_dummy,
self.keep_prob: 1
})
            print(tmp.shape)
'''
The function that defines the loss of the network
called from the constructor
'''
def _get_cost(self, logits, weight=50000.0):
# compute loss
self.tlogits = logits
self.tlabels = self.y
each_ce = tf.reshape(tf.nn.softmax_cross_entropy_with_logits(
logits = logits,
labels = self.y
), (-1,1))
pos = tf.slice(self.y, [0,1], [tf.shape(self.y)[0], 1])
# get proportion of events that are community for class weighting
ratio = 1-tf.reduce_mean(pos)
# perform class weighting
self.pwts = tf.multiply(pos,ratio)
self.nwts = tf.multiply(tf.subtract(1.0, pos), 1-ratio)
self.wts = tf.add(self.pwts, self.nwts)
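        # Each example is weighted by the prevalence of the *opposite* class
        # (ratio for pos=1, 1-ratio for pos=0), so the minority class
        # contributes comparably to the mean loss below.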
self.each_ce = each_ce
self.w_ce = tf.multiply(each_ce, self.wts)
# loss is the weighted average of cross-entropies
loss = tf.reduce_mean(self.w_ce)
return loss
'''
The function to make a prediction for each class for each pixel
given some batch of input images
'''
# model_path: the location of the checkpoint in which the trained
# model was saved
# x: the tensor storing the input data to be predicted
def predict(self, x, model_path=None):
init = tf.global_variables_initializer()
with tf.Session() as sess:
# initialize all variables
sess.run(init)
# set weights to saved values
if (model_path != None):
self.restore(sess, model_path)
y_emp = np.empty((x.shape[0], x.shape[1], x.shape[2], self.classes))
prediction = sess.run(self.predictor, feed_dict={
self.x: x,
self.y: y_emp,
self.keep_prob: 1.0
})
return prediction
'''
The function to save the current weights to a file in order to restore from
them later
'''
# sess: the current session with the desired variable values
# model_path: the location in which to store the weights
# RETURNS: the location where it was saved
def save(self, sess, model_path):
saver = tf.train.Saver()
save_path = saver.save(sess, model_path)
return save_path
'''
The function to restore a previous session's weights to the current session
'''
# sess: the current session with the weights to be replaced
# model_path: the location of the weights to restore
# RETURNS: None
def restore(self, sess, model_path):
saver = tf.train.Saver()
saver.restore(sess, model_path)
| 7,062 | 34.671717 | 110 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/use_convnet.py | from models.convnet import ConvNet
from servers.convnetserver import ConvNetServer
from adapters.macadapter import MACAdapter
from preprocessing.preprocessor import Preprocessor
import numpy as np
import tensorflow as tf
'''
CONFIGURATION
'''
'''
Preprocessing
'''
RAW_FILE = '../raw_data/400_community_events.csv'
DIMENSION = 37
# IDs of events in the first set that kill it
bad = []
'''
Adapter
'''
# Number of folds for k-fold cross-validation (decided during preprocessing)
FOLDS = None
'''
Server
'''
# These are intermediates created by preprocessing and used by network
# directory to get training, validation, and testing data from
INPUT_DIR = "use_test"
# directory to write all log, predictions, and saved models to
OUTPUT_DIR = "use_out"
# dummy to make the network happy
BATCH_SIZE = None
'''
Network
'''
# file location of weights to restore from (i.e. weights/model1.ckpt)
INITIAL_WEIGHTS = 'checkpoints/cvd_model.ckpt'
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':
# build preprocessor
ppr = Preprocessor()
# Process raw data
X, Y, events_found = ppr.get_raw_data(DIMENSION, [RAW_FILE], bad)
X, Y = ppr.remove_outliers(X, Y)
X, Y = ppr.normalize(X, Y)
trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(X, Y, 0.0, 1.0)
ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY, INPUT_DIR)
# build adapter
adapter = MACAdapter(INPUT_DIR, DIMENSION, FOLDS)
# build model
convnet = ConvNet(DIMENSION)
# build server
server = ConvNetServer(adapter, OUTPUT_DIR,
batch_size = BATCH_SIZE,
verbose = True,
use=True)
x, durs, _ = server.get_testing_batch()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
convnet.restore(sess, INITIAL_WEIGHTS)
predictions = sess.run((convnet.predictor), feed_dict={
convnet.x: x,
convnet.durs: durs
})
# Get event ids
_, _, ids = adapter.get_ids()
# Display aircraft probability for each ID
for i in range(0, len(ids)):
server.log(("Event %d: aircraft probability %.3f"%(
ids[i], predictions[i][0])), "info")
| 2,293 | 24.208791 | 77 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/example_from_api.py |
import json
from models.convnet import ConvNet
from servers.convnetserver import ConvNetServer
from adapters.macadapter import MACAdapter
from preprocessing.preprocessor import Preprocessor
import numpy as np
import tensorflow as tf
'''
CONFIGURATION
'''
# Example JSON file
EXAMPLE_FILE = '../raw_data/sample.json'
'''
Preprocessing
'''
DIMENSION = 37
# IDs of events in the first set that kill it
bad = []
'''
Adapter
'''
# Number of folds for k-fold cross-validation (decided during preprocessing)
FOLDS = None
'''
Server
'''
# These are intermediates created by preprocessing and used by network
# directory to get training, validation, and testing data from
INPUT_DIR = "use_test"
# directory to write all log, predictions, and saved models to
OUTPUT_DIR = "use_out"
# dummy to make the network happy
BATCH_SIZE = None
'''
Network
'''
# file location of weights to restore from (i.e. weights/model1.ckpt)
INITIAL_WEIGHTS = 'checkpoints/cvd_model/cvd_model.ckpt'
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':
# JSON object returned from api_call
# replace this with however you would like it to work in production
json_data = json.load(open(EXAMPLE_FILE))
# build preprocessor
ppr = Preprocessor()
# Process raw data
#X, Y, events_found = ppr.get_raw_data(DIMENSION, [RAW_FILE], bad)
X, Y, events_found = ppr.get_from_json(DIMENSION, json_data)
X, Y = ppr.remove_outliers(X, Y)
X, Y = ppr.normalize(X, Y)
trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(X, Y, 0.0, 1.0)
ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY, INPUT_DIR)
# build adapter
adapter = MACAdapter(INPUT_DIR, DIMENSION, FOLDS)
# build model
convnet = ConvNet(DIMENSION)
# build server
server = ConvNetServer(adapter, OUTPUT_DIR,
batch_size = BATCH_SIZE,
verbose = True,
use=True)
x, durs, _ = server.get_testing_batch()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
convnet.restore(sess, INITIAL_WEIGHTS)
predictions = sess.run((convnet.predictor), feed_dict={
convnet.x: x,
convnet.durs: durs
})
# Get event ids
_, _, ids = adapter.get_ids()
ret = [{"eventID": ids[i], "aircraftProbability": predictions[i][0]} for i in range(0, len(ids))]
# Encode and send the labels back here
    print(ret)
# # Display aircraft probability for each ID
# for i in range(0, len(ids)):
# server.log(("Event %d: aircraft probability %.3f"%(
# ids[i], predictions[i][0])), "info")
| 2,724 | 24.707547 | 101 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/queue.py |
import json
import os
import psycopg2
import psycopg2.extras
import shutil
import boto3
import signal
import time
import datetime
from models.convnet import ConvNet
from servers.convnetserver import ConvNetServer
from adapters.macadapter import MACAdapter
from preprocessing.preprocessor import Preprocessor
import numpy as np
import tensorflow as tf
class GracefulKiller:
# http://stackoverflow.com/questions/18499497/how-to-process-sigterm-signal-gracefully
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
class MachineLearning():
def __init__(self):
'''
CONFIGURATION
'''
# Example JSON file
self.MODEL = 1
'''
Preprocessing
'''
self.DIMENSION = 37
# IDs of events in the first set that kill it
self.bad = []
'''
Adapter
'''
# Number of folds for k-fold cross-validation (decided during preprocessing)
self.FOLDS = None
'''
Server
'''
# These are intermediates created by preprocessing and used by network
# directory to get training, validation, and testing data from
self.INPUT_DIR = "use_test"
# directory to write all log, predictions, and saved models to
self.OUTPUT_DIR = "use_out"
# dummy to make the network happy
self.BATCH_SIZE = None
'''
Network
'''
# file location of weights to restore from (i.e. weights/model1.ckpt)
self.INITIAL_WEIGHTS = 'checkpoints/cvd_model/cvd_model.ckpt'
def db_open(self):
self.conn = psycopg2.connect(
"application_name=machine_learning" +
" host=" + os.environ['PGHOST'] +
" dbname=" + os.environ['PGDATABASE'] +
" user=" + os.environ['PGUSER'] +
" password=" + os.environ['PGPASSWORD'])
def sqs_connect_to_queue(self):
try:
self.sqs = boto3.resource('sqs',
aws_access_key_id=os.environ['aws_access_key_id'],
aws_secret_access_key=os.environ['aws_secret_access_key'],
region_name=os.environ['region'])
self.queue = self.sqs.get_queue_by_name(QueueName=os.environ['queue'])
except Exception as e:
self._catch_error(sys._getframe().f_code.co_name, e)
    def db_close(self):
        self.conn.close()
    def _catch_error(self, source, e):
        # Minimal error handler assumed here, since the calls below expect it:
        # log the failing location and the exception, then carry on.
        print(str(datetime.datetime.now()) + ': ERROR in ' + str(source) + ': ' + str(e))
def by_infile(self, infile):
        try:
            shutil.rmtree(self.OUTPUT_DIR)
        except OSError:
            pass # output dir may not exist yet
self.db_open()
json_data = self.get_events_from_infile(infile)
# build preprocessor
ppr = Preprocessor()
# Process raw data
#X, Y, events_found = ppr.get_raw_data(DIMENSION, [RAW_FILE], bad)
X, Y, events_found = ppr.get_from_json(self.DIMENSION, json_data)
X, Y = ppr.remove_outliers(X, Y)
X, Y = ppr.normalize(X, Y)
trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(X, Y, 0.0, 1.0)
ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY, self.INPUT_DIR)
# build adapter
adapter = MACAdapter(self.INPUT_DIR, self.DIMENSION, self.FOLDS)
# build model
convnet = ConvNet(self.DIMENSION)
# build server
server = ConvNetServer(adapter, self.OUTPUT_DIR,
batch_size = self.BATCH_SIZE,
verbose = True,
use=True)
x, durs, _ = server.get_testing_batch()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
convnet.restore(sess, self.INITIAL_WEIGHTS)
predictions = sess.run((convnet.predictor), feed_dict={
convnet.x: x,
convnet.durs: durs
})
# Get event ids
_, _, ids = adapter.get_ids()
results = [{"eventID": int(ids[i]), "ml": {"aircraftProbability": round(np.around(predictions[i][0],decimals=4),4), "model": self.MODEL}} for i in range(0, len(ids))]
for result in results:
self.insert_result_for_event(result)
self.db_close()
def get_events_from_infile(self, infile):
cur = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
sql = "SELECT eventid::int, meta->'ehistory' as ehistory FROM macnoms.events WHERE infile = %s AND stime > '2017-01-01'"
data = [infile]
cur.execute(sql, data)
results = cur.fetchall()
return results
def insert_result_for_event(self, result):
cur = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
sql = "UPDATE macnoms.events set meta = jsonb_set(meta, '{ml}', %s) WHERE eventid = %s"
data = [json.dumps(result['ml']), result['eventID']]
        # Execute the update, then commit so the change is persisted
        cur.execute(sql, data)
        self.conn.commit()
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == "__main__":
q = MachineLearning()
q.sqs_connect_to_queue()
killer = GracefulKiller()
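    # Expected message payload (inferred from the handling below):
    # {"job": {"category": "machineLearning",
    #          "operation": "byInfile",
    #          "inFile": "<input file identifier>"}}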
def readQueue():
jobs = q.queue.receive_messages()
if len(jobs) > 0:
for message in jobs:
try:
message_body = json.loads(message.body)
except ValueError:
print('Invalid json')
message_body = {}
message.delete()
if 'job' in message_body:
                    d = datetime.datetime.now()
                    print(str(d) + ': ' + message.body)
try:
data = message_body['job']
this_category = data['category']
this_operation = data['operation']
except Exception as e:
message.delete()
q._catch_error('queue.py', e)
return True
if this_category == 'machineLearning':
if this_operation == 'byInfile':
q.by_infile(data['inFile'])
else:
q._catch_error('main.py', 'Unknown operation (' + str(this_operation) + ')')
message.delete()
else:
q._catch_error('main.py', 'Unknown category (' + str(this_category) + ')')
message.delete()
print('---Done---')
return True
else:
                    q._catch_error('main.py', 'JSON data does not contain a job value')
message.delete()
return True
else:
return False
while True:
        if killer.kill_now: # Exit if SIGINT/SIGTERM was caught
exit()
if readQueue():
pass
else:
time.sleep(60) # Avoid too many queue polls
| 7,149 | 34.39604 | 174 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/example_training_from_api.py |
import json
from models.convnet import ConvNet
from servers.convnetserver import ConvNetServer
from adapters.macadapter import MACAdapter
from preprocessing.preprocessor import Preprocessor
from training.convnettrainer import ConvNetTrainer
import numpy as np
import tensorflow as tf
'''
CONFIGURATION
'''
# Example JSON file
EXAMPLE_FILE = '../raw_data/sample.json'
'''
Preprocessing
'''
DIMENSION = 37
# IDs of events in the first set that kill it
bad = []
'''
Adapter
'''
# Number of folds for k-fold cross-validation (decided during preprocessing)
FOLDS = None
'''
Server
'''
# These are intermediates created by preprocessing and used by network
# directory to get training, validation, and testing data from
INPUT_DIR = "training_intermediate"
# directory to write all log, predictions, and saved models to
# script will exit before training if this exists (to avoid overwriting)
OUTPUT_DIR = "training_out"
'''
Network
'''
# file location of weights to restore from (i.e. weights/model1.ckpt)
# I recommend you train from scratch - so set this to None
INITIAL_WEIGHTS = None
#INITIAL_WEIGHTS = 'checkpoints/cvd_model.ckpt'
'''
Trainer
'''
# number of epochs to train for
EPOCHS = 60
# number of training steps in each epoch
STEPS_PER_EPOCH = 85
# string name of optimizer to use
OPTIMIZER = "Adam"
# keyword arguments for optimizer definition
# learning_rate, default = 0.2
OPT_KWARGS = dict([("learning_rate", 0.0004)])
# probability value to use for dropout
KEEP_PROB = 0.6
# training batch size
BATCH_SIZE = 2000
# step at which to log status at modulo 0
DISPLAY_STEP = 10
# The location in which to save the model
SAVE_NAME = "example_training.ckpt"
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':
# JSON object returned from api_call
# replace this with however you would like it to work in production
json_data = json.load(open(EXAMPLE_FILE))
    # NOTE: if events in the JSON object have neither "aircraft" nor "community"
    # fields, they will be labeled as community for training - try to avoid this
# build preprocessor
ppr = Preprocessor()
# Process raw data
#X, Y, events_found = ppr.get_raw_data(DIMENSION, [RAW_FILE], bad)
X, Y, events_found = ppr.get_from_json(DIMENSION, json_data)
X, Y = ppr.remove_outliers(X, Y)
X, Y = ppr.normalize(X, Y)
# Shove all events into the "training" subdirectory
trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(X, Y, 1.0, 0.0)
# Store events in intermediate directory (will be deleted on subsequent trainings)
ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY, INPUT_DIR)
# build adapter
adapter = MACAdapter(INPUT_DIR, DIMENSION, FOLDS)
# build model
convnet = ConvNet(DIMENSION)
# build server
server = ConvNetServer(adapter, OUTPUT_DIR,
batch_size = BATCH_SIZE,
verbose = True,
use=False)
# build trainer
trainer = ConvNetTrainer(convnet, server, EPOCHS, STEPS_PER_EPOCH,
optimizer = OPTIMIZER,
opt_kwargs = OPT_KWARGS,
keep_prob = KEEP_PROB,
batch_size = BATCH_SIZE,
display_step = DISPLAY_STEP)
# initiate training
trainer.train(
restore_model = INITIAL_WEIGHTS,
save_model = SAVE_NAME
)
| 3,465 | 26.951613 | 86 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/test_shape.py | from models.convnet import ConvNet
from servers.convnetserver import ConvNetServer
from training.convnettrainer import ConvNetTrainer
from adapters.macadapter import MACAdapter
import numpy as np
'''
CONFIGURATION
'''
'''
Server
'''
# directory to get training, validation, and testing data from
INPUT_DIR = "devin"
# directory to write all log, predictions, and saved models to
OUTPUT_DIR = "devout"
'''
Network
'''
'''
Training
'''
# number of epochs to train for
EPOCHS = 1000
# number of training steps in each epoch
STEPS_PER_EPOCH = 250
# string name of optimizer to use
OPTIMIZER = "Momentum"
# keyword arguments for optimizer definition
# learning_rate, default = 0.2
# decay_rate, default = 0.95
# momentum, default = 0.2
OPT_KWARGS = dict([("learning_rate",0.006), ("momentum",0.0)])
# file location of weights to restore from (i.e. weights/model1.ckpt)
INITIAL_WEIGHTS = './poor.ckpt'
# probability value to use for dropout
KEEP_PROB = 1.0
# training batch size
BATCH_SIZE = 400
# step at which to log status at modulo 0
DISPLAY_STEP = 5
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':
# build adapter
adapter = MACAdapter(INPUT_DIR)
# build model
convnet = ConvNet(10)
# build server
server = ConvNetServer(adapter, OUTPUT_DIR,
batch_size = BATCH_SIZE)
# build trainer
trainer = ConvNetTrainer(convnet, server, EPOCHS, STEPS_PER_EPOCH,
optimizer = OPTIMIZER,
opt_kwargs = OPT_KWARGS)
convnet.test_shp()
| 1,621 | 21.527778 | 70 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/cv_convnet.py | from models.convnet import ConvNet
from servers.convnetserver import ConvNetServer
from training.convnettrainer import ConvNetTrainer
from adapters.macadapter import MACAdapter
from testing.convnettester import ConvNetTester
import numpy as np
'''
CONFIGURATION
'''
'''
Adapter
'''
# Number of folds for k-fold cross-validation (decided during preprocessing)
FOLDS = 10
'''
Server
'''
# directory to get training, validation, and testing data from
INPUT_DIR = "10foldcv"
# directory to write all log, predictions, and saved models to
OUTPUT_DIR = "cvout"
'''
Network
'''
'''
Training
'''
# number of epochs to train for
EPOCHS = 60
# number of training steps in each epoch
STEPS_PER_EPOCH = 85
# string name of optimizer to use
OPTIMIZER = "Adam"
# keyword arguments for optimizer definition
# learning_rate, default = 0.2
OPT_KWARGS = dict([("learning_rate", 0.0004)])
# file location of weights to restore from (i.e. weights/model1.ckpt)
INITIAL_WEIGHTS = None
# probability value to use for dropout
KEEP_PROB = 0.6
# training batch size
BATCH_SIZE = 2000
# step at which to log status at modulo 0
DISPLAY_STEP = 10
# The interpolated dimensionality of each octave
DIMENSION = 37
'''
Tester
'''
# Number of trials to do for each fold (stats will be averaged)
TRIALS_PER_FOLD = 5
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':
# build adapter
adapter = MACAdapter(INPUT_DIR, DIMENSION, FOLDS)
# build model
convnet = ConvNet(DIMENSION)
# build server
server = ConvNetServer(adapter, OUTPUT_DIR,
batch_size = BATCH_SIZE,
verbose = False)
# build trainer
trainer = ConvNetTrainer(convnet, server, EPOCHS, STEPS_PER_EPOCH,
optimizer = OPTIMIZER,
opt_kwargs = OPT_KWARGS,
keep_prob = KEEP_PROB,
batch_size = BATCH_SIZE,
display_step = DISPLAY_STEP)
# build tester
tester = ConvNetTester(convnet, server, trainer)
# initiate cross-validation
tester.run_cross_validation(
folds = FOLDS,
trials_per_fold = TRIALS_PER_FOLD
)
| 2,258 | 21.818182 | 76 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/train_convnet.py | from models.convnet import ConvNet
from servers.convnetserver import ConvNetServer
from training.convnettrainer import ConvNetTrainer
from adapters.macadapter import MACAdapter
import numpy as np
'''
CONFIGURATION
'''
'''
Adapter
'''
# Number of folds for k-fold cross-validation (decided during preprocessing)
FOLDS = None
'''
Server
'''
# directory to get training, validation, and testing data from
INPUT_DIR = "full_training"
# directory to write all log, predictions, and saved models to
OUTPUT_DIR = "fullout"
'''
Network
'''
'''
Training
'''
# number of epochs to train for
EPOCHS = 60
# number of training steps in each epoch
STEPS_PER_EPOCH = 85
# string name of optimizer to use
OPTIMIZER = "Adam"
# keyword arguments for optimizer definition
# learning_rate, default = 0.2
OPT_KWARGS = dict([("learning_rate", 0.0004)])
# file location of weights to restore from (i.e. weights/model1.ckpt)
INITIAL_WEIGHTS = None
# probability value to use for dropout
KEEP_PROB = 0.6
# training batch size
BATCH_SIZE = 2000
# step at which to log status at modulo 0
DISPLAY_STEP = 10
# The interpolated dimensionality of each octave
DIMENSION = 37
# The location in which to save the model
SAVE_NAME = "cvd_model.ckpt"
'''
SCRIPT
'''
# Only run if this is the main module to be run
if __name__ == '__main__':
# build adapter
adapter = MACAdapter(INPUT_DIR, DIMENSION, FOLDS)
# build model
convnet = ConvNet(DIMENSION)
# build server
server = ConvNetServer(adapter, OUTPUT_DIR,
batch_size = BATCH_SIZE,
verbose = True)
# build trainer
trainer = ConvNetTrainer(convnet, server, EPOCHS, STEPS_PER_EPOCH,
optimizer = OPTIMIZER,
opt_kwargs = OPT_KWARGS,
keep_prob = KEEP_PROB,
batch_size = BATCH_SIZE,
display_step = DISPLAY_STEP)
# initiate training
trainer.train(
restore_model = INITIAL_WEIGHTS,
save_model = SAVE_NAME
)
| 2,077 | 22.613636 | 76 | py |
aircraftnoise | aircraftnoise-master/classifier/scripts/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/servers/convnetserver.py | import os
import sys
import numpy as np
import errno
import logging
import time
'''
The object that handles the bulk of the interactions with the operating system
This includes getting feed_dict data, storing predictions, and logging training
'''
class ConvNetServer(object):
'''
The server constructor
'''
    # adapter:    The adapter that supplies training/validation/testing data
    # output_dir: The directory to store all data from training
    # batch_size: The batch size to use for training (unless otherwise
    #             specified at call-time)
def __init__(self, adapter,
output_dir = "output",
batch_size = 1,
verbose = True,
use = False):
# store adapter
self.adapter = adapter
self.use = use
# make output path absolute
self.output_dir = os.path.abspath(output_dir)
self.predictions_dir = os.path.join(self.output_dir, "predictions")
self.weights_dir = os.path.join(self.output_dir, "weights")
# Check to make sure directory structure is valid
self._check_io()
# Set values for managing training
self.batch_size = batch_size
# configure the logging format
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
log_formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
# create file handler
if not self.use:
log_filename = os.path.join(output_dir, "training" + str(time.time()) + ".log")
else:
log_filename = os.path.join(output_dir, "use" + str(time.time()) + ".log")
file_handler = logging.FileHandler(log_filename)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
# create console handler
console_handler = logging.StreamHandler()
if verbose:
console_handler.setLevel(logging.INFO)
else:
console_handler.setLevel(logging.WARNING)
console_handler.setFormatter(log_formatter)
# add handlers to root logger
self.logger.addHandler(console_handler)
self.logger.addHandler(file_handler)
'''
A function called by the constructor to make sure everything is in order
for reading and writing to disk
'''
def _check_io(self):
# Create structure for output. Error out if already exists to avoid
# overwriting
if not os.path.exists(self.output_dir):
print "Output dir: %s does not exist - Creating it"%self.output_dir
os.makedirs(self.output_dir)
if not self.use:
os.makedirs(self.predictions_dir)
os.makedirs(self.weights_dir)
else:
print "Output dir: %s exists - ERROR"%self.output_dir
            sys.exit(1) # nonzero exit: refusing to overwrite an existing output dir
'''
A function to save the weights of the network to disk
Calls corresponding net function with appropriate location
'''
# net: The network object whose weights we're saving
# iteration: The current iteration of training
# session: The current tensorflow session running
def save_weights(self, net, iteration, session, custom_name = None):
if (custom_name == None):
save_path = os.path.join(self.weights_dir, 'step_' + str(iteration) + '.ckpt')
else:
save_path = os.path.join(self.weights_dir, custom_name)
self.log("Saving model at " + save_path, "warning")
net.save(session, save_path)
'''
A function to log information about the training process
'''
# message: the message to log
def log(self, message, ltype="debug"):
if (ltype=="debug"):
self.logger.debug(message)
elif (ltype=="warning"):
self.logger.warning(message)
else:
self.logger.info(message)
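    # Usage sketch: self.log("Saving model", "warning") logs at WARNING level;
    # any ltype other than "debug" or "warning" falls through to INFO.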
'''
Set of functions which serve data to the training, validation, and testing
procedure
'''
def get_training_batch(self, this_batch_size = None):
if this_batch_size is None:
this_batch_size = self.batch_size
return self.adapter.get_batch(this_batch_size, "training")
def get_validation_batch(self, this_batch_size = None):
if this_batch_size is None:
this_batch_size = self.batch_size
return self.adapter.get_batch(this_batch_size, "validation")
def get_testing_batch(self):
return self.adapter.get_batch(None, "testing")
| 4,669 | 31.887324 | 91 | py |
aircraftnoise | aircraftnoise-master/classifier/servers/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/preprocessing/crossvalidate.py | from preprocessor import Preprocessor
import sys
# Proportions of data in each resulting set
TRPROP = 0.8 # Training
TEPROP = 0.0 # Testing
# VALIDATION SET IS REST
# Names of files containing the raw data
input_files = ['../raw_data/oml_final.csv', '../raw_data/400_community_events.csv']
# IDs of events in the first set that kill it
bad = [53905373, 53906999, 53907026, 53907026, 53907030, 53905373,
53905426, 53907014, 53905400, 53905433, 53905397, 53905371,
53905392]
if __name__ == "__main__":
# Handle invalid calls
if (len(sys.argv) < 4):
print "Please provide dimensionality, output location, and number of folds"
sys.exit()
elif (len(sys.argv) > 4):
print "Expecting exactly three arguments (dimensionality, location, and folds)"
sys.exit()
    # Get provided dimensionality
dim = int(sys.argv[1])
location = sys.argv[2]
folds = int(sys.argv[3])
print
print '\033[92m' + "*************************************************" + '\033[0m'
print
print "Provided dimensionality:", dim
print "Provided location: ", location
print "Provided folds: ", folds
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Constructing preprocesor object..."
print
ppr = Preprocessor()
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Fetching data from storage..."
print
X, Y, events_found = ppr.get_raw_data(dim, input_files, bad)
print
print "Number of raw events found in database: ", events_found
print "Number of raw events that script was able to parse:", X.shape[0]
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Removing invalid and outlying events from dataset..."
print
X, Y = ppr.remove_outliers(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Normalizing data to have mean zero..."
print
X, Y = ppr.normalize(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Partitioning the data into folds"
print
lst = ppr.partition_for_cross_validation(X, Y, folds)
for i in range(0,folds):
print "fold:", i, lst[i][0].shape[0], "events"
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Storing the data at", location
print
ppr.store_cv_folds(lst, location)
print '\033[92m' + "*************************************************" + '\033[0m'
print
| 2,772 | 30.511364 | 87 | py |
aircraftnoise | aircraftnoise-master/classifier/preprocessing/event2d.py | import tensorflow as tf
import numpy as np
import json
import math
import sys
keys = ["6.3","8.0","10.0","12.5","16.0","20.0","25.0","31.5","40.0","50.0","63.0","80.0","100","125","160","200","250","315","400","500","630","800","1000","1250","1600","2000","2500","3150","4000","5000","6300","8000","10000","12500","16000","20000","overall"]
class Event2D:
def lin_interp(self, rawmat, dim):
ncol = np.shape(rawmat)[0]
ret = np.empty([ncol, dim], dtype=float)
rmx = np.shape(rawmat)[1] - 1
inc = 1.0*(rmx-1)/dim
start = inc/2
for i in range(ncol):
for j in range(dim):
rw = start + j*inc
ind1 = int(math.floor(rw))
weight1 = 1-(rw-ind1)
#ind2 = min(ind1 + 1, rmx-1)
ind2 = ind1+1
weight2 = 1-weight1
ret[i,j] = weight1*rawmat[i,ind1] + weight2*rawmat[i,ind2]
return ret
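    # Worked example (illustrative, not part of the original class): for a
    # source row [0, 2, 4, 6, 8, 10] with dim = 3, rmx = 5, inc = 4/3 and
    # start = 2/3, so samples fall at fractional columns 0.67, 2.0 and 3.33,
    # giving interpolated values of roughly 1.33, 4.0 and 6.67.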
def to_mat(self, rawdat, duration):
if ('meta' in rawdat):
pre = int(rawdat['meta']['pre'])
elif ('ehistory' in rawdat):
pre = int(rawdat['ehistory']['meta']['pre']['length'])
elif ('hist' in rawdat):
pre = 0
rawmat = np.empty([len(keys),duration])
i = 0
try:
rawdat['data']
for key in keys:
j = 0
k = 0
for t in rawdat['data'][key]:
if ((j > pre) and (j < pre + duration)):
rawmat[i, k] = t
k = k + 1
j = j + 1
i = i + 1
except KeyError, e:
for key in keys:
j = 0
k = 0
if (key == 'overall'):
for t in rawdat['ehistory'][key]:
if ((j > pre) and (j < pre + duration)):
rawmat[i, k] = t
k = k + 1
j = j + 1
else:
for t in rawdat['ehistory']['freq'][key]:
if ((j > pre) and (j < pre + duration)):
rawmat[i, k] = t
k = k + 1
j = j + 1
i = i + 1
return rawmat
def get_data(self, rawdat, duration, dim):
rawmat = self.to_mat(rawdat, duration)
return self.lin_interp(rawmat, dim)
def __init__(self, row, dim, src='file'):
self.dim = dim
self.flag = 0
if (src=='file'):
self.id = int(row[0])
rawdat = json.loads(row[8])
else:
self.id = row['eventid']
rawdat = row
if not (('ehistory' in rawdat) or ('meta' in rawdat) or ('hist' in rawdat)):
self.flag = 1
else:
if (src=='file'):
self.label = int('aircraft' in json.loads(row[9]).keys())
else:
self.label = int('aircraft' in rawdat['ehistory']['meta'])
if ('meta' in rawdat):
self.duration = int(rawdat['meta']['hist'])
elif ('ehistory' in rawdat):
self.duration = int(rawdat['ehistory']['meta']['event']['length'])
elif ('hist' in rawdat):
self.duration = int(rawdat['hist'])
else:
self.flag = 1
if not self.flag == 1:
self.data = self.get_data(rawdat, self.duration, dim)
def present(self, n):
print self.id
def to_array(self):
return np.append(self.data.flatten(),self.duration).reshape((1,37*self.dim+1))
| 3,647 | 33.415094 | 262 | py |
aircraftnoise | aircraftnoise-master/classifier/preprocessing/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/classifier/preprocessing/preprocess.py | from preprocessor import Preprocessor
import sys
# Proportions of data in each resulting set
TRPROP = 1.0 # Training
TEPROP = 0.0 # Testing
# VALIDATION SET IS REST
# Names of files containing the raw data
input_files = ['../raw_data/oml_final.csv', '../raw_data/400_community_events.csv']
# IDs of events in the first set that kill it
bad = [53905373, 53906999, 53907026, 53907026, 53907030, 53905373,
53905426, 53907014, 53905400, 53905433, 53905397, 53905371,
53905392]
if __name__ == "__main__":
# Handle invalid calls
if (len(sys.argv) < 3):
print "Please provide dimensionality and output location"
sys.exit()
elif (len(sys.argv) > 3):
print "Expecting exactly two arguments (dimensionality and location)"
sys.exit()
    # Get provided dimensionality
dim = int(sys.argv[1])
location = sys.argv[2]
print
print '\033[92m' + "*************************************************" + '\033[0m'
print
print "Provided dimensionality:", dim
print "Provided location: ", location
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Constructing preprocesor object..."
print
ppr = Preprocessor()
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Fetching data from storage..."
print
X, Y, events_found = ppr.get_raw_data(dim, input_files, bad)
print
print "Number of raw events found in database: ", events_found
print "Number of raw events that script was able to parse:", X.shape[0]
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Removing invalid and outlying events from dataset..."
print
X, Y = ppr.remove_outliers(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Normalizing data to have mean zero..."
print
X, Y = ppr.normalize(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Partitioning the data into training, testing, and validation sets"
print
trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(X, Y, TRPROP, TEPROP)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Storing the data at", location
print
ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY, location)
print '\033[92m' + "*************************************************" + '\033[0m'
print
| 2,682 | 30.940476 | 86 | py |
aircraftnoise | aircraftnoise-master/classifier/preprocessing/preprocessor.py | import numpy as np
import tensorflow as tf
import csv
import json
import math
import sys
import random
import os
from event2d import Event2D
class Preprocessor:
    # Constructor
    # currently a no-op
    def __init__(self):
        pass
# Utility function of get_raw_data
# adds event to the full numpy array
def _encodenpy(self, e, x, y):
return np.concatenate((x, e.to_array()), axis=0), np.concatenate((y, np.array([[e.id, e.label]])), axis=0)
# Utility function of get_raw_data
# converts each line in input file into an event object
# then encodes the event as an array with above function
def _parse(self, rf, dim, bad):
X = np.zeros((0,dim*37+1))
Y = np.zeros((0,2))
has_colnames = csv.Sniffer().has_header(rf.read(1024))
rf.seek(0)
reader = csv.reader(rf)
if has_colnames:
next(reader)
num_rows = 0
for row in reader:
num_rows = num_rows + 1
if int(row[0]) not in bad:
event = Event2D(row, dim)
if event.flag != 1:
X, Y = self._encodenpy(event, X, Y)
return X, Y, num_rows
# called by main
# Stores raw data as an array with specified dimensionality*37 and durations
# concatenated
# returns tuple of input data and output data
def get_from_json(self, dim, input_data):
# Instantiate empty arrays for data
X = np.zeros((0,dim*37+1))
Y = np.zeros((0,2))
events_found = len(input_data)
# No need to parse since this is done
for dat in input_data:
event = Event2D(dat, dim, src='api')
X, Y = self._encodenpy(event, X, Y)
return X, Y, events_found
# called by main
# Stores raw data as an array with specified dimensionality*37 and durations
# concatenated
# returns tuple of input data and output data
def get_raw_data(self, dim, input_files, bad):
# Instantiate empty arrays for data
X = np.zeros((0,dim*37+1))
Y = np.zeros((0,2))
events_found = 0
for fil in input_files:
print "Reading from " + fil
rf = open(fil, 'rb')
tmpx, tmpy, n_rows = self._parse(rf, dim, bad)
events_found = events_found + n_rows
if (tmpx is not None):
X = np.concatenate((X, tmpx), axis=0)
Y = np.concatenate((Y, tmpy), axis=0)
return X, Y, events_found
# Called by main
# removes invalid and outlying events from dataset
def remove_outliers(self, X, Y):
Xshp1 = X.shape[1]
Xret = np.zeros((0,Xshp1), dtype=np.float32)
Yret = np.zeros((0,2), dtype=np.float32)
i = 0
for lin in X:
if not ((np.isnan(lin).any()) or (np.max(lin) > 1e+4) or (np.min(lin) < -1e+4)):
Xret = np.concatenate((Xret, np.reshape(X[i,:], (1,Xshp1))), axis=0)
Yret = np.concatenate((Yret, np.reshape(Y[i,:], (1,2))), axis=0)
i = i + 1
return Xret, Yret
# Called by main
# normalizes the data to have mean zero
def normalize(self, X, Y):
mean_duration = np.mean(X[:,-1])
mean_intensity = np.mean(X[:,0:-1])
print "Mean duration before normalization: ", mean_duration
print "Mean intensity before normalization:", mean_intensity
print
X[:,-1] = X[:,-1] - mean_duration
X[:,0:-1] = X[:,0:-1] - mean_intensity
print "Mean duration after normalization: ", np.mean(X[:,-1])
print "Mean intensity after normalization: ", np.mean(X[:,0:-1])
return X, Y
# Called by main
# partition the data into training, testing, and validation sets
def partition_for_training(self, X, Y, trprop, teprop):
trX = np.zeros((0,X.shape[1]), dtype=np.float32)
teX = np.zeros((0,X.shape[1]), dtype=np.float32)
vaX = np.zeros((0,X.shape[1]), dtype=np.float32)
trY = np.zeros((0,2), dtype=np.float32)
teY = np.zeros((0,2), dtype=np.float32)
vaY = np.zeros((0,2), dtype=np.float32)
for i in range(0,X.shape[0]):
r = random.random()
if r < trprop:
trX = np.concatenate((trX, np.reshape(X[i,:], (1,-1))), axis=0)
trY = np.concatenate((trY, np.reshape(Y[i,:], (1,2))), axis=0)
elif r < (trprop + teprop):
teX = np.concatenate((teX, np.reshape(X[i,:], (1,-1))), axis=0)
teY = np.concatenate((teY, np.reshape(Y[i,:], (1,2))), axis=0)
else:
vaX = np.concatenate((vaX, np.reshape(X[i,:], (1,-1))), axis=0)
vaY = np.concatenate((vaY, np.reshape(Y[i,:], (1,2))), axis=0)
return trX, trY, teX, teY, vaX, vaY
# Called by main
# save the data to disk
def store_training_partitions(self, trX, trY, teX, teY, vaX, vaY, location):
flocation = os.path.join(os.getcwd(), location)
trlocation = os.path.join(flocation, "training")
valocation = os.path.join(flocation, "validation")
telocation = os.path.join(flocation, "testing")
if (os.path.exists(flocation)):
print "Location:", location, "exists. Overwriting..."
if not (os.path.exists(trlocation)):
os.makedirs(trlocation)
if not (os.path.exists(valocation)):
os.makedirs(valocation)
if not (os.path.exists(telocation)):
os.makedirs(telocation)
else:
print "Location:", location, "does not exist. Creating..."
os.makedirs(flocation)
os.makedirs(trlocation)
os.makedirs(valocation)
os.makedirs(telocation)
print
np.save(os.path.join(trlocation,'trX.npy'), trX)
np.save(os.path.join(trlocation,'trY.npy'), trY)
np.save(os.path.join(telocation,'teX.npy'), teX)
np.save(os.path.join(telocation,'teY.npy'), teY)
np.save(os.path.join(valocation,'vaX.npy'), vaX)
np.save(os.path.join(valocation,'vaY.npy'), vaY)
# Called by main
    # Randomly partition data into ~equal size folds
def partition_for_cross_validation(self, X, Y, folds):
prop = 1.0/folds
ret = [[np.zeros((0,X.shape[1]), dtype=np.float32), np.zeros((0,2),
dtype=np.float32)] for i in range(0,folds)]
for i in range(0,X.shape[0]):
r = random.random()
for j in range(0, folds):
if ((r >= j*prop) and (r < (j+1)*prop)):
tmpX = np.concatenate((ret[j][0], np.reshape(X[i,:], (1,-1))), axis=0)
tmpY = np.concatenate((ret[j][1], np.reshape(Y[i,:], (1, 2))), axis=0)
ret[j] = [tmpX, tmpY]
return ret
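    # Usage sketch (illustrative values): partition 100 events into 5 folds.
    #   ppr = Preprocessor()
    #   folds = ppr.partition_for_cross_validation(X, Y, 5)
    #   print [fold[0].shape[0] for fold in folds]  # ~20 events per fold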
# Called by main
# Store each fold to disk in specified location
def store_cv_folds(self, lst, location):
flocation = os.path.join(os.getcwd(), location)
if (os.path.exists(flocation)):
print "Location:", location, "exists. Overwriting..."
else:
print "Location:", location, "does not exist. Creating..."
os.makedirs(flocation)
for i in range(0,len(lst)):
tmpX = lst[i][0]
tmpY = lst[i][1]
np.save(os.path.join(flocation, str(i) + 'X.npy'), tmpX)
np.save(os.path.join(flocation, str(i) + 'Y.npy'), tmpY)
print
| 7,521 | 34.649289 | 115 | py |
aircraftnoise | aircraftnoise-master/histogram/make_histogram.py | import re
import numpy as np
import matplotlib.pyplot as plt
f = open('cvlog.log')
accuracies = []
for line in f:
    # Slice offsets skip the fixed-width "timestamp [LEVEL]" prefix on each log line
    if line[33:].startswith('This Accuracy:'):
        this_accuracy = float(line[48:])
if (this_accuracy > 0.1):
accuracies = accuracies + [this_accuracy]
accuracies = np.array(accuracies)
plt.hist(100*accuracies, bins=8, range=(93.3, 100))
plt.title("Fold Accuracies During Cross Validation")
plt.xlabel("Accuracy (%)")
plt.ylabel("Incidence")
plt.show()
print np.std(accuracies)
print np.mean(accuracies)
print np.median(accuracies)
| 577 | 20.407407 | 53 | py |
aircraftnoise | aircraftnoise-master/preprocessing/crossvalidate.py | from preprocessor import Preprocessor
import sys
# Proportions of data in each resulting set
TRPROP = 0.8 # Training
TEPROP = 0.0 # Testing
# VALIDATION SET IS REST
# Names of files containing the raw data
input_files = ['../raw_data/oml_final.csv', '../raw_data/400_community_events.csv']
# IDs of events in the first set that kill it
bad = [53905373, 53906999, 53907026, 53907026, 53907030, 53905373,
53905426, 53907014, 53905400, 53905433, 53905397, 53905371,
53905392]
if __name__ == "__main__":
# Handle invalid calls
if (len(sys.argv) < 4):
print "Please provide dimensionality, output location, and number of folds"
sys.exit()
elif (len(sys.argv) > 4):
print "Expecting exactly three arguments (dimensionality, location, and folds)"
sys.exit()
    # Get provided dimensionality
dim = int(sys.argv[1])
location = sys.argv[2]
folds = int(sys.argv[3])
print
print '\033[92m' + "*************************************************" + '\033[0m'
print
print "Provided dimensionality:", dim
print "Provided location: ", location
print "Provided folds: ", folds
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Constructing preprocesor object..."
print
ppr = Preprocessor()
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Fetching data from storage..."
print
X, Y, events_found = ppr.get_raw_data(dim, input_files, bad)
print
print "Number of raw events found in database: ", events_found
print "Number of raw events that script was able to parse:", X.shape[0]
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Removing invalid and outlying events from dataset..."
print
X, Y = ppr.remove_outliers(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Normalizing data to have mean zero..."
print
X, Y = ppr.normalize(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Partitioning the data into folds"
print
lst = ppr.partition_for_cross_validation(X, Y, folds)
for i in range(0,folds):
print "fold:", i, lst[i][0].shape[0], "events"
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Storing the data at", location
print
ppr.store_cv_folds(lst, location)
print '\033[92m' + "*************************************************" + '\033[0m'
print
| 2,772 | 30.511364 | 87 | py |
aircraftnoise | aircraftnoise-master/preprocessing/event2d.py | import tensorflow as tf
import numpy as np
import json
import math
import sys
import matplotlib.pyplot as plt
keys = ["6.3","8.0","10.0","12.5","16.0","20.0","25.0","31.5","40.0","50.0","63.0","80.0","100","125","160","200","250","315","400","500","630","800","1000","1250","1600","2000","2500","3150","4000","5000","6300","8000","10000","12500","16000","20000","overall"]
class Event2D:
def lin_interp(self, rawmat, dim):
ncol = np.shape(rawmat)[0]
ret = np.empty([ncol, dim], dtype=float)
rmx = np.shape(rawmat)[1] - 1
inc = 1.0*(rmx-1)/dim
start = inc/2
for i in range(ncol):
for j in range(dim):
rw = start + j*inc
ind1 = int(math.floor(rw))
weight1 = 1-(rw-ind1)
#ind2 = min(ind1 + 1, rmx-1)
ind2 = ind1+1
weight2 = 1-weight1
ret[i,j] = weight1*rawmat[i,ind1] + weight2*rawmat[i,ind2]
return ret
def to_mat(self, rawdat, duration):
if ('meta' in rawdat):
pre = int(rawdat['meta']['pre'])
elif ('ehistory' in rawdat):
pre = int(rawdat['ehistory']['meta']['pre']['length'])
elif ('hist' in rawdat):
pre = 0
rawmat = np.empty([len(keys),duration])
i = 0
try:
rawdat['data']
for key in keys:
j = 0
k = 0
for t in rawdat['data'][key]:
if ((j > pre) and (j < pre + duration)):
rawmat[i, k] = t
k = k + 1
j = j + 1
i = i + 1
except KeyError, e:
for key in keys:
j = 0
k = 0
if (key == 'overall'):
for t in rawdat['ehistory'][key]:
if ((j > pre) and (j < pre + duration)):
rawmat[i, k] = t
k = k + 1
j = j + 1
else:
for t in rawdat['ehistory']['freq'][key]:
if ((j > pre) and (j < pre + duration)):
rawmat[i, k] = t
k = k + 1
j = j + 1
i = i + 1
return rawmat
def get_data(self, rawdat, duration, dim):
self.rawmat = self.to_mat(rawdat, duration)
return self.lin_interp(self.rawmat, dim)
def __init__(self, row, dim):
self.dim = dim
self.flag = 0
self.id = int(row[0])
rawdat = json.loads(row[8])
if not (('ehistory' in rawdat) or ('meta' in rawdat) or ('hist' in rawdat)):
self.flag = 1
self.label = -1
self.rawmat = np.zeros((10,10))
else:
self.label = int('aircraft' in json.loads(row[9]).keys())
if ('meta' in rawdat):
self.duration = int(rawdat['meta']['hist'])
elif ('ehistory' in rawdat):
self.duration = int(rawdat['ehistory']['meta']['event']['length'])
elif ('hist' in rawdat):
self.duration = int(rawdat['hist'])
else:
self.flag = 1
if not self.flag == 1:
self.data = self.get_data(rawdat, self.duration, dim)
def present(self, n):
print self.id
def visualize(self):
print(self.id, self.label)
plt.imshow(self.rawmat[:-1, :-1])
plt.axis('off')
plt.xlabel("Time")
plt.ylabel("Frequency")
plt.show()
def to_array(self):
return np.append(self.data.flatten(),self.duration).reshape((1,37*self.dim+1))
| 3,721 | 31.649123 | 262 | py |
aircraftnoise | aircraftnoise-master/preprocessing/__init__.py | 0 | 0 | 0 | py |
|
aircraftnoise | aircraftnoise-master/preprocessing/preprocess.py | from preprocessor import Preprocessor
import sys
# Proportions of data in each resulting set
TRPROP = 1.0 # Training
TEPROP = 0.0 # Testing
# VALIDATION SET IS REST
# Names of files containing the raw data
input_files = ['data/400_community_events.csv', 'data/oml_final.csv']
# IDs of events in the first set that kill it
bad = [53905373, 53906999, 53907026, 53907026, 53907030, 53905373,
53905426, 53907014, 53905400, 53905433, 53905397, 53905371,
53905392]
if __name__ == "__main__":
# Handle invalid calls
if (len(sys.argv) < 3):
print "Please provide dimensionality and output location"
sys.exit()
elif (len(sys.argv) > 3):
print "Expecting exactly two arguments (dimensionality and location)"
sys.exit()
    # Get provided dimensionality
dim = int(sys.argv[1])
location = sys.argv[2]
print
print '\033[92m' + "*************************************************" + '\033[0m'
print
print "Provided dimensionality:", dim
print "Provided location: ", location
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Constructing preprocesor object..."
print
ppr = Preprocessor()
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Fetching data from storage..."
print
X, Y, events_found = ppr.get_raw_data(dim, input_files, bad)
print
print "Number of raw events found in database: ", events_found
print "Number of raw events that script was able to parse:", X.shape[0]
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Removing invalid and outlying events from dataset..."
print
X, Y = ppr.remove_outliers(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Normalizing data to have mean zero..."
print
X, Y = ppr.normalize(X, Y)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Partitioning the data into training, testing, and validation sets"
print
trX, trY, teX, teY, vaX, vaY = ppr.partition_for_training(X, Y, TRPROP, TEPROP)
print
print '\033[94m' + "*************************************************" + '\033[0m'
print
print "Storing the data at", location
print
ppr.store_training_partitions(trX, trY, teX, teY, vaX, vaY, location)
print '\033[92m' + "*************************************************" + '\033[0m'
print
| 2,668 | 30.77381 | 86 | py |
aircraftnoise | aircraftnoise-master/preprocessing/preprocessor.py | import numpy as np
import tensorflow as tf
import csv
import json
import math
import sys
import random
import os
from event2d import Event2D
class Preprocessor:
    # Constructor
    # currently a no-op
    def __init__(self):
        pass
# Utility function of get_raw_data
# adds event to the full numpy array
def _encodenpy(self, e, x, y):
return np.concatenate((x, e.to_array()), axis=0), np.concatenate((y, np.array([[e.id, e.label]])), axis=0)
# Utility function of get_raw_data
# converts each line in input file into an event object
# then encodes the event as an array with above function
def _parse(self, rf, dim, bad):
X = np.zeros((0,dim*37+1))
Y = np.zeros((0,2))
has_colnames = csv.Sniffer().has_header(rf.read(1024))
rf.seek(0)
reader = csv.reader(rf)
if has_colnames:
next(reader)
num_rows = 0
for row in reader:
num_rows = num_rows + 1
if int(row[0]) not in bad:
event = Event2D(row, dim)
event.visualize()
if event.flag != 1:
X, Y = self._encodenpy(event, X, Y)
return X, Y, num_rows
# called by main
# Stores raw data as an array with specified dimensionality*37 and durations
# concatenated
# returns tuple of input data and output data
def get_raw_data(self, dim, input_files, bad):
# Instantiate empty arrays for data
X = np.zeros((0,dim*37+1))
Y = np.zeros((0,2))
events_found = 0
for fil in input_files:
print "Reading from " + fil
rf = open(fil, 'rb')
tmpx, tmpy, n_rows = self._parse(rf, dim, bad)
events_found = events_found + n_rows
if (tmpx is not None):
X = np.concatenate((X, tmpx), axis=0)
Y = np.concatenate((Y, tmpy), axis=0)
return X, Y, events_found
# Called by main
# removes invalid and outlying events from dataset
def remove_outliers(self, X, Y):
Xshp1 = X.shape[1]
Xret = np.zeros((0,Xshp1), dtype=np.float32)
Yret = np.zeros((0,2), dtype=np.float32)
i = 0
for lin in X:
if not ((np.isnan(lin).any()) or (np.max(lin) > 1e+4) or (np.min(lin) < -1e+4)):
Xret = np.concatenate((Xret, np.reshape(X[i,:], (1,Xshp1))), axis=0)
Yret = np.concatenate((Yret, np.reshape(Y[i,:], (1,2))), axis=0)
i = i + 1
return Xret, Yret
# Called by main
# normalizes the data to have mean zero
def normalize(self, X, Y):
mean_duration = np.mean(X[:,-1])
mean_intensity = np.mean(X[:,0:-1])
print "Mean duration before normalization: ", mean_duration
print "Mean intensity before normalization:", mean_intensity
print
X[:,-1] = X[:,-1] - mean_duration
X[:,0:-1] = X[:,0:-1] - mean_intensity
print "Mean duration after normalization: ", np.mean(X[:,-1])
print "Mean intensity after normalization: ", np.mean(X[:,0:-1])
return X, Y
# Called by main
# partition the data into training, testing, and validation sets
def partition_for_training(self, X, Y, trprop, teprop):
trX = np.zeros((0,X.shape[1]), dtype=np.float32)
teX = np.zeros((0,X.shape[1]), dtype=np.float32)
vaX = np.zeros((0,X.shape[1]), dtype=np.float32)
trY = np.zeros((0,2), dtype=np.float32)
teY = np.zeros((0,2), dtype=np.float32)
vaY = np.zeros((0,2), dtype=np.float32)
for i in range(0,X.shape[0]):
r = random.random()
if r < trprop:
trX = np.concatenate((trX, np.reshape(X[i,:], (1,-1))), axis=0)
trY = np.concatenate((trY, np.reshape(Y[i,:], (1,2))), axis=0)
elif r < (trprop + teprop):
teX = np.concatenate((teX, np.reshape(X[i,:], (1,-1))), axis=0)
teY = np.concatenate((teY, np.reshape(Y[i,:], (1,2))), axis=0)
else:
vaX = np.concatenate((vaX, np.reshape(X[i,:], (1,-1))), axis=0)
vaY = np.concatenate((vaY, np.reshape(Y[i,:], (1,2))), axis=0)
return trX, trY, teX, teY, vaX, vaY
# Called by main
# save the data to disk
def store_training_partitions(self, trX, trY, teX, teY, vaX, vaY, location):
flocation = os.path.join(os.getcwd(), location)
trlocation = os.path.join(flocation, "training")
valocation = os.path.join(flocation, "validation")
telocation = os.path.join(flocation, "testing")
if (os.path.exists(flocation)):
print "Location:", location, "exists. Overwriting..."
if not (os.path.exists(trlocation)):
os.makedirs(trlocation)
if not (os.path.exists(valocation)):
os.makedirs(valocation)
if not (os.path.exists(telocation)):
os.makedirs(telocation)
else:
print "Location:", location, "does not exist. Creating..."
os.makedirs(flocation)
os.makedirs(trlocation)
os.makedirs(valocation)
os.makedirs(telocation)
print
np.save(os.path.join(trlocation,'trX.npy'), trX)
np.save(os.path.join(trlocation,'trY.npy'), trY)
np.save(os.path.join(telocation,'teX.npy'), teX)
np.save(os.path.join(telocation,'teY.npy'), teY)
np.save(os.path.join(valocation,'vaX.npy'), vaX)
np.save(os.path.join(valocation,'vaY.npy'), vaY)
# Called by main
    # Randomly partition data into ~equal size folds
def partition_for_cross_validation(self, X, Y, folds):
prop = 1.0/folds
ret = [[np.zeros((0,X.shape[1]), dtype=np.float32), np.zeros((0,2),
dtype=np.float32)] for i in range(0,folds)]
for i in range(0,X.shape[0]):
r = random.random()
for j in range(0, folds):
if ((r >= j*prop) and (r < (j+1)*prop)):
tmpX = np.concatenate((ret[j][0], np.reshape(X[i,:], (1,-1))), axis=0)
tmpY = np.concatenate((ret[j][1], np.reshape(Y[i,:], (1, 2))), axis=0)
ret[j] = [tmpX, tmpY]
return ret
# Called by main
# Store each fold to disk in specified location
def store_cv_folds(self, lst, location):
flocation = os.path.join(os.getcwd(), location)
if (os.path.exists(flocation)):
print "Location:", location, "exists. Overwriting..."
else:
print "Location:", location, "does not exist. Creating..."
os.makedirs(flocation)
for i in range(0,len(lst)):
tmpX = lst[i][0]
tmpY = lst[i][1]
np.save(os.path.join(flocation, str(i) + 'X.npy'), tmpX)
np.save(os.path.join(flocation, str(i) + 'Y.npy'), tmpY)
print
| 6,979 | 35.165803 | 115 | py |
bottom-up-attention | bottom-up-attention-master/tools/compress_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compress a Fast R-CNN network using truncated SVD."""
import _init_paths
import caffe
import argparse
import numpy as np
import os, sys
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Compress a Fast R-CNN network')
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the uncompressed network',
default=None, type=str)
parser.add_argument('--def-svd', dest='prototxt_svd',
help='prototxt file defining the SVD compressed network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to compress',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def compress_weights(W, l):
"""Compress the weight matrix W of an inner product (fully connected) layer
using truncated SVD.
Parameters:
W: N x M weights matrix
l: number of singular values to retain
Returns:
    Ul, L: matrices such that W ~= Ul * L
"""
# numpy doesn't seem to have a fast truncated SVD algorithm...
# this could be faster
U, s, V = np.linalg.svd(W, full_matrices=False)
Ul = U[:, :l]
sl = s[:l]
Vl = V[:l, :]
L = np.dot(np.diag(sl), Vl)
return Ul, L
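# Hedged usage sketch (added illustration, not part of the original tool):
# factor a random weight matrix and check the rank-l reconstruction error.
def _demo_compress_weights():
    rng = np.random.RandomState(0)
    W = rng.randn(256, 512)
    Ul, L = compress_weights(W, 64)
    # Ul is 256x64 and L is 64x512, roughly a 2.7x parameter reduction here
    err = np.linalg.norm(W - np.dot(Ul, L)) / np.linalg.norm(W)
    print('relative reconstruction error: {:.4f}'.format(err))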
def main():
args = parse_args()
# prototxt = 'models/VGG16/test.prototxt'
# caffemodel = 'snapshots/vgg16_fast_rcnn_iter_40000.caffemodel'
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
# prototxt_svd = 'models/VGG16/svd/test_fc6_fc7.prototxt'
# caffemodel = 'snapshots/vgg16_fast_rcnn_iter_40000.caffemodel'
net_svd = caffe.Net(args.prototxt_svd, args.caffemodel, caffe.TEST)
print('Uncompressed network {} : {}'.format(args.prototxt, args.caffemodel))
print('Compressed network prototxt {}'.format(args.prototxt_svd))
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svd'
out_dir = os.path.dirname(args.caffemodel)
# Compress fc6
if net_svd.params.has_key('fc6_L'):
l_fc6 = net_svd.params['fc6_L'][0].data.shape[0]
print(' fc6_L bottleneck size: {}'.format(l_fc6))
# uncompressed weights and biases
W_fc6 = net.params['fc6'][0].data
B_fc6 = net.params['fc6'][1].data
print(' compressing fc6...')
Ul_fc6, L_fc6 = compress_weights(W_fc6, l_fc6)
assert(len(net_svd.params['fc6_L']) == 1)
# install compressed matrix factors (and original biases)
net_svd.params['fc6_L'][0].data[...] = L_fc6
net_svd.params['fc6_U'][0].data[...] = Ul_fc6
net_svd.params['fc6_U'][1].data[...] = B_fc6
out += '_fc6_{}'.format(l_fc6)
# Compress fc7
if net_svd.params.has_key('fc7_L'):
l_fc7 = net_svd.params['fc7_L'][0].data.shape[0]
        print(' fc7_L bottleneck size: {}'.format(l_fc7))
W_fc7 = net.params['fc7'][0].data
B_fc7 = net.params['fc7'][1].data
print(' compressing fc7...')
Ul_fc7, L_fc7 = compress_weights(W_fc7, l_fc7)
assert(len(net_svd.params['fc7_L']) == 1)
net_svd.params['fc7_L'][0].data[...] = L_fc7
net_svd.params['fc7_U'][0].data[...] = Ul_fc7
net_svd.params['fc7_U'][1].data[...] = B_fc7
out += '_fc7_{}'.format(l_fc7)
filename = '{}/{}.caffemodel'.format(out_dir, out)
net_svd.save(filename)
    print('Wrote svd model to: {:s}'.format(filename))
if __name__ == '__main__':
main()
| 3,918 | 30.103175 | 81 | py |
bottom-up-attention | bottom-up-attention-master/tools/read_tsv.py | #!/usr/bin/env python
import base64
import numpy as np
import csv
import sys
import zlib
import time
import mmap
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
infile = '/data/coco/tsv/trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv'
if __name__ == '__main__':
# Verify we can read a tsv
in_data = {}
    with open(infile, "rb") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in ['boxes', 'features']:
item[field] = np.frombuffer(base64.decodestring(item[field]),
dtype=np.float32).reshape((item['num_boxes'],-1))
in_data[item['image_id']] = item
break
print in_data
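# Hedged counterpart sketch (added; the function name and `writer` are
# assumptions): emit one row in the same base64-over-TSV layout that the
# reader above parses. `writer` is assumed to be a
# csv.DictWriter(out_file, delimiter='\t', fieldnames=FIELDNAMES).
def write_tsv_row(writer, image_id, image_w, image_h, boxes, features):
    # boxes/features: float32 arrays of shape (num_boxes, -1)
    writer.writerow({
        'image_id': image_id,
        'image_w': image_w,
        'image_h': image_h,
        'num_boxes': boxes.shape[0],
        'boxes': base64.b64encode(boxes.astype(np.float32).tobytes()),
        'features': base64.b64encode(features.astype(np.float32).tobytes()),
    })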
| 1,048 | 26.605263 | 85 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_faster_rcnn_alt_opt.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ZF")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
# Faster R-CNN Alternating Optimization
n = 'faster_rcnn_alt_opt'
# Solver for each training stage
solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
[net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
[net_name, n, 'stage2_rpn_solver60k80k.pt'],
[net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [80000, 40000, 80000, 40000]
# max_iters = [100, 100, 100, 100]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
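# Minimal sketch of the launch pattern used for each stage below (added for
# clarity; it mirrors the mp.Process + mp.Queue handshake in __main__):
def _run_stage(target, **kwargs):
    q = mp.Queue()
    kwargs['queue'] = q
    p = mp.Process(target=target, kwargs=kwargs)
    p.start()
    out = q.get() # stage result arrives via the queue
    p.join() # process exit is what actually frees the GPU memory
    return out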
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None):
"""Train a Fast R-CNN using proposals generated by an RPN.
"""
cfg.TRAIN.HAS_RPN = False # not generating prosals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 2
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train Fast R-CNN
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
fast_rcnn_model_path = model_paths[-1]
# Send Fast R-CNN model path over the multiprocessing queue
queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
# queue for communicated results between processes
mp_queue = mp.Queue()
# solves, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(fast_rcnn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 Fast R-CNN, init from stage 2 RPN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rpn_stage2_out['model_path']),
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage2_out = mp_queue.get()
p.join()
# Create final model (just a copy of the last stage)
final_path = os.path.join(
os.path.dirname(fast_rcnn_stage2_out['model_path']),
args.net_name + '_faster_rcnn_final.caffemodel')
print 'cp {} -> {}'.format(
fast_rcnn_stage2_out['model_path'], final_path)
shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
print 'Final model: {}'.format(final_path)
| 12,871 | 37.423881 | 80 | py |
bottom-up-attention | bottom-up-attention-master/tools/reval.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Reval = re-eval. Re-evaluate saved detections."""
import _init_paths
from fast_rcnn.test import apply_nms
from fast_rcnn.config import cfg
from datasets.factory import get_imdb
import cPickle
import os, sys, argparse
import numpy as np
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Re-evaluate results')
parser.add_argument('output_dir', nargs=1, help='results directory',
type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to re-evaluate',
default='voc_2007_test', type=str)
parser.add_argument('--matlab', dest='matlab_eval',
help='use matlab for evaluation',
action='store_true')
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--nms', dest='apply_nms', help='apply nms',
action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def from_dets(imdb_name, output_dir, args):
imdb = get_imdb(imdb_name)
imdb.competition_mode(args.comp_mode)
imdb.config['matlab_eval'] = args.matlab_eval
with open(os.path.join(output_dir, 'detections.pkl'), 'rb') as f:
dets = cPickle.load(f)
if args.apply_nms:
print 'Applying NMS to all detections'
nms_dets = apply_nms(dets, cfg.TEST.NMS)
else:
nms_dets = dets
print 'Evaluating detections'
imdb.evaluate_detections(nms_dets, output_dir)
if __name__ == '__main__':
args = parse_args()
output_dir = os.path.abspath(args.output_dir[0])
imdb_name = args.imdb_name
from_dets(imdb_name, output_dir, args)
| 2,126 | 30.746269 | 76 | py |
bottom-up-attention | bottom-up-attention-master/tools/test_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net,test_net_with_gt_boxes
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=400, type=int)
parser.add_argument('--rpn_file', dest='rpn_file',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, caffe.TEST, weights=args.caffemodel)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
if cfg.TEST.PROPOSAL_METHOD == 'rpn':
imdb.config['rpn_file'] = args.rpn_file
test_net(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
if cfg.TEST.HAS_ATTRIBUTES or cfg.TEST.HAS_RELATIONS:
net = caffe.Net(args.prototxt.replace(".prototxt","_gt.prototxt"), caffe.TEST, weights=args.caffemodel)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
test_net_with_gt_boxes(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
| 3,742 | 35.696078 | 111 | py |
bottom-up-attention | bottom-up-attention-master/tools/_init_paths.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Set up paths for Fast R-CNN."""
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
# Add caffe to PYTHONPATH
caffe_path = osp.join(this_dir, '..', 'caffe', 'python')
add_path(caffe_path)
# Add lib to PYTHONPATH
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
| 627 | 23.153846 | 58 | py |
bottom-up-attention | bottom-up-attention-master/tools/demo_rfcn.py | #!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuwen Xiong
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'ResNet-101': ('ResNet-101',
'resnet101_rfcn_final.caffemodel'),
'ResNet-50': ('ResNet-50',
'resnet50_rfcn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4:8]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [ResNet-101]',
choices=NETS.keys(), default='ResNet-101')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'rfcn_end2end', 'test_agnostic.prototxt')
caffemodel = os.path.join(cfg.DATA_DIR, 'rfcn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\n').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
| 4,938 | 31.708609 | 85 | py |
bottom-up-attention | bottom-up-attention-master/tools/demo.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import matplotlib
matplotlib.use('Agg')
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
plt.savefig(im_file.replace(".jpg", "_demo.jpg"))
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
| 5,123 | 31.846154 | 80 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_svms.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
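    # Note on the value returned above: SVMClassTrainer.train applies it as
    # `X * feature_scale`, so the mean L2 norm of the scaled features comes
    # out at roughly TARGET_NORM, since
    #   scale * mean(||feat||) = (TARGET_NORM / mean(||feat||)) * mean(||feat||)
    #                          = TARGET_NORM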
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| 13,480 | 37.081921 | 80 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_net_multi_gpu.py | #!/usr/bin/env python
# --------------------------------------------------------
# Written by Bharat Singh
# Modified version of py-R-FCN
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
import _init_paths
from fast_rcnn.train_multi_gpu import get_training_roidb, train_net_multi_gpu
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument("--gpu_id", type=str, default='0',
help="List of device ids.")
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def combined_roidb(imdb_names):
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
imdb = datasets.imdb.imdb(imdb_names)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb
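# Hedged usage sketch of the '+'-separated imdb syntax parsed above (the
# dataset names and placeholder paths are only an example):
#   ./tools/train_net_multi_gpu.py --gpu_id 0,1 \
#       --imdb voc_2007_trainval+voc_2012_trainval \
#       --solver <solver.prototxt> --weights <pretrained.caffemodel>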
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
gpu_id = args.gpu_id
gpu_list = gpu_id.split(',')
gpus = [int(i) for i in gpu_list]
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
#caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
imdb, roidb = combined_roidb(args.imdb_name)
print '{:d} roidb entries'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
train_net_multi_gpu(args.solver, roidb, output_dir,
pretrained_model=args.pretrained_model,
max_iter=args.max_iters, gpus=gpus)
| 3,684 | 32.5 | 78 | py |
bottom-up-attention | bottom-up-attention-master/tools/generate_tsv.py | #!/usr/bin/env python
"""Generate bottom-up attention features as a tsv file. Can use multiple gpus, each produces a
separate tsv file that can be merged later (e.g. by using merge_tsv function).
Modify the load_image_ids script as necessary for your data location. """
# Example:
# ./tools/generate_tsv.py --gpu 0,1,2,3,4,5,6,7 --cfg experiments/cfgs/faster_rcnn_end2end_resnet.yml --def models/vg/ResNet-101/faster_rcnn_end2end/test.prototxt --out test2014_resnet101_faster_rcnn_genome.tsv --net data/faster_rcnn_models/resnet101_faster_rcnn_final.caffemodel --split coco_test2014
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from fast_rcnn.test import im_detect,_get_blobs
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import caffe
import argparse
import pprint
import time, os, sys
import base64
import numpy as np
import cv2
import csv
from multiprocessing import Process
import random
import json
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
# Settings for the number of features per image. To re-create pretrained features with 36 features
# per image, set both values to 36.
MIN_BOXES = 10
MAX_BOXES = 100
def load_image_ids(split_name):
    ''' Load a list of (path, image_id) tuples. Modify this to suit your data locations. '''
split = []
if split_name == 'coco_test2014':
with open('/data/coco/annotations/image_info_test2014.json') as f:
data = json.load(f)
for item in data['images']:
image_id = int(item['id'])
filepath = os.path.join('/data/test2014/', item['file_name'])
split.append((filepath,image_id))
elif split_name == 'coco_test2015':
with open('/data/coco/annotations/image_info_test2015.json') as f:
data = json.load(f)
for item in data['images']:
image_id = int(item['id'])
filepath = os.path.join('/data/test2015/', item['file_name'])
split.append((filepath,image_id))
elif split_name == 'genome':
with open('/data/visualgenome/image_data.json') as f:
for item in json.load(f):
image_id = int(item['image_id'])
filepath = os.path.join('/data/visualgenome/', item['url'].split('rak248/')[-1])
split.append((filepath,image_id))
else:
print 'Unknown split'
return split
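# Hedged sketch of extending load_image_ids with a custom split (the split
# name, directory and numeric-filename convention are assumptions):
#   elif split_name == 'my_split':
#       for fname in os.listdir('/data/my_images'):
#           split.append((os.path.join('/data/my_images', fname),
#                         int(os.path.splitext(fname)[0])))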
def get_detections_from_im(net, im_file, image_id, conf_thresh=0.2):
im = cv2.imread(im_file)
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
    # Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = np.zeros((rois.shape[0]))
for cls_ind in range(1,cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = np.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = np.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < MIN_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MIN_BOXES]
elif len(keep_boxes) > MAX_BOXES:
keep_boxes = np.argsort(max_conf)[::-1][:MAX_BOXES]
return {
'image_id': image_id,
'image_h': np.size(im, 0),
'image_w': np.size(im, 1),
'num_boxes' : len(keep_boxes),
'boxes': base64.b64encode(cls_boxes[keep_boxes]),
'features': base64.b64encode(pool5[keep_boxes])
}
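# Hedged sketch (not used by this script) of reading a row written above back
# into numpy arrays; assumes the encoded arrays were float32, which matches
# the caffe blobs used here.
def _decode_tsv_item_sketch(item):
    num_boxes = int(item['num_boxes'])
    boxes = np.frombuffer(base64.b64decode(item['boxes']),
                          dtype=np.float32).reshape(num_boxes, 4)
    features = np.frombuffer(base64.b64decode(item['features']),
                             dtype=np.float32).reshape(num_boxes, -1)
    return boxes, features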
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id(s) to use',
default='0', type=str)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to use',
default=None, type=str)
parser.add_argument('--out', dest='outfile',
help='output filepath',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--split', dest='data_split',
help='dataset to use',
default='karpathy_train', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def generate_tsv(gpu_id, prototxt, weights, image_ids, outfile):
# First check if file exists, and if it is complete
wanted_ids = set([int(image_id[1]) for image_id in image_ids])
found_ids = set()
if os.path.exists(outfile):
with open(outfile) as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
found_ids.add(int(item['image_id']))
missing = wanted_ids - found_ids
if len(missing) == 0:
print 'GPU {:d}: already completed {:d}'.format(gpu_id, len(image_ids))
else:
print 'GPU {:d}: missing {:d}/{:d}'.format(gpu_id, len(missing), len(image_ids))
if len(missing) > 0:
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net = caffe.Net(prototxt, caffe.TEST, weights=weights)
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter = '\t', fieldnames = FIELDNAMES)
_t = {'misc' : Timer()}
count = 0
for im_file,image_id in image_ids:
if int(image_id) in missing:
_t['misc'].tic()
writer.writerow(get_detections_from_im(net, im_file, image_id))
_t['misc'].toc()
if (count % 100) == 0:
print 'GPU {:d}: {:d}/{:d} {:.3f}s (projected finish: {:.2f} hours)' \
.format(gpu_id, count+1, len(missing), _t['misc'].average_time,
_t['misc'].average_time*(len(missing)-count)/3600)
count += 1
def merge_tsvs():
test = ['/work/data/tsv/test2015/resnet101_faster_rcnn_final_test.tsv.%d' % i for i in range(8)]
outfile = '/work/data/tsv/merged.tsv'
with open(outfile, 'ab') as tsvfile:
writer = csv.DictWriter(tsvfile, delimiter = '\t', fieldnames = FIELDNAMES)
for infile in test:
with open(infile) as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
try:
writer.writerow(item)
except Exception as e:
print e
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
gpu_id = args.gpu_id
gpu_list = gpu_id.split(',')
gpus = [int(i) for i in gpu_list]
print('Using config:')
pprint.pprint(cfg)
assert cfg.TEST.HAS_RPN
image_ids = load_image_ids(args.data_split)
random.seed(10)
random.shuffle(image_ids)
# Split image ids between gpus
image_ids = [image_ids[i::len(gpus)] for i in range(len(gpus))]
caffe.init_log()
caffe.log('Using devices %s' % str(gpus))
procs = []
for i,gpu_id in enumerate(gpus):
outfile = '%s.%d' % (args.outfile, gpu_id)
p = Process(target=generate_tsv,
args=(gpu_id, args.prototxt, args.caffemodel, image_ids[i], outfile))
p.daemon = True
p.start()
procs.append(p)
for p in procs:
p.join()
| 8,584 | 35.688034 | 301 | py |
bottom-up-attention | bottom-up-attention-master/tools/eval_recall.py | #!/usr/bin/env python
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import argparse
import time, os, sys
import numpy as np
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--method', dest='method',
help='proposal method',
default='selective_search', type=str)
parser.add_argument('--rpn-file', dest='rpn_file',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
imdb = get_imdb(args.imdb_name)
imdb.set_proposal_method(args.method)
if args.rpn_file is not None:
imdb.config['rpn_file'] = args.rpn_file
candidate_boxes = None
if 0:
import scipy.io as sio
filename = 'debug/stage1_rpn_voc_2007_test.mat'
raw_data = sio.loadmat(filename)['aboxes'].ravel()
candidate_boxes = raw_data
ar, gt_overlaps, recalls, thresholds = \
imdb.evaluate_recall(candidate_boxes=candidate_boxes)
print 'Method: {}'.format(args.method)
print 'AverageRec: {:.3f}'.format(ar)
def recall_at(t):
ind = np.where(thresholds > t - 1e-5)[0][0]
assert np.isclose(thresholds[ind], t)
return recalls[ind]
print '[email protected]: {:.3f}'.format(recall_at(0.5))
print '[email protected]: {:.3f}'.format(recall_at(0.6))
print '[email protected]: {:.3f}'.format(recall_at(0.7))
print '[email protected]: {:.3f}'.format(recall_at(0.8))
print '[email protected]: {:.3f}'.format(recall_at(0.9))
# print again for easy spreadsheet copying
print '{:.3f}'.format(ar)
print '{:.3f}'.format(recall_at(0.5))
print '{:.3f}'.format(recall_at(0.6))
print '{:.3f}'.format(recall_at(0.7))
print '{:.3f}'.format(recall_at(0.8))
print '{:.3f}'.format(recall_at(0.9))
| 2,265 | 30.915493 | 77 | py |
bottom-up-attention | bottom-up-attention-master/tools/rpn_generate.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast/er/ R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Generate RPN proposals."""
import _init_paths
import numpy as np
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import cPickle
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# RPN test settings
cfg.TEST.RPN_PRE_NMS_TOP_N = -1
cfg.TEST.RPN_POST_NMS_TOP_N = 2000
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb_boxes = imdb_proposals(net, imdb)
output_dir = get_output_dir(imdb, net)
rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_file)
| 2,994 | 31.554348 | 78 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_rfcn_alt_opt_5stage.py | #!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""Train a R-FCN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("R-FCN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals, imdb_rpn_compute_stats
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a R-FCN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ResNet-101")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--imdb_test', dest='imdb_test_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--model', dest='model_name',
help='folder name of model',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(imdb_name, net_name, model_name):
# R-FCN Alternating Optimization
# Solver for each training stage
if imdb_name.startswith('coco'):
solvers = [[net_name, model_name, 'stage1_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage2_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage3_rpn_solver360k480k.pt']]
solvers = [os.path.join('.', 'models', 'coco', *s) for s in solvers]
# Iterations for each training stage
max_iters = [480000, 480000, 480000, 480000, 480000]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
'.', 'models', 'coco', net_name, model_name, 'rpn_test.pt')
else:
solvers = [[net_name, model_name, 'stage1_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage2_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage3_rpn_solver60k80k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [80000, 120000, 80000, 120000, 80000]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, model_name, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, output_cache=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rpn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
    cfg.TEST.RPN_PRE_NMS_TOP_N = 6000 # keep the top 6000 boxes before NMS
cfg.TEST.RPN_POST_NMS_TOP_N = 300 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
# Generate proposals on the imdb
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
if not os.path.exists(rpn_proposals_path):
rpn_proposals = imdb_proposals(rpn_net, imdb)
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
queue.put({'proposal_path': rpn_proposals_path})
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
def train_rfcn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None, output_cache=None):
"""Train a R-FCN using proposals generated by an RPN.
"""
    cfg.TRAIN.HAS_RPN = False # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train R-FCN
# Send R-FCN model path over the multiprocessing queue
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rfcn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rfcn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_compute_stats(queue=None, imdb_name=None, cfg=None, rpn_test_prototxt=None):
"""Compute mean stds for anchors
"""
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
import caffe
_init_caffe(cfg)
    # Pass over the training roidb to compute per-coordinate means/stds of the
    # RPN bbox regression targets; they are cached on disk so repeated runs can
    # skip the computation.
roidb, imdb = get_roidb(imdb_name)
    print 'Loaded dataset `{:s}` for RPN target statistics'.format(imdb.name)
mean_file = os.path.join(imdb.cache_path, imdb.name + '_means.npy')
std_file = os.path.join(imdb.cache_path, imdb.name + '_stds.npy')
if os.path.exists(mean_file) and os.path.exists(std_file):
means = np.load(mean_file)
stds = np.load(std_file)
else:
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, caffe.TEST)
# Generate proposals on the imdb
        print 'Start computing means/stds; this may take several minutes...'
if imdb_name.startswith('coco'):
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(4, 8, 16, 32))
else:
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(8, 16, 32))
np.save(mean_file, means)
np.save(std_file, stds)
queue.put({'means': means, 'stds': stds})
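# Hedged note on how the stats computed above are typically consumed (an
# assumption about the downstream training code, for illustration only):
# RPN bbox regression targets are whitened per coordinate before the loss,
#   normalized = (targets - means) / stds
# and predictions are un-whitened again at test time,
#   targets = normalized * stds + means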
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
    # queue for communicating results between processes
mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.imdb_name, args.net_name, args.model_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 0 RPN, compute normalization means and stds'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_compute_stats, kwargs=mp_kwargs)
p.start()
stage0_anchor_stats = mp_queue.get()
p.join()
cfg.TRAIN.RPN_NORMALIZE_MEANS = stage0_anchor_stats['means']
cfg.TRAIN.RPN_NORMALIZE_STDS = stage0_anchor_stats['stds']
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg,
output_cache='stage1_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, generate test proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 R-FCN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'],
output_cache='stage1_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage1 R-FCN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg,
output_cache='stage2_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 R-FCN using Stage-2 RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'],
output_cache='stage2_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 3 RPN, init from stage 2 R-FCN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage3'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage2_out['model_path']),
solver=solvers[4],
max_iters=max_iters[4],
cfg=cfg,
output_cache='stage3_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage3_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 3 RPN, generate test proposals only'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage3_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage3_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print 'Final model: {}'.format(str(rfcn_stage2_out['model_path']))
print 'Final test proposals: {}'.format(str(rpn_stage3_out['test_proposal_path']))
| 18,472 | 37.646444 | 103 | py |
bottom-up-attention | bottom-up-attention-master/tools/demo_vg.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import matplotlib
matplotlib.use('Agg')
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ['__background__']
with open(os.path.join(cfg.DATA_DIR, 'vg/objects_vocab.txt')) as f:
    for obj in f.readlines():  # avoid shadowing the builtin `object`
        CLASSES.append(obj.lower().strip())
ATTRS = []
with open(os.path.join(cfg.DATA_DIR, 'vg/attributes_vocab.txt')) as f:
for attr in f.readlines():
ATTRS.append(attr.lower().strip())
RELATIONS = []
with open(os.path.join(cfg.DATA_DIR, 'vg/relations_vocab.txt')) as f:
for rel in f.readlines():
RELATIONS.append(rel.lower().strip())
NETS = ['VGG']
MODELS = [
'faster_rcnn_end2end',
'faster_rcnn_end2end_attr',
'faster_rcnn_end2end_attr_rel',
'faster_rcnn_end2end_attr_rel_softmax_primed',
'faster_rcnn_end2end_attr_softmax_primed'
]
def vis_detections(ax, class_name, dets, attributes, rel_argmax, rel_score, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
for i in inds:
bbox = dets[i, :4]
score = dets[i, 4]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
if attributes is not None:
            att = np.argmax(attributes[i][1:])  # skip the 'no attribute' class at index 0
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f} ({:s})'.format(class_name, score, ATTRS[att]),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
else:
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
#print class_name
#print 'Outgoing relation: %s' % RELATIONS[np.argmax(rel_score[i])]
ax.set_title(('detections with '
'p(object | box) >= {:.1f}').format(thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo_tuples(net, image_name):
"""Detect objects, attributes and relations in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
if attr_scores is not None:
print 'Found attribute scores'
if rel_scores is not None:
print 'Found relation scores'
rel_scores = rel_scores[:,1:] # drop no relation
rel_argmax = np.argmax(rel_scores, axis=1).reshape((boxes.shape[0],boxes.shape[0]))
rel_score = np.max(rel_scores, axis=1).reshape((boxes.shape[0],boxes.shape[0]))
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.1
NMS_THRESH = 0.05
ATTR_THRESH = 0.1
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im)
# Detections
det_indices = []
det_scores = []
det_objects = []
det_bboxes = []
det_attrs = []
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = np.array(nms(dets, NMS_THRESH))
dets = dets[keep, :]
inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
if len(inds) > 0:
keep = keep[inds]
for k in keep:
det_indices.append(k)
det_bboxes.append(cls_boxes[k])
det_scores.append(cls_scores[k])
det_objects.append(cls)
if attr_scores is not None:
attr_inds = np.where(attr_scores[k][1:] >= ATTR_THRESH)[0]
det_attrs.append([ATTRS[ix] for ix in attr_inds])
else:
det_attrs.append([])
    if rel_scores is not None:
        rel_score = rel_score[det_indices].T[det_indices].T
        rel_argmax = rel_argmax[det_indices].T[det_indices].T
for i,(idx,score,obj,bbox,attr) in enumerate(zip(det_indices,det_scores,det_objects,det_bboxes,det_attrs)):
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
box_text = '{:s} {:.3f}'.format(obj, score)
if len(attr) > 0:
box_text += "(" + ",".join(attr) + ")"
ax.text(bbox[0], bbox[1] - 2,
box_text,
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
        if rel_scores is not None:
            # Outgoing relation with the highest score
            score = np.max(rel_score[i])
            ix = np.argmax(rel_score[i])
            subject = det_objects[ix]
            relation = RELATIONS[rel_argmax[i][ix]]
            print 'Relation: %.2f %s -> %s -> %s' % (score, obj, relation, subject)
            # Incoming relation with the highest score
            score = np.max(rel_score.T[i])
            ix = np.argmax(rel_score.T[i])
            subject = det_objects[ix]
            relation = RELATIONS[rel_argmax[ix][i]]
            print 'Relation: %.2f %s -> %s -> %s' % (score, subject, relation, obj)
ax.set_title(('detections with '
'p(object|box) >= {:.1f}').format(CONF_THRESH),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
plt.savefig('data/demo/'+im_file.split('/')[-1].replace(".jpg", "_demo.jpg"))
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
    if rel_scores is not None:
        rel_scores = rel_scores[:,1:] # drop no relation
        rel_argmax = np.argmax(rel_scores, axis=1).reshape((boxes.shape[0],boxes.shape[0]))
        rel_score = np.max(rel_scores, axis=1).reshape((boxes.shape[0],boxes.shape[0]))
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.4
NMS_THRESH = 0.3
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
if attr_scores is not None:
attributes = attr_scores[keep]
else:
attributes = None
if rel_scores is not None:
rel_argmax_c = rel_argmax[keep]
rel_score_c = rel_score[keep]
else:
rel_argmax_c = None
rel_score_c = None
vis_detections(ax, cls, dets, attributes, rel_argmax_c, rel_score_c, thresh=CONF_THRESH)
plt.savefig('data/demo/'+im_file.split('/')[-1].replace(".jpg", "_demo.jpg"))
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
    parser.add_argument('--net', dest='net', help='Network to use, e.g. VGG',
                        choices=NETS, default='VGG')
parser.add_argument('--model', dest='model', help='Model to use, e.g. faster_rcnn_end2end',
choices=MODELS, default='faster_rcnn_end2end_attr_rel_softmax_primed')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.ROOT_DIR, 'models/vg', args.net, args.model, 'test.prototxt')
caffemodel = os.path.join(cfg.ROOT_DIR, 'output/faster_rcnn_end2end/vg_train/vgg16_faster_rcnn_attr_rel_softmax_primed_heatmap_iter_250000.caffemodel')
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _, _, _= im_detect(net, im)
im_names = ['demo/000456.jpg',
'demo/000542.jpg',
'demo/001150.jpg',
'demo/001763.jpg',
'demo/004545.jpg',
'demo/2587.jpg',
'demo/2985.jpg',
'demo/3067.jpg',
'demo/3761.jpg',
'vg/VG_100K_2/2404579.jpg',
'vg/VG_100K/2323401.jpg',
'vg/VG_100K_2/2415196.jpg',
'vg/VG_100K_2/2403358.jpg',
'vg/VG_100K_2/2380967.jpg',
'vg/VG_100K_2/2393625.jpg',
'vg/VG_100K/2321134.jpg',
'vg/VG_100K/2319899.jpg',
'vg/VG_100K/1592589.jpg',
'vg/VG_100K_2/2400441.jpg',
'vg/VG_100K/2374686.jpg',
'vg/VG_100K/2372269.jpg',
'vg/VG_100K_2/2378526.jpg',
'vg/VG_100K_2/2403861.jpg',
]
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for {}'.format(im_name)
demo_tuples(net, im_name)
plt.show()
| 11,553 | 34.550769 | 155 | py |
bottom-up-attention | bottom-up-attention-master/tools/train_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network on a region of interest database."""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import datasets.imdb
import caffe
import argparse
import pprint
import numpy as np
import sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def combined_roidb(imdb_names):
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
imdb = datasets.imdb.imdb(imdb_names)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
imdb, roidb = combined_roidb(args.imdb_name)
print '{:d} roidb entries'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
train_net(args.solver, roidb, output_dir,
pretrained_model=args.pretrained_model,
max_iters=args.max_iters)
| 3,747 | 32.168142 | 78 | py |
bottom-up-attention | bottom-up-attention-master/data/genome/create_splits.py | #!/usr/bin/python
''' Determine visual genome data splits to avoid contamination of COCO splits.'''
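# Split policy, in brief: VG images whose coco_id falls in a Karpathy
# train/val/test split inherit that split; the remaining images are shuffled
# and used to top up val and test to 5,000 images each, and everything left
# goes to train. This keeps the COCO test set untouched.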
import argparse
import os
import random
from random import shuffle
import shutil
import subprocess
import sys
import json
random.seed(10) # Make dataset splits repeatable
CURDIR = os.path.dirname(os.path.realpath(__file__))
# The root directory which holds all information of the dataset.
splitDir = 'data/genome'
dataDir = 'data/vg'
train_list_file = "{}/train.txt".format(CURDIR)
val_list_file = "{}/val.txt".format(CURDIR)
test_list_file = "{}/test.txt".format(CURDIR)
# First determine train, val, test splits (x, 5000, 5000)
train = set()
val = set()
test = set()
# Load coco test ids
coco_test_ids = set()
with open(os.path.join(splitDir, 'coco_splits/image_info_test2014.json')) as f:
coco_data = json.load(f)
for item in coco_data['images']:
coco_test_ids.add(item['id'])
print "There are %d coco test images" % len(coco_test_ids)
# Load karpathy coco splits
karpathy_train = set()
with open(os.path.join(splitDir, 'coco_splits/karpathy_train_images.txt')) as f:
for line in f.readlines():
image_id=int(line.split('.')[0].split('_')[-1])
karpathy_train.add(image_id)
karpathy_val = set()
with open(os.path.join(splitDir, 'coco_splits/karpathy_val_images.txt')) as f:
for line in f.readlines():
image_id=int(line.split('.')[0].split('_')[-1])
karpathy_val.add(image_id)
karpathy_test = set()
with open(os.path.join(splitDir, 'coco_splits/karpathy_test_images.txt')) as f:
for line in f.readlines():
image_id=int(line.split('.')[0].split('_')[-1])
karpathy_test.add(image_id)
print "Karpathy splits are %d, %d, %d (train, val, test)" % (len(karpathy_train), len(karpathy_val), len(karpathy_test))
# Load VG image metadata
coco_ids = set()
with open(os.path.join(dataDir, 'image_data.json')) as f:
metadata = json.load(f)
for item in metadata:
if item['coco_id']:
coco_ids.add(item['coco_id'])
print "Found %d visual genome images claiming coco ids" % len(coco_ids)
print "Overlap with COCO test is %d" % len(coco_test_ids & coco_ids)
print "Overlap with Karpathy train is %d" % len(karpathy_train & coco_ids)
print "Overlap with Karpathy val is %d" % len(karpathy_val & coco_ids)
print "Overlap with Karpathy test is %d" % len(karpathy_test & coco_ids)
# Output
#There are 40775 coco test images
#Karpathy splits are 113287, 5000, 5000 (train, val, test)
#Found 51208 visual genome images claiming coco ids
#Overlap with COCO test is 0
#Overlap with Karpathy train is 46944
#Overlap with Karpathy val is 2126
#Overlap with Karpathy test is 2138
# Determine splits
remainder = []
for item in metadata:
if item['coco_id']:
if item['coco_id'] in karpathy_train:
train.add(item['image_id'])
elif item['coco_id'] in karpathy_val:
val.add(item['image_id'])
elif item['coco_id'] in karpathy_test:
test.add(item['image_id'])
else:
remainder.append(item['image_id'])
else:
remainder.append(item['image_id'])
shuffle(remainder)
while len(test) < 5000:
test.add(remainder.pop())
while len(val) < 5000:
val.add(remainder.pop())
train |= set(remainder)
assert len(test) == 5000
assert len(val) == 5000
assert len(train) == len(metadata) - 10000
# Create train, val and test set.
for outfile, split in zip([train_list_file, val_list_file, test_list_file], [train, val, test]):
if not os.path.exists(outfile):
img_files = []
anno_files = []
img_id = []
img_h = []
img_w = []
for item in metadata:
if item['image_id'] in split:
url = item['url'].split('/')
img_file = "{}/{}".format(url[-2],url[-1])
anno_file = "xml/{}".format(url[-1].replace(".jpg",".xml"))
img_files.append(img_file)
anno_files.append(anno_file)
img_id.append(item['image_id'])
img_h.append(item['height'])
img_w.append(item['width'])
# Shuffle the images.
idx = [i for i in xrange(len(img_files))]
shuffle(idx)
# Save splits
with open(outfile, "w") as f:
for i in idx:
f.write("{} {}\n".format(img_files[i], anno_files[i]))
| 4,163 | 29.844444 | 120 | py |
bottom-up-attention | bottom-up-attention-master/data/genome/setup_vg.py | #!/usr/bin/python
''' Visual genome data analysis and preprocessing.'''
import json
import os
import operator
from visual_genome_python_driver import local as vg
from collections import Counter, defaultdict
import xml.etree.cElementTree as ET
from xml.dom import minidom
dataDir = './data/vg'
outDir = 'data/genome'
# Set maximum values for number of object / attribute / relation classes,
# filter it further later
max_objects = 2500
max_attributes = 1000
max_relations = 500
common_attributes = set(['white','black','blue','green','red','brown','yellow',
'small','large','silver','wooden','orange','gray','grey','metal','pink','tall',
'long','dark'])
def clean_string(string):
string = string.lower().strip()
if len(string) >= 1 and string[-1] == '.':
return string[:-1].strip()
return string
def clean_objects(string, common_attributes):
''' Return object and attribute lists '''
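  # Worked example: clean_objects('large white dog.', common_attributes)
  # returns (['dog'], ['large', 'white']) -- leading common adjectives are
  # split off and repurposed as attributes.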
string = clean_string(string)
words = string.split()
if len(words) > 1:
prefix_words_are_adj = True
for att in words[:-1]:
if not att in common_attributes:
prefix_words_are_adj = False
if prefix_words_are_adj:
return words[-1:],words[:-1]
else:
return [string],[]
else:
return [string],[]
def clean_attributes(string):
''' Return attribute list '''
string = clean_string(string)
if string == "black and white":
return [string]
else:
return [word.lower().strip() for word in string.split(" and ")]
def clean_relations(string):
string = clean_string(string)
if len(string) > 0:
return [string]
else:
return []
def prettify(elem):
''' Return a pretty-printed XML string for the Element '''
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def build_vocabs_and_xml():
objects = Counter()
attributes = Counter()
relations = Counter()
with open(os.path.join(dataDir, 'scene_graphs.json')) as f:
data = json.load(f)
# First extract attributes and relations
for sg in data:
for attr in sg['attributes']:
try:
attributes.update(clean_attributes(attr['attribute']['attributes'][0]))
      except:
        # some attribute annotations are malformed; skip them
        pass
for rel in sg['relationships']:
relations.update(clean_relations(rel['predicate']))
# Now extract objects, while looking for common adjectives that will be repurposed
# as attributes
for sg in data:
for obj in sg['objects']:
o,a = clean_objects(obj['names'][0], common_attributes)
objects.update(o)
attributes.update(a)
with open(os.path.join(outDir, "objects_count.txt"), "w") as text_file:
for k,v in sorted(objects.iteritems(), key=operator.itemgetter(1), reverse=True):
text_file.write("%s\t%d\n" % (k.encode('utf-8'),v))
with open(os.path.join(outDir, "attributes_count.txt"), "w") as text_file:
for k,v in sorted(attributes.iteritems(), key=operator.itemgetter(1), reverse=True):
text_file.write("%s\t%d\n" % (k.encode('utf-8'),v))
with open(os.path.join(outDir, "relations_count.txt"), "w") as text_file:
for k,v in sorted(relations.iteritems(), key=operator.itemgetter(1), reverse=True):
text_file.write("%s\t%d\n" % (k.encode('utf-8'),v))
# Create full-sized vocabs
objects = set([k for k,v in objects.most_common(max_objects)])
attributes = set([k for k,v in attributes.most_common(max_attributes)])
relations = set([k for k,v in relations.most_common(max_relations)])
with open(os.path.join(outDir, "objects_vocab_%s.txt" % max_objects), "w") as text_file:
for item in objects:
text_file.write("%s\n" % item)
with open(os.path.join(outDir, "attributes_vocab_%s.txt" % max_attributes), "w") as text_file:
for item in attributes:
text_file.write("%s\n" % item)
with open(os.path.join(outDir, "relations_vocab_%s.txt" % max_relations), "w") as text_file:
for item in relations:
text_file.write("%s\n" % item)
# Load image metadata
metadata = {}
with open(os.path.join(dataDir, 'image_data.json')) as f:
for item in json.load(f):
metadata[item['image_id']] = item
# Output clean xml files, one per image
out_folder = 'xml'
if not os.path.exists(os.path.join(outDir, out_folder)):
os.mkdir(os.path.join(outDir, out_folder))
for sg in data:
ann = ET.Element("annotation")
meta = metadata[sg["image_id"]]
assert sg["image_id"] == meta["image_id"]
url_split = meta["url"].split("/")
ET.SubElement(ann, "folder").text = url_split[-2]
ET.SubElement(ann, "filename").text = url_split[-1]
source = ET.SubElement(ann, "source")
ET.SubElement(source, "database").text = "Visual Genome Version 1.2"
ET.SubElement(source, "image_id").text = str(meta["image_id"])
ET.SubElement(source, "coco_id").text = str(meta["coco_id"])
ET.SubElement(source, "flickr_id").text = str(meta["flickr_id"])
size = ET.SubElement(ann, "size")
ET.SubElement(size, "width").text = str(meta["width"])
ET.SubElement(size, "height").text = str(meta["height"])
ET.SubElement(size, "depth").text = "3"
ET.SubElement(ann, "segmented").text = "0"
object_set = set()
for obj in sg['objects']:
o,a = clean_objects(obj['names'][0], common_attributes)
if o[0] in objects:
ob = ET.SubElement(ann, "object")
ET.SubElement(ob, "name").text = o[0]
ET.SubElement(ob, "object_id").text = str(obj["object_id"])
object_set.add(obj["object_id"])
ET.SubElement(ob, "difficult").text = "0"
bbox = ET.SubElement(ob, "bndbox")
ET.SubElement(bbox, "xmin").text = str(obj["x"])
ET.SubElement(bbox, "ymin").text = str(obj["y"])
ET.SubElement(bbox, "xmax").text = str(obj["x"] + obj["w"])
ET.SubElement(bbox, "ymax").text = str(obj["y"] + obj["h"])
attribute_set = set()
for attribute_name in a:
if attribute_name in attributes:
attribute_set.add(attribute_name)
for attr in sg['attributes']:
if attr["attribute"]["object_id"] == obj["object_id"]:
try:
for ix in attr['attribute']['attributes']:
for clean_attribute in clean_attributes(ix):
if clean_attribute in attributes:
attribute_set.add(clean_attribute)
            except:
              # malformed attribute entry; skip it
              pass
for attribute_name in attribute_set:
ET.SubElement(ob, "attribute").text = attribute_name
for rel in sg['relationships']:
predicate = clean_string(rel["predicate"])
if rel["subject_id"] in object_set and rel["object_id"] in object_set:
if predicate in relations:
re = ET.SubElement(ann, "relation")
ET.SubElement(re, "subject_id").text = str(rel["subject_id"])
ET.SubElement(re, "object_id").text = str(rel["object_id"])
ET.SubElement(re, "predicate").text = predicate
outFile = url_split[-1].replace(".jpg",".xml")
tree = ET.ElementTree(ann)
if len(tree.findall('object')) > 0:
tree.write(os.path.join(outDir, out_folder, outFile))
if __name__ == "__main__":
# First, use visual genome library to merge attributes and scene graphs
vg.AddAttrsToSceneGraphs(dataDir=dataDir)
# Next, build xml files
build_vocabs_and_xml()
| 7,416 | 34.319048 | 96 | py |
bottom-up-attention | bottom-up-attention-master/data/genome/visual_genome_python_driver/local.py | from models import Image, Object, Attribute, Relationship
from models import Region, Graph, QA, QAObject, Synset
import httplib
import json
import utils
import os, gc
"""
Get all image metadata entries as Image objects from the locally stored image_data.json.
"""
def GetAllImageData(dataDir=None):
if dataDir is None:
dataDir = utils.GetDataDir()
dataFile = os.path.join(dataDir, 'image_data.json')
data = json.load(open(dataFile))
return [utils.ParseImageData(image) for image in data]
"""
Get all region descriptions.
"""
def GetAllRegionDescriptions(dataDir=None):
if dataDir is None:
dataDir = utils.GetDataDir()
dataFile = os.path.join(dataDir, 'region_descriptions.json')
imageData = GetAllImageData(dataDir)
imageMap = {}
for d in imageData:
imageMap[d.id] = d
images = json.load(open(dataFile))
output = []
for image in images:
output.append(utils.ParseRegionDescriptions(image['regions'], imageMap[image['id']]))
return output
"""
Get all question answers.
"""
def GetAllQAs(dataDir=None):
if dataDir is None:
dataDir = utils.GetDataDir()
dataFile = os.path.join(dataDir, 'question_answers.json')
imageData = GetAllImageData(dataDir)
imageMap = {}
for d in imageData:
imageMap[d.id] = d
images = json.load(open(dataFile))
output = []
for image in images:
output.append(utils.ParseQA(image['qas'], imageMap))
return output
# --------------------------------------------------------------------------------------------------
# GetSceneGraphs and sub-methods
"""
Load a single scene graph from a .json file.
"""
def GetSceneGraph(image_id, images='data/', imageDataDir='data/by-id/', synsetFile='data/synsets.json'):
if type(images) is str:
# Instead of a string, we can pass this dict as the argument `images`
images = {img.id:img for img in GetAllImageData(images)}
fname = str(image_id) + '.json'
image = images[image_id]
data = json.load(open(imageDataDir + fname, 'r'))
scene_graph = ParseGraphLocal(data, image)
scene_graph = InitSynsets(scene_graph, synsetFile)
return scene_graph
"""
Get scene graphs given locally stored .json files; requires `SaveSceneGraphsById`.
startIndex, endIndex : get scene graphs listed by image, from startIndex through endIndex
dataDir : directory with `image_data.json` and `synsets.json`
imageDataDir : directory of scene graph jsons saved by image id (see `SaveSceneGraphsById`)
minRels, maxRels: only get scene graphs with at least / less than this number of relationships
"""
def GetSceneGraphs(startIndex=0, endIndex=-1,
dataDir='data/visualgenome/', imageDataDir='data/visualgenome/by-id/',
minRels=0, maxRels=100):
images = {img.id:img for img in GetAllImageData(dataDir)}
scene_graphs = []
img_fnames = os.listdir(imageDataDir)
if (endIndex < 1): endIndex = len(img_fnames)
for fname in img_fnames[startIndex : endIndex]:
image_id = int(fname.split('.')[0])
scene_graph = GetSceneGraph(image_id, images, imageDataDir, dataDir+'synsets.json')
n_rels = len(scene_graph.relationships)
if (minRels <= n_rels <= maxRels):
scene_graphs.append(scene_graph)
return scene_graphs
"""
Use object ids as hashes to `src.models.Object` instances. If item not
in table, create new `Object`. Used when building scene graphs from json.
"""
def MapObject(object_map, obj):
oid = obj['object_id']
obj['id'] = oid
del obj['object_id']
if oid in object_map:
object_ = object_map[oid]
else:
if 'attributes' in obj:
attrs = obj['attributes']
del obj['attributes']
else:
attrs = []
if 'w' in obj:
obj['width'] = obj['w']
obj['height'] = obj['h']
del obj['w'], obj['h']
object_ = Object(**obj)
object_.attributes = attrs
object_map[oid] = object_
return object_map, object_
"""
Modified version of `utils.ParseGraph`.
"""
global count_skips
count_skips = [0,0]
def ParseGraphLocal(data, image, verbose=False):
global count_skips
objects = []
object_map = {}
relationships = []
attributes = []
for obj in data['objects']:
object_map, o_ = MapObject(object_map, obj)
objects.append(o_)
for rel in data['relationships']:
if rel['subject_id'] in object_map and rel['object_id'] in object_map:
object_map, s = MapObject(object_map, {'object_id': rel['subject_id']})
v = rel['predicate']
object_map, o = MapObject(object_map, {'object_id': rel['object_id']})
rid = rel['relationship_id']
relationships.append(Relationship(rid, s, v, o, rel['synsets']))
else:
# Skip this relationship if we don't have the subject and object in
# the object_map for this scene graph. Some data is missing in this way.
count_skips[0] += 1
if 'attributes' in data:
for attr in data['attributes']:
a = attr['attribute']
if a['object_id'] in object_map:
attributes.append(Attribute(attr['attribute_id'], a['object_id'], a['names'], a['synsets']))
else:
count_skips[1] += 1
if verbose:
print 'Skipped {} rels, {} attrs total'.format(*count_skips)
return Graph(image, objects, relationships, attributes)
"""
Convert synsets in a scene graph from strings to Synset objects.
"""
def InitSynsets(scene_graph, synset_file):
syn_data = json.load(open(synset_file, 'r'))
syn_class = {s['synset_name'] : Synset(s['synset_name'], s['synset_definition']) for s in syn_data}
for obj in scene_graph.objects:
obj.synsets = [syn_class[sn] for sn in obj.synsets]
for rel in scene_graph.relationships:
rel.synset = [syn_class[sn] for sn in rel.synset]
  for attr in scene_graph.attributes:
    attr.synset = [syn_class[sn] for sn in attr.synset]
return scene_graph
# --------------------------------------------------------------------------------------------------
# This is a pre-processing step that only needs to be executed once.
# You can download .jsons segmented with these methods from:
# https://drive.google.com/file/d/0Bygumy5BKFtcQ1JrcFpyQWdaQWM
"""
Save a separate .json file for each image id in `imageDataDir`.
Notes
-----
- If we don't save .json's by id, `scene_graphs.json` is >6G in RAM
- Separated .json files are ~1.1G on disk
- Run `AddAttrsToSceneGraphs` before `ParseGraphLocal` will work
- Attributes are only present in objects, and do not have synset info
Each output .json has the following keys:
- "id"
- "objects"
- "relationships"
"""
def SaveSceneGraphsById(dataDir='data/visualgenome/', imageDataDir='data/visualgenome/by-id/'):
if not os.path.exists(imageDataDir): os.mkdir(imageDataDir)
all_data = json.load(open(os.path.join(dataDir,'scene_graphs.json')))
for sg_data in all_data:
img_fname = str(sg_data['image_id']) + '.json'
with open(os.path.join(imageDataDir, img_fname), 'w') as f:
json.dump(sg_data, f)
del all_data
gc.collect() # clear memory
"""
Add attributes to `scene_graph.json`, extracted from `attributes.json`.
This also adds a unique id to each attribute, and separates individual
attibutes for each object (these are grouped in `attributes.json`).
"""
def AddAttrsToSceneGraphs(dataDir='data/visualgenome/'):
attr_data = json.load(open(os.path.join(dataDir, 'attributes.json')))
with open(os.path.join(dataDir, 'scene_graphs.json')) as f:
sg_dict = {sg['image_id']:sg for sg in json.load(f)}
id_count = 0
for img_attrs in attr_data:
attrs = []
for attribute in img_attrs['attributes']:
a = img_attrs.copy(); del a['attributes']
a['attribute'] = attribute
a['attribute_id'] = id_count
attrs.append(a)
id_count += 1
iid = img_attrs['image_id']
sg_dict[iid]['attributes'] = attrs
with open(os.path.join(dataDir, 'scene_graphs.json'), 'w') as f:
json.dump(sg_dict.values(), f)
del attr_data, sg_dict
gc.collect()
# --------------------------------------------------------------------------------------------------
# For info on VRD dataset, see:
# http://cs.stanford.edu/people/ranjaykrishna/vrd/
def GetSceneGraphsVRD(json_file='data/vrd/json/test.json'):
"""
Load VRD dataset into scene graph format.
"""
scene_graphs = []
with open(json_file,'r') as f:
D = json.load(f)
scene_graphs = [ParseGraphVRD(d) for d in D]
return scene_graphs
def ParseGraphVRD(d):
image = Image(d['photo_id'], d['filename'], d['width'], d['height'], '', '')
id2obj = {}
objs = []
rels = []
atrs = []
for i,o in enumerate(d['objects']):
b = o['bbox']
obj = Object(i, b['x'], b['y'], b['w'], b['h'], o['names'], [])
id2obj[i] = obj
objs.append(obj)
for j,a in enumerate(o['attributes']):
atrs.append(Attribute(j, obj, a['attribute'], []))
for i,r in enumerate(d['relationships']):
s = id2obj[r['objects'][0]]
o = id2obj[r['objects'][1]]
v = r['relationship']
rels.append(Relationship(i, s, v, o, []))
return Graph(image, objs, rels, atrs)
| 8,938 | 30.038194 | 104 | py |
bottom-up-attention | bottom-up-attention-master/data/genome/visual_genome_python_driver/utils.py | from models import Image, Object, Attribute, Relationship
from models import Region, Graph, QA, QAObject, Synset
import httplib
import json
"""
Get the local directory where the Visual Genome data is locally stored.
"""
def GetDataDir():
from os.path import dirname, realpath, join
dataDir = join(dirname(realpath('__file__')), 'data')
return dataDir
"""
Helper Method used to get all data from request string.
"""
def RetrieveData(request):
  connection = httplib.HTTPSConnection("visualgenome.org", 443)  # the API is served over TLS
connection.request("GET", request)
response = connection.getresponse()
jsonString = response.read()
data = json.loads(jsonString)
return data
"""
Helper to Extract Synset from canon object.
"""
def ParseSynset(canon):
if len(canon) == 0:
return None
return Synset(canon[0]['synset_name'], canon[0]['synset_definition'])
"""
Helper to parse a Graph object from API data.
"""
def ParseGraph(data, image):
objects = []
object_map = {}
relationships = []
attributes = []
# Create the Objects
for obj in data['bounding_boxes']:
names = []
synsets = []
for s in obj['boxed_objects']:
names.append(s['name'])
synsets.append(ParseSynset(s['object_canon']))
object_ = Object(obj['id'], obj['x'], obj['y'], obj['width'], obj['height'], names, synsets)
object_map[obj['id']] = object_
objects.append(object_)
# Create the Relationships
for rel in data['relationships']:
relationships.append(Relationship(rel['id'], object_map[rel['subject']], \
rel['predicate'], object_map[rel['object']], ParseSynset(rel['relationship_canon'])))
# Create the Attributes
for atr in data['attributes']:
attributes.append(Attribute(atr['id'], object_map[atr['subject']], \
atr['attribute'], ParseSynset(atr['attribute_canon'])))
return Graph(image, objects, relationships, attributes)
"""
Helper to parse the image data for one image.
"""
def ParseImageData(data):
img_id = data['id'] if 'id' in data else data['image_id']
url = data['url']
width = data['width']
height = data['height']
coco_id = data['coco_id']
flickr_id = data['flickr_id']
image = Image(img_id, url, width, height, coco_id, flickr_id)
return image
"""
Helper to parse region descriptions.
"""
def ParseRegionDescriptions(data, image):
regions = []
if data[0].has_key('region_id'):
region_id_key = 'region_id'
else:
region_id_key = 'id'
for d in data:
regions.append(Region(d[region_id_key], image, d['phrase'], d['x'], d['y'], d['width'], d['height']))
return regions
"""
Helper to parse a list of question answers.
"""
def ParseQA(data, image_map):
qas = []
for d in data:
qos = []
aos = []
if 'question_objects' in d:
for qo in d['question_objects']:
synset = Synset(qo['synset_name'], qo['synset_definition'])
qos.append(QAObject(qo['entity_idx_start'], qo['entity_idx_end'], qo['entity_name'], synset))
if 'answer_objects' in d:
for ao in d['answer_objects']:
        synset = Synset(ao['synset_name'], ao['synset_definition'])
aos.append(QAObject(ao['entity_idx_start'], ao['entity_idx_end'], ao['entity_name'], synset))
qas.append(QA(d['qa_id'], image_map[d['image_id']], d['question'], d['answer'], qos, aos))
return qas
| 3,292 | 30.361905 | 105 | py |
bottom-up-attention | bottom-up-attention-master/data/genome/visual_genome_python_driver/api.py | from models import Image, Object, Attribute, Relationship
from models import Region, Graph, QA, QAObject, Synset
import httplib
import json
import utils
"""
Get all Image ids.
"""
def GetAllImageIds():
page = 1
next = '/api/v0/images/all?page=' + str(page)
ids = []
while True:
data = utils.RetrieveData(next)
ids.extend(data['results'])
if data['next'] is None:
break
page += 1
next = '/api/v0/images/all?page=' + str(page)
return ids
"""
Get Image ids from startIndex to endIndex.
"""
def GetImageIdsInRange(startIndex=0, endIndex=99):
idsPerPage = 1000
startPage = startIndex / idsPerPage + 1
endPage = endIndex / idsPerPage + 1
ids = []
for page in range(startPage, endPage+1):
data = utils.RetrieveData('/api/v0/images/all?page=' + str(page))
ids.extend(data['results'])
  ids = ids[startIndex % idsPerPage:]
ids = ids[:endIndex-startIndex+1]
return ids
"""
Get data about an image.
"""
def GetImageData(id=61512):
data = utils.RetrieveData('/api/v0/images/' + str(id))
if 'detail' in data and data['detail'] == 'Not found.':
return None
image = utils.ParseImageData(data)
return image
"""
Get the region descriptions of an image.
"""
def GetRegionDescriptionsOfImage(id=61512):
image = GetImageData(id=id)
data = utils.RetrieveData('/api/v0/images/' + str(id) + '/regions')
if 'detail' in data and data['detail'] == 'Not found.':
return None
return utils.ParseRegionDescriptions(data, image)
"""
Get Region Graph of a particular Region in an image.
"""
def GetRegionGraphOfRegion(image_id=61512, region_id=1):
image = GetImageData(id=image_id)
data = utils.RetrieveData('/api/v0/images/' + str(image_id) + '/regions/' + str(region_id))
if 'detail' in data and data['detail'] == 'Not found.':
return None
return utils.ParseGraph(data[0], image)
"""
Get Scene Graph of an image.
"""
def GetSceneGraphOfImage(id=61512):
image = GetImageData(id=id)
data = utils.RetrieveData('/api/v0/images/' + str(id) + '/graph')
if 'detail' in data and data['detail'] == 'Not found.':
return None
return utils.ParseGraph(data, image)
"""
Gets all the QA from the dataset.
qtotal int total number of QAs to return. Set to None if all QAs should be returned
"""
def GetAllQAs(qtotal=100):
page = 1
next = '/api/v0/qa/all?page=' + str(page)
qas = []
image_map = {}
while True:
data = utils.RetrieveData(next)
for d in data['results']:
if d['image'] not in image_map:
image_map[d['image']] = GetImageData(id=d['image'])
qas.extend(utils.ParseQA(data['results'], image_map))
if qtotal is not None and len(qas) > qtotal:
return qas
if data['next'] is None:
break
page += 1
next = '/api/v0/qa/all?page=' + str(page)
return qas
"""
Get all QA's of a particular type - example, 'why'
qtype string possible values: what, where, when, why, who, how.
qtotal int total number of QAs to return. Set to None if all QAs should be returned
"""
def GetQAofType(qtype='why', qtotal=100):
page = 1
next = '/api/v0/qa/' + qtype + '?page=' + str(page)
qas = []
image_map = {}
while True:
data = utils.RetrieveData(next)
for d in data['results']:
if d['image'] not in image_map:
image_map[d['image']] = GetImageData(id=d['image'])
qas.extend(utils.ParseQA(data['results'], image_map))
if qtotal is not None and len(qas) > qtotal:
return qas
if data['next'] is None:
break
page += 1
next = '/api/v0/qa/' + qtype + '?page=' + str(page)
return qas
"""
Get all QAs for a particular image.
"""
def GetQAofImage(id=61512):
page = 1
next = '/api/v0/image/' + str(id) + '/qa?page=' + str(page)
qas = []
image_map = {}
while True:
data = utils.RetrieveData(next)
for d in data['results']:
if d['image'] not in image_map:
image_map[d['image']] = GetImageData(id=d['image'])
qas.extend(utils.ParseQA(data['results'], image_map))
if data['next'] is None:
break
page += 1
next = '/api/v0/image/' + str(id) + '/qa?page=' + str(page)
return qas
| 4,121 | 27.427586 | 93 | py |
bottom-up-attention | bottom-up-attention-master/data/genome/visual_genome_python_driver/models.py | """
Visual Genome Python API wrapper, models
"""
"""
Image.
ID int
url hyperlink string
width int
height int
"""
class Image:
def __init__(self, id, url, width, height, coco_id, flickr_id):
self.id = id
self.url = url
self.width = width
self.height = height
self.coco_id = coco_id
self.flickr_id = flickr_id
def __str__(self):
return 'id: %d, coco_id: %d, flickr_id: %d, width: %d, url: %s' \
% (self.id, -1 if self.coco_id is None else self.coco_id, -1 if self.flickr_id is None else self.flickr_id, self.width, self.url)
def __repr__(self):
return str(self)
"""
Region.
image int
phrase string
x int
y int
width int
height int
"""
class Region:
def __init__(self, id, image, phrase, x, y, width, height):
self.id = id
self.image = image
self.phrase = phrase
self.x = x
self.y = y
self.width = width
self.height = height
def __str__ (self):
return 'id: %d, x: %d, y: %d, width: %d, height: %d, phrase: %s, image: %d' % \
(self.id, self.x, self.y, self.width, self.height, self.phrase, self.image.id)
def __repr__(self):
return str(self)
"""
Graphs contain objects, relationships and attributes
image Image
bboxes Object array
relationships Relationship array
attributes Attribute array
"""
class Graph:
def __init__(self, image, objects, relationships, attributes):
self.image = image
self.objects = objects
self.relationships = relationships
self.attributes = attributes
"""
Objects.
id int
x int
y int
width int
height int
names string array
synsets Synset array
"""
class Object:
def __init__(self, id, x, y, width, height, names, synsets):
self.id = id
self.x = x
self.y = y
self.width = width
self.height = height
self.names = names
self.synsets = synsets
def __str__(self):
name = self.names[0] if len(self.names) != 0 else 'None'
return '%s' % (name)
def __repr__(self):
return str(self)
"""
Relationships. Ex, 'man - jumping over - fire hydrant'.
subject int
predicate string
object int
rel_canon Synset
"""
class Relationship:
def __init__(self, id, subject, predicate, object, synset):
self.id = id
self.subject = subject
self.predicate = predicate
self.object = object
self.synset = synset
def __str__(self):
return "%d: %s - %s - %s" % (self.id, self.subject, self.predicate, self.object)
def __repr__(self):
return str(self)
"""
Attributes. Ex, 'man - old'.
subject Object
attribute string
synset Synset
"""
class Attribute:
def __init__(self, id, subject, attribute, synset):
self.id = id
self.subject = subject
self.attribute = attribute
self.synset = synset
def __str__(self):
return "%d: %s - %s" % (self.id, self.subject, self.attribute)
def __repr__(self):
return str(self)
"""
Question Answer Pairs.
ID int
image int
question string
answer string
q_objects QAObject array
a_objects QAObject array
"""
class QA:
def __init__(self, id, image, question, answer, question_objects, answer_objects):
self.id = id
self.image = image
self.question = question
self.answer = answer
self.q_objects = question_objects
self.a_objects = answer_objects
def __str__(self):
return 'id: %d, image: %d, question: %s, answer: %s' \
% (self.id, self.image.id, self.question, self.answer)
def __repr__(self):
return str(self)
"""
Question Answer Objects are localized in the image and refer to a part
of the question text or the answer text.
start_idx int
end_idx int
name string
synset_name string
synset_definition string
"""
class QAObject:
def __init__(self, start_idx, end_idx, name, synset):
self.start_idx = start_idx
self.end_idx = end_idx
self.name = name
self.synset = synset
def __repr__(self):
return str(self)
"""
Wordnet Synsets.
name string
definition string
"""
class Synset:
def __init__(self, name, definition):
self.name = name
self.definition = definition
def __str__(self):
return '{} - {}'.format(self.name, self.definition)
def __repr__(self):
return str(self)
| 4,469 | 21.923077 | 137 | py |
bottom-up-attention | bottom-up-attention-master/data/genome/visual_genome_python_driver/__init__.py | 0 | 0 | 0 | py |
|
bottom-up-attention | bottom-up-attention-master/caffe/tools/extra/summarize.py | #!/usr/bin/env python
"""Net summarization tool.
This tool summarizes the structure of a net in a concise but comprehensive
tabular listing, taking a prototxt file as input.
Use this tool to check at a glance that the computation you've specified is the
computation you expect.
"""
from caffe.proto import caffe_pb2
from google import protobuf
import re
import argparse
# ANSI codes for coloring blobs (used cyclically)
COLORS = ['92', '93', '94', '95', '97', '96', '42', '43;30', '100',
'444', '103;30', '107;30']
DISCONNECTED_COLOR = '41'
def read_net(filename):
net = caffe_pb2.NetParameter()
with open(filename) as f:
protobuf.text_format.Parse(f.read(), net)
return net
def format_param(param):
out = []
if len(param.name) > 0:
out.append(param.name)
if param.lr_mult != 1:
out.append('x{}'.format(param.lr_mult))
if param.decay_mult != 1:
out.append('Dx{}'.format(param.decay_mult))
return ' '.join(out)
def printed_len(s):
return len(re.sub(r'\033\[[\d;]+m', '', s))
def print_table(table, max_width):
"""Print a simple nicely-aligned table.
table must be a list of (equal-length) lists. Columns are space-separated,
and as narrow as possible, but no wider than max_width. Text may overflow
columns; note that unlike string.format, this will not affect subsequent
columns, if possible."""
max_widths = [max_width] * len(table[0])
column_widths = [max(printed_len(row[j]) + 1 for row in table)
for j in range(len(table[0]))]
column_widths = [min(w, max_w) for w, max_w in zip(column_widths, max_widths)]
for row in table:
row_str = ''
right_col = 0
for cell, width in zip(row, column_widths):
right_col += width
row_str += cell + ' '
row_str += ' ' * max(right_col - printed_len(row_str), 0)
print row_str
def summarize_net(net):
disconnected_tops = set()
for lr in net.layer:
disconnected_tops |= set(lr.top)
disconnected_tops -= set(lr.bottom)
table = []
colors = {}
for lr in net.layer:
tops = []
for ind, top in enumerate(lr.top):
color = colors.setdefault(top, COLORS[len(colors) % len(COLORS)])
if top in disconnected_tops:
top = '\033[1;4m' + top
if len(lr.loss_weight) > 0:
top = '{} * {}'.format(lr.loss_weight[ind], top)
tops.append('\033[{}m{}\033[0m'.format(color, top))
top_str = ', '.join(tops)
bottoms = []
for bottom in lr.bottom:
color = colors.get(bottom, DISCONNECTED_COLOR)
bottoms.append('\033[{}m{}\033[0m'.format(color, bottom))
bottom_str = ', '.join(bottoms)
if lr.type == 'Python':
type_str = lr.python_param.module + '.' + lr.python_param.layer
else:
type_str = lr.type
# Summarize conv/pool parameters.
# TODO support rectangular/ND parameters
conv_param = lr.convolution_param
if (lr.type in ['Convolution', 'Deconvolution']
and len(conv_param.kernel_size) == 1):
arg_str = str(conv_param.kernel_size[0])
if len(conv_param.stride) > 0 and conv_param.stride[0] != 1:
arg_str += '/' + str(conv_param.stride[0])
if len(conv_param.pad) > 0 and conv_param.pad[0] != 0:
arg_str += '+' + str(conv_param.pad[0])
arg_str += ' ' + str(conv_param.num_output)
if conv_param.group != 1:
arg_str += '/' + str(conv_param.group)
elif lr.type == 'Pooling':
arg_str = str(lr.pooling_param.kernel_size)
if lr.pooling_param.stride != 1:
arg_str += '/' + str(lr.pooling_param.stride)
if lr.pooling_param.pad != 0:
arg_str += '+' + str(lr.pooling_param.pad)
else:
arg_str = ''
if len(lr.param) > 0:
param_strs = map(format_param, lr.param)
if max(map(len, param_strs)) > 0:
param_str = '({})'.format(', '.join(param_strs))
else:
param_str = ''
else:
param_str = ''
table.append([lr.name, type_str, param_str, bottom_str, '->', top_str,
arg_str])
return table
def main():
parser = argparse.ArgumentParser(description="Print a concise summary of net computation.")
parser.add_argument('filename', help='net prototxt file to summarize')
parser.add_argument('-w', '--max-width', help='maximum field width',
type=int, default=30)
args = parser.parse_args()
net = read_net(args.filename)
table = summarize_net(net)
print_table(table, max_width=args.max_width)
if __name__ == '__main__':
main()
| 4,880 | 33.617021 | 95 | py |
bottom-up-attention | bottom-up-attention-master/caffe/tools/extra/extract_seconds.py | #!/usr/bin/env python
import datetime
import os
import sys
def extract_datetime_from_line(line, year):
# Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565
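    # e.g. with year=2017 the line above parses to
    # datetime.datetime(2017, 2, 10, 13, 39, 22, 381027)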
line = line.strip().split()
month = int(line[0][1:3])
day = int(line[0][3:])
timestamp = line[1]
pos = timestamp.rfind('.')
ts = [int(x) for x in timestamp[:pos].split(':')]
hour = ts[0]
minute = ts[1]
second = ts[2]
microsecond = int(timestamp[pos + 1:])
dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)
return dt
def get_log_created_year(input_file):
"""Get year from log file system timestamp
"""
log_created_time = os.path.getctime(input_file)
log_created_year = datetime.datetime.fromtimestamp(log_created_time).year
return log_created_year
def get_start_time(line_iterable, year):
"""Find start time from group of lines
"""
start_datetime = None
for line in line_iterable:
line = line.strip()
if line.find('Solving') != -1:
start_datetime = extract_datetime_from_line(line, year)
break
return start_datetime
def extract_seconds(input_file, output_file):
with open(input_file, 'r') as f:
lines = f.readlines()
log_created_year = get_log_created_year(input_file)
start_datetime = get_start_time(lines, log_created_year)
assert start_datetime, 'Start time not found'
last_dt = start_datetime
out = open(output_file, 'w')
for line in lines:
line = line.strip()
if line.find('Iteration') != -1:
dt = extract_datetime_from_line(line, log_created_year)
# if it's another year
if dt.month < last_dt.month:
log_created_year += 1
dt = extract_datetime_from_line(line, log_created_year)
last_dt = dt
elapsed_seconds = (dt - start_datetime).total_seconds()
out.write('%f\n' % elapsed_seconds)
out.close()
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: ./extract_seconds input_file output_file')
exit(1)
extract_seconds(sys.argv[1], sys.argv[2])
| 2,208 | 29.260274 | 97 | py |