body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
726ac45f4aba7e98c9672c77c9cf246504e1a01f8d9803e34dad100eddad552a | def at_object_creation(self):
'Called at first creation'
super().at_object_creation()
self.db.puzzle_value = 1
self.db.success_teleport_msg = 'You are successful!'
self.db.success_teleport_to = 'treasure room'
self.db.failure_teleport_msg = 'You fail!'
self.db.failure_teleport_to = 'dark cell' | Called at first creation | typeclasses/rooms/teleports.py | at_object_creation | Tiogaplanet/Druidia | 0 | python | def at_object_creation(self):
super().at_object_creation()
self.db.puzzle_value = 1
self.db.success_teleport_msg = 'You are successful!'
self.db.success_teleport_to = 'treasure room'
self.db.failure_teleport_msg = 'You fail!'
self.db.failure_teleport_to = 'dark cell' | def at_object_creation(self):
super().at_object_creation()
self.db.puzzle_value = 1
self.db.success_teleport_msg = 'You are successful!'
self.db.success_teleport_to = 'treasure room'
self.db.failure_teleport_msg = 'You fail!'
self.db.failure_teleport_to = 'dark cell'<|docstring|>Called at first creation<|endoftext|> |
3dbd675dd77db5089dd37e7c0d60298a5f67ee74a945c47cf4cf5f22afff43a6 | def at_object_receive(self, character, source_location):
'\n This hook is called by the engine whenever the player is moved into\n this room.\n '
if (not character.has_account):
return
is_success = (str(character.db.puzzle_clue) == str(self.db.puzzle_value))
teleport_to = (self.db.success_teleport_to if is_success else self.db.failure_teleport_to)
results = search_object(teleport_to)
if ((not results) or (len(results) > 1)):
character.msg(('no valid teleport target for %s was found.' % teleport_to))
return
if character.is_superuser:
character.msg(('Superuser block: You would have been teleported to %s.' % results[0]))
return
if is_success:
character.msg(self.db.success_teleport_msg)
else:
character.msg(self.db.failure_teleport_msg)
character.move_to(results[0], quiet=True, move_hooks=False)
results[0].at_object_receive(character, self) | This hook is called by the engine whenever the player is moved into
this room. | typeclasses/rooms/teleports.py | at_object_receive | Tiogaplanet/Druidia | 0 | python | def at_object_receive(self, character, source_location):
'\n This hook is called by the engine whenever the player is moved into\n this room.\n '
if (not character.has_account):
return
is_success = (str(character.db.puzzle_clue) == str(self.db.puzzle_value))
teleport_to = (self.db.success_teleport_to if is_success else self.db.failure_teleport_to)
results = search_object(teleport_to)
if ((not results) or (len(results) > 1)):
character.msg(('no valid teleport target for %s was found.' % teleport_to))
return
if character.is_superuser:
character.msg(('Superuser block: You would have been teleported to %s.' % results[0]))
return
if is_success:
character.msg(self.db.success_teleport_msg)
else:
character.msg(self.db.failure_teleport_msg)
character.move_to(results[0], quiet=True, move_hooks=False)
results[0].at_object_receive(character, self) | def at_object_receive(self, character, source_location):
'\n This hook is called by the engine whenever the player is moved into\n this room.\n '
if (not character.has_account):
return
is_success = (str(character.db.puzzle_clue) == str(self.db.puzzle_value))
teleport_to = (self.db.success_teleport_to if is_success else self.db.failure_teleport_to)
results = search_object(teleport_to)
if ((not results) or (len(results) > 1)):
character.msg(('no valid teleport target for %s was found.' % teleport_to))
return
if character.is_superuser:
character.msg(('Superuser block: You would have been teleported to %s.' % results[0]))
return
if is_success:
character.msg(self.db.success_teleport_msg)
else:
character.msg(self.db.failure_teleport_msg)
character.move_to(results[0], quiet=True, move_hooks=False)
results[0].at_object_receive(character, self)<|docstring|>This hook is called by the engine whenever the player is moved into
this room.<|endoftext|> |
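For context, the two rows above come from the same Evennia room typeclass. Below is a minimal sketch of how the hooks fit together in one class; it assumes Evennia's `DefaultRoom` base class and `search_object` helper, the superuser guard is omitted for brevity, and the target room names are only the illustrative defaults from the snippet.

```python
# Minimal sketch (assumptions noted above): one Evennia room typeclass combining both hooks.
from evennia import DefaultRoom
from evennia.utils.search import search_object


class TeleportRoom(DefaultRoom):
    """Room that teleports arriving characters based on a stored puzzle clue."""

    def at_object_creation(self):
        """Called at first creation."""
        super().at_object_creation()
        self.db.puzzle_value = 1
        self.db.success_teleport_msg = "You are successful!"
        self.db.success_teleport_to = "treasure room"  # illustrative target room name
        self.db.failure_teleport_msg = "You fail!"
        self.db.failure_teleport_to = "dark cell"      # illustrative target room name

    def at_object_receive(self, character, source_location):
        """Called by the engine whenever a player is moved into this room."""
        if not character.has_account:
            return  # ignore NPCs and plain objects
        is_success = str(character.db.puzzle_clue) == str(self.db.puzzle_value)
        target = self.db.success_teleport_to if is_success else self.db.failure_teleport_to
        results = search_object(target)
        if not results or len(results) > 1:
            character.msg("no valid teleport target for %s was found." % target)
            return
        character.msg(self.db.success_teleport_msg if is_success else self.db.failure_teleport_msg)
        character.move_to(results[0], quiet=True, move_hooks=False)
        results[0].at_object_receive(character, self)
```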
9d08dbc9b5370fa4b887254694bc5bd2386546243d85fee29ca7c156d6e91193 | def not_readonly(path):
'\n Final function of the log\n Args:\n path: path of file\n\n '
print('Created log file {}'.format(path)) | Final function of the log
Args:
path: path of file | scripts/log_file_generator.py | not_readonly | ISISComputingGroup/EPICS-inst_servers | 1 | python | def not_readonly(path):
'\n Final function of the log\n Args:\n path: path of file\n\n '
print('Created log file {}'.format(path)) | def not_readonly(path):
'\n Final function of the log\n Args:\n path: path of file\n\n '
print('Created log file {}'.format(path))<|docstring|>Final function of the log
Args:
path: path of file<|endoftext|> |
a822bc8d24e40f4bc9e07edc0915b5577ed8a4fcc3e8ed2cf9b2c205eb43abc0 | def create_log(headers, columns, time_period, default_field, filename_template='default.log', host='127.0.0.1'):
'\n Create pv monitors based on the iocdatabase\n\n Returns: monitor for PV\n\n '
archive_mysql_abstraction_layer = SQLAbstraction('archive', 'report', '$report', host=host)
archiver_data_source = ArchiverDataSource(archive_mysql_abstraction_layer)
config_builder = ArchiveAccessConfigBuilder(filename_template, default_field=default_field)
for header in headers:
config_builder.header(header)
for (column_header, column_template) in columns:
config_builder.table_column(column_header, column_template)
adfc = ArchiveDataFileCreator(config_builder.build(), archiver_data_source, filename_template, make_file_readonly=not_readonly)
adfc.write_complete_file(time_period) | Create pv monitors based on the iocdatabase
Returns: monitor for PV | scripts/log_file_generator.py | create_log | ISISComputingGroup/EPICS-inst_servers | 1 | python | def create_log(headers, columns, time_period, default_field, filename_template='default.log', host='127.0.0.1'):
'\n Create pv monitors based on the iocdatabase\n\n Returns: monitor for PV\n\n '
archive_mysql_abstraction_layer = SQLAbstraction('archive', 'report', '$report', host=host)
archiver_data_source = ArchiverDataSource(archive_mysql_abstraction_layer)
config_builder = ArchiveAccessConfigBuilder(filename_template, default_field=default_field)
for header in headers:
config_builder.header(header)
for (column_header, column_template) in columns:
config_builder.table_column(column_header, column_template)
adfc = ArchiveDataFileCreator(config_builder.build(), archiver_data_source, filename_template, make_file_readonly=not_readonly)
adfc.write_complete_file(time_period) | def create_log(headers, columns, time_period, default_field, filename_template='default.log', host='127.0.0.1'):
'\n Create pv monitors based on the iocdatabase\n\n Returns: monitor for PV\n\n '
archive_mysql_abstraction_layer = SQLAbstraction('archive', 'report', '$report', host=host)
archiver_data_source = ArchiverDataSource(archive_mysql_abstraction_layer)
config_builder = ArchiveAccessConfigBuilder(filename_template, default_field=default_field)
for header in headers:
config_builder.header(header)
for (column_header, column_template) in columns:
config_builder.table_column(column_header, column_template)
adfc = ArchiveDataFileCreator(config_builder.build(), archiver_data_source, filename_template, make_file_readonly=not_readonly)
adfc.write_complete_file(time_period)<|docstring|>Create pv monitors based on the iocdatabase
Returns: monitor for PV<|endoftext|> |
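A hypothetical call to the `create_log` helper above might look like the sketch below. The header text, column templates, PV name, and `default_field` value are all illustrative assumptions, and the time-period object is deliberately left as a parameter because its construction is specific to this code base.

```python
# Hypothetical usage sketch of create_log() defined above; all literal values are assumptions.
def write_demo_log(time_period):
    headers = [
        "Demo instrument run log",          # assumed header text
    ]
    columns = [
        ("Time", "{TIME}"),                 # (column header, value template) - assumed template syntax
        ("Temperature", "{IN:DEMO:TEMP}"),  # hypothetical PV name
    ]
    create_log(
        headers,
        columns,
        time_period,                        # archiver time-period object, built elsewhere
        default_field="VAL",                # assumed default archiver field
        filename_template="demo.log",
        host="127.0.0.1",
    )
```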
29cb2fc6d7a79db6378bede35fe37e443a49bac0851a167ab82a81655bc267e0 | def init_model(self, session, checkpoint_file):
'\n Initializes DualCam-Net network parameters.\n '
variables_to_restore = slim.get_model_variables(self.scope)
init_fn = slim.assign_from_checkpoint_fn(checkpoint_file, variables_to_restore)
init_fn(session) | Initializes DualCam-Net network parameters. | models/unet_z.py | init_model | IIT-PAVIS/Acoustic-Image-Generation | 0 | python | def init_model(self, session, checkpoint_file):
'\n \n '
variables_to_restore = slim.get_model_variables(self.scope)
init_fn = slim.assign_from_checkpoint_fn(checkpoint_file, variables_to_restore)
init_fn(session) | def init_model(self, session, checkpoint_file):
'\n \n '
variables_to_restore = slim.get_model_variables(self.scope)
init_fn = slim.assign_from_checkpoint_fn(checkpoint_file, variables_to_restore)
init_fn(session)<|docstring|>Initializes DualCam-Net network parameters.<|endoftext|> |
efb986e8ff7da0d8d5fe4153b4517182d83ac31e3df290dc604e74a0e5282fd2 | def _build_network(self, inputs, mean2, std2, is_training=True, keep_prob=0.5, weight_decay=1e-06, scope='UNetAcoustic'):
'\n Builds a three-layer network that operates over a spectrogram.\n '
with tf.variable_scope(scope, 'UNetAcoustic', [inputs]) as sc:
end_points_collection = (sc.original_name_scope + '_end_points')
(conv1, pool1) = self.conv_conv_pool(inputs, [128, 128], is_training, weight_decay, name='1', strides=(3, 3))
conv2 = self.conv_conv_pool(pool1, [133, 133], is_training, weight_decay, name='3', pool=False)
mean = tf.layers.conv2d(conv2, 150, (12, 16), padding='VALID', name='mean')
mean = tf.reshape(mean, ((- 1), 150))
std = tf.nn.softplus(tf.layers.conv2d(conv2, 150, (12, 16), padding='VALID', name='std'))
std = tf.reshape(std, ((- 1), 150))
samples = tf.random_normal([tf.shape(std)[0], tf.shape(std)[1]], 0, 1, dtype=tf.float32)
guessed_z = (mean2 + (std2 * samples))
net = tf.layers.dense(guessed_z, ((12 * 16) * 12), activation=tf.nn.relu)
net = tf.reshape(net, ((- 1), 12, 16, 12))
net = tf.layers.conv2d(net, 133, (3, 3), activation=tf.nn.relu, padding='same')
up1 = self.upconv(net, 128, weight_decay, name='1', strides=3)
conv4 = self.conv_conv_pool(up1, [128, 128], is_training, weight_decay, name='4', pool=False)
conv5 = self.conv_conv_pool(conv4, [128, 128], is_training, weight_decay, name='5', pool=False)
net = tf.layers.conv2d(conv5, 12, (3, 3), name='final', activation=tf.nn.sigmoid, padding='same')
end_points = slim.layers.utils.convert_collection_to_dict(end_points_collection)
end_points['features'] = conv2
return (mean, std, net, end_points) | Builds a three-layer network that operates over a spectrogram. | models/unet_z.py | _build_network | IIT-PAVIS/Acoustic-Image-Generation | 0 | python | def _build_network(self, inputs, mean2, std2, is_training=True, keep_prob=0.5, weight_decay=1e-06, scope='UNetAcoustic'):
'\n \n '
with tf.variable_scope(scope, 'UNetAcoustic', [inputs]) as sc:
end_points_collection = (sc.original_name_scope + '_end_points')
(conv1, pool1) = self.conv_conv_pool(inputs, [128, 128], is_training, weight_decay, name='1', strides=(3, 3))
conv2 = self.conv_conv_pool(pool1, [133, 133], is_training, weight_decay, name='3', pool=False)
mean = tf.layers.conv2d(conv2, 150, (12, 16), padding='VALID', name='mean')
mean = tf.reshape(mean, ((- 1), 150))
std = tf.nn.softplus(tf.layers.conv2d(conv2, 150, (12, 16), padding='VALID', name='std'))
std = tf.reshape(std, ((- 1), 150))
samples = tf.random_normal([tf.shape(std)[0], tf.shape(std)[1]], 0, 1, dtype=tf.float32)
guessed_z = (mean2 + (std2 * samples))
net = tf.layers.dense(guessed_z, ((12 * 16) * 12), activation=tf.nn.relu)
net = tf.reshape(net, ((- 1), 12, 16, 12))
net = tf.layers.conv2d(net, 133, (3, 3), activation=tf.nn.relu, padding='same')
up1 = self.upconv(net, 128, weight_decay, name='1', strides=3)
conv4 = self.conv_conv_pool(up1, [128, 128], is_training, weight_decay, name='4', pool=False)
conv5 = self.conv_conv_pool(conv4, [128, 128], is_training, weight_decay, name='5', pool=False)
net = tf.layers.conv2d(conv5, 12, (3, 3), name='final', activation=tf.nn.sigmoid, padding='same')
end_points = slim.layers.utils.convert_collection_to_dict(end_points_collection)
end_points['features'] = conv2
return (mean, std, net, end_points) | def _build_network(self, inputs, mean2, std2, is_training=True, keep_prob=0.5, weight_decay=1e-06, scope='UNetAcoustic'):
'\n \n '
with tf.variable_scope(scope, 'UNetAcoustic', [inputs]) as sc:
end_points_collection = (sc.original_name_scope + '_end_points')
(conv1, pool1) = self.conv_conv_pool(inputs, [128, 128], is_training, weight_decay, name='1', strides=(3, 3))
conv2 = self.conv_conv_pool(pool1, [133, 133], is_training, weight_decay, name='3', pool=False)
mean = tf.layers.conv2d(conv2, 150, (12, 16), padding='VALID', name='mean')
mean = tf.reshape(mean, ((- 1), 150))
std = tf.nn.softplus(tf.layers.conv2d(conv2, 150, (12, 16), padding='VALID', name='std'))
std = tf.reshape(std, ((- 1), 150))
samples = tf.random_normal([tf.shape(std)[0], tf.shape(std)[1]], 0, 1, dtype=tf.float32)
guessed_z = (mean2 + (std2 * samples))
net = tf.layers.dense(guessed_z, ((12 * 16) * 12), activation=tf.nn.relu)
net = tf.reshape(net, ((- 1), 12, 16, 12))
net = tf.layers.conv2d(net, 133, (3, 3), activation=tf.nn.relu, padding='same')
up1 = self.upconv(net, 128, weight_decay, name='1', strides=3)
conv4 = self.conv_conv_pool(up1, [128, 128], is_training, weight_decay, name='4', pool=False)
conv5 = self.conv_conv_pool(conv4, [128, 128], is_training, weight_decay, name='5', pool=False)
net = tf.layers.conv2d(conv5, 12, (3, 3), name='final', activation=tf.nn.sigmoid, padding='same')
end_points = slim.layers.utils.convert_collection_to_dict(end_points_collection)
end_points['features'] = conv2
return (mean, std, net, end_points)<|docstring|>Builds a three-layer network that operates over a spectrogram.<|endoftext|> |
8a688f0d9a8eb7a8bc4c02fb12af13d5fe84696bd636cc7047bc5ab5f3d7b533 | def _build_model(self, acoustic_images, mean2, std2):
'\n Builds the hybrid model using slim and base functions.\n '
is_training = tf.placeholder(tf.bool, name='is_training')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
end_points = OrderedDict({'input': acoustic_images, 'is_training': is_training, 'keep_prob': keep_prob})
(mean, std, dualcam_net_output, dualcam_net_end_points) = self._build_network(acoustic_images, mean2, std2, is_training=is_training, scope=self.scope)
end_points.update(dualcam_net_end_points)
self.mean = mean
self.std = std
self.output = dualcam_net_output
self.network = end_points
self.train_vars = slim.get_trainable_variables(self.scope) | Builds the hybrid model using slim and base functions. | models/unet_z.py | _build_model | IIT-PAVIS/Acoustic-Image-Generation | 0 | python | def _build_model(self, acoustic_images, mean2, std2):
'\n \n '
is_training = tf.placeholder(tf.bool, name='is_training')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
end_points = OrderedDict({'input': acoustic_images, 'is_training': is_training, 'keep_prob': keep_prob})
(mean, std, dualcam_net_output, dualcam_net_end_points) = self._build_network(acoustic_images, mean2, std2, is_training=is_training, scope=self.scope)
end_points.update(dualcam_net_end_points)
self.mean = mean
self.std = std
self.output = dualcam_net_output
self.network = end_points
self.train_vars = slim.get_trainable_variables(self.scope) | def _build_model(self, acoustic_images, mean2, std2):
'\n \n '
is_training = tf.placeholder(tf.bool, name='is_training')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
end_points = OrderedDict({'input': acoustic_images, 'is_training': is_training, 'keep_prob': keep_prob})
(mean, std, dualcam_net_output, dualcam_net_end_points) = self._build_network(acoustic_images, mean2, std2, is_training=is_training, scope=self.scope)
end_points.update(dualcam_net_end_points)
self.mean = mean
self.std = std
self.output = dualcam_net_output
self.network = end_points
self.train_vars = slim.get_trainable_variables(self.scope)<|docstring|>Builds the hybrid model using slim and base functions.<|endoftext|> |
86b6d460b95b628073a4ff2bb65201c07ca0f88773daf03ac27db4fb435151f1 | def conv_conv_pool(self, input_, n_filters, is_training, weight_decay, name, pool=True, activation=tf.nn.relu, padding='same', filters=(3, 3), strides=(2, 2)):
'{Conv -> BN -> RELU}x2 -> {Pool, optional}\n Args:\n input_ (4-D Tensor): (batch_size, H, W, C)\n n_filters (list): number of filters [int, int]\n training (1-D Tensor): Boolean Tensor\n name (str): name postfix\n pool (bool): If True, MaxPool2D\n activation: Activaion functions\n Returns:\n net: output of the Convolution operations\n pool (optional): output of the max pooling operations\n '
net = input_
with tf.variable_scope('layer{}'.format(name)):
for (i, F) in enumerate(n_filters):
net = tf.layers.conv2d(net, F, (3, 3), activation=None, padding='same', kernel_regularizer=None, name='conv_{}'.format((i + 1)), kernel_initializer=tf.contrib.layers.xavier_initializer())
net = activation(net, name='relu{}_{}'.format(name, (i + 1)))
if (pool is False):
return net
pool = tf.layers.conv2d(net, F, filters, strides=strides, activation=None, padding=padding, kernel_regularizer=None, name='pool_{}'.format((i + 1)), kernel_initializer=tf.contrib.layers.xavier_initializer())
pool = activation(pool, name='relu_pool{}_{}'.format(name, (i + 1)))
return (net, pool) | {Conv -> BN -> RELU}x2 -> {Pool, optional}
Args:
input_ (4-D Tensor): (batch_size, H, W, C)
n_filters (list): number of filters [int, int]
training (1-D Tensor): Boolean Tensor
name (str): name postfix
pool (bool): If True, MaxPool2D
activation: Activaion functions
Returns:
net: output of the Convolution operations
pool (optional): output of the max pooling operations | models/unet_z.py | conv_conv_pool | IIT-PAVIS/Acoustic-Image-Generation | 0 | python | def conv_conv_pool(self, input_, n_filters, is_training, weight_decay, name, pool=True, activation=tf.nn.relu, padding='same', filters=(3, 3), strides=(2, 2)):
'{Conv -> BN -> RELU}x2 -> {Pool, optional}\n Args:\n input_ (4-D Tensor): (batch_size, H, W, C)\n n_filters (list): number of filters [int, int]\n training (1-D Tensor): Boolean Tensor\n name (str): name postfix\n pool (bool): If True, MaxPool2D\n activation: Activaion functions\n Returns:\n net: output of the Convolution operations\n pool (optional): output of the max pooling operations\n '
net = input_
with tf.variable_scope('layer{}'.format(name)):
for (i, F) in enumerate(n_filters):
net = tf.layers.conv2d(net, F, (3, 3), activation=None, padding='same', kernel_regularizer=None, name='conv_{}'.format((i + 1)), kernel_initializer=tf.contrib.layers.xavier_initializer())
net = activation(net, name='relu{}_{}'.format(name, (i + 1)))
if (pool is False):
return net
pool = tf.layers.conv2d(net, F, filters, strides=strides, activation=None, padding=padding, kernel_regularizer=None, name='pool_{}'.format((i + 1)), kernel_initializer=tf.contrib.layers.xavier_initializer())
pool = activation(pool, name='relu_pool{}_{}'.format(name, (i + 1)))
return (net, pool) | def conv_conv_pool(self, input_, n_filters, is_training, weight_decay, name, pool=True, activation=tf.nn.relu, padding='same', filters=(3, 3), strides=(2, 2)):
'{Conv -> BN -> RELU}x2 -> {Pool, optional}\n Args:\n input_ (4-D Tensor): (batch_size, H, W, C)\n n_filters (list): number of filters [int, int]\n training (1-D Tensor): Boolean Tensor\n name (str): name postfix\n pool (bool): If True, MaxPool2D\n activation: Activaion functions\n Returns:\n net: output of the Convolution operations\n pool (optional): output of the max pooling operations\n '
net = input_
with tf.variable_scope('layer{}'.format(name)):
for (i, F) in enumerate(n_filters):
net = tf.layers.conv2d(net, F, (3, 3), activation=None, padding='same', kernel_regularizer=None, name='conv_{}'.format((i + 1)), kernel_initializer=tf.contrib.layers.xavier_initializer())
net = activation(net, name='relu{}_{}'.format(name, (i + 1)))
if (pool is False):
return net
pool = tf.layers.conv2d(net, F, filters, strides=strides, activation=None, padding=padding, kernel_regularizer=None, name='pool_{}'.format((i + 1)), kernel_initializer=tf.contrib.layers.xavier_initializer())
pool = activation(pool, name='relu_pool{}_{}'.format(name, (i + 1)))
return (net, pool)<|docstring|>{Conv -> BN -> RELU}x2 -> {Pool, optional}
Args:
input_ (4-D Tensor): (batch_size, H, W, C)
n_filters (list): number of filters [int, int]
training (1-D Tensor): Boolean Tensor
name (str): name postfix
pool (bool): If True, MaxPool2D
activation: Activaion functions
Returns:
net: output of the Convolution operations
pool (optional): output of the max pooling operations<|endoftext|> |
08371247b71e9e08cb792ab4624ae57222e9373b4e5849ff2ab248301eaa5c60 | def upconv(self, inputA, n_filter, weight_decay, name, kernel_size=2, strides=2):
'Upsample `inputA` and concat with `input_B`\n Args:\n input_A (4-D Tensor): (N, H, W, C)\n input_B (4-D Tensor): (N, 2*H, 2*H, C2)\n name (str): name of the concat operation\n Returns:\n output (4-D Tensor): (N, 2*H, 2*W, C + C2)\n '
up_conv = self.upconv_2D(inputA, n_filter, weight_decay, name, kernel_size=kernel_size, strides=strides)
return up_conv | Upsample `inputA` and concat with `input_B`
Args:
input_A (4-D Tensor): (N, H, W, C)
input_B (4-D Tensor): (N, 2*H, 2*H, C2)
name (str): name of the concat operation
Returns:
output (4-D Tensor): (N, 2*H, 2*W, C + C2) | models/unet_z.py | upconv | IIT-PAVIS/Acoustic-Image-Generation | 0 | python | def upconv(self, inputA, n_filter, weight_decay, name, kernel_size=2, strides=2):
'Upsample `inputA` and concat with `input_B`\n Args:\n input_A (4-D Tensor): (N, H, W, C)\n input_B (4-D Tensor): (N, 2*H, 2*H, C2)\n name (str): name of the concat operation\n Returns:\n output (4-D Tensor): (N, 2*H, 2*W, C + C2)\n '
up_conv = self.upconv_2D(inputA, n_filter, weight_decay, name, kernel_size=kernel_size, strides=strides)
return up_conv | def upconv(self, inputA, n_filter, weight_decay, name, kernel_size=2, strides=2):
'Upsample `inputA` and concat with `input_B`\n Args:\n input_A (4-D Tensor): (N, H, W, C)\n input_B (4-D Tensor): (N, 2*H, 2*H, C2)\n name (str): name of the concat operation\n Returns:\n output (4-D Tensor): (N, 2*H, 2*W, C + C2)\n '
up_conv = self.upconv_2D(inputA, n_filter, weight_decay, name, kernel_size=kernel_size, strides=strides)
return up_conv<|docstring|>Upsample `inputA` and concat with `input_B`
Args:
input_A (4-D Tensor): (N, H, W, C)
input_B (4-D Tensor): (N, 2*H, 2*H, C2)
name (str): name of the concat operation
Returns:
output (4-D Tensor): (N, 2*H, 2*W, C + C2)<|endoftext|> |
636e7d436af0a6af5af48c47abe8e0f7bb2f7b91ff7614b88a799529f1fc4512 | def upconv_2D(self, tensor, n_filter, weight_decay, name, kernel_size=2, strides=2):
'Up Convolution `tensor` by 2 times\n Args:\n tensor (4-D Tensor): (N, H, W, C)\n n_filter (int): Filter Size\n name (str): name of upsampling operations\n Returns:\n output (4-D Tensor): (N, 2 * H, 2 * W, C)\n '
return tf.layers.conv2d_transpose(tensor, filters=n_filter, kernel_size=kernel_size, strides=strides, kernel_regularizer=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='upsample_{}'.format(name)) | Up Convolution `tensor` by 2 times
Args:
tensor (4-D Tensor): (N, H, W, C)
n_filter (int): Filter Size
name (str): name of upsampling operations
Returns:
output (4-D Tensor): (N, 2 * H, 2 * W, C) | models/unet_z.py | upconv_2D | IIT-PAVIS/Acoustic-Image-Generation | 0 | python | def upconv_2D(self, tensor, n_filter, weight_decay, name, kernel_size=2, strides=2):
'Up Convolution `tensor` by 2 times\n Args:\n tensor (4-D Tensor): (N, H, W, C)\n n_filter (int): Filter Size\n name (str): name of upsampling operations\n Returns:\n output (4-D Tensor): (N, 2 * H, 2 * W, C)\n '
return tf.layers.conv2d_transpose(tensor, filters=n_filter, kernel_size=kernel_size, strides=strides, kernel_regularizer=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='upsample_{}'.format(name)) | def upconv_2D(self, tensor, n_filter, weight_decay, name, kernel_size=2, strides=2):
'Up Convolution `tensor` by 2 times\n Args:\n tensor (4-D Tensor): (N, H, W, C)\n n_filter (int): Filter Size\n name (str): name of upsampling operations\n Returns:\n output (4-D Tensor): (N, 2 * H, 2 * W, C)\n '
return tf.layers.conv2d_transpose(tensor, filters=n_filter, kernel_size=kernel_size, strides=strides, kernel_regularizer=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), name='upsample_{}'.format(name))<|docstring|>Up Convolution `tensor` by 2 times
Args:
tensor (4-D Tensor): (N, H, W, C)
n_filter (int): Filter Size
name (str): name of upsampling operations
Returns:
output (4-D Tensor): (N, 2 * H, 2 * W, C)<|endoftext|> |
8a86523fc0dee7e8a9de912731bfdc428a41d7e9aa02414852264282e9562763 | def deprecated(dies_at):
'Mark a flask view as deprecated\n\n Usage:\n @deprecated(dies_at=datetime(2015, 8, 1))\n '
def decorator(view):
@wraps(view)
def func(*args, **kwargs):
message = "Calling deprecated view '{view_name}'. Dies in {time_left}."
time_left = (dies_at - datetime.utcnow())
extra = {'view_name': view.__name__, 'time_left': time_left}
if (time_left < timedelta(days=7)):
current_app.logger.error(message, extra=extra)
else:
current_app.logger.warning(message, extra=extra)
response = view(*args, **kwargs)
response.headers['DM-Deprecated'] = 'Dies in {}'.format(time_left)
return response
return func
return decorator | Mark a flask view as deprecated
Usage:
@deprecated(dies_at=datetime(2015, 8, 1)) | dmutils/deprecation.py | deprecated | ndavisontest/dto-digitalmarketplace-utils | 3 | python | def deprecated(dies_at):
'Mark a flask view as deprecated\n\n Usage:\n @deprecated(dies_at=datetime(2015, 8, 1))\n '
def decorator(view):
@wraps(view)
def func(*args, **kwargs):
message = "Calling deprecated view '{view_name}'. Dies in {time_left}."
time_left = (dies_at - datetime.utcnow())
extra = {'view_name': view.__name__, 'time_left': time_left}
if (time_left < timedelta(days=7)):
current_app.logger.error(message, extra=extra)
else:
current_app.logger.warning(message, extra=extra)
response = view(*args, **kwargs)
response.headers['DM-Deprecated'] = 'Dies in {}'.format(time_left)
return response
return func
return decorator | def deprecated(dies_at):
'Mark a flask view as deprecated\n\n Usage:\n @deprecated(dies_at=datetime(2015, 8, 1))\n '
def decorator(view):
@wraps(view)
def func(*args, **kwargs):
message = "Calling deprecated view '{view_name}'. Dies in {time_left}."
time_left = (dies_at - datetime.utcnow())
extra = {'view_name': view.__name__, 'time_left': time_left}
if (time_left < timedelta(days=7)):
current_app.logger.error(message, extra=extra)
else:
current_app.logger.warning(message, extra=extra)
response = view(*args, **kwargs)
response.headers['DM-Deprecated'] = 'Dies in {}'.format(time_left)
return response
return func
return decorator<|docstring|>Mark a flask view as deprecated
Usage:
@deprecated(dies_at=datetime(2015, 8, 1))<|endoftext|> |
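A minimal usage sketch of the decorator above on a Flask view follows; the route, view name, and removal date are illustrative, and `deprecated` is imported from the module path given in the row (`dmutils/deprecation.py`).

```python
# Illustrative only: applying the deprecated() decorator above to a Flask view.
from datetime import datetime

from flask import Flask, jsonify

from dmutils.deprecation import deprecated

app = Flask(__name__)


@app.route("/old-endpoint")                    # hypothetical route
@deprecated(dies_at=datetime(2025, 8, 1))      # hypothetical removal date
def old_endpoint():
    # The wrapper logs a warning (or an error inside the final week) and adds a
    # "DM-Deprecated: Dies in <time left>" header to the response it returns.
    return jsonify({"status": "still here, for now"})
```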
374d1b83457c67d67ce3a261e26fefd834c46fe6556f65041eebc4ccf9e222b4 | def to_dict(self):
'\n Convert the object into a json serializable dictionary.\n Note: It uses the private method _save_to_input_dict of the parent.\n :return dict: json serializable dictionary containing the needed information to instantiate the object\n '
input_dict = super(ChangeKernelBase, self)._save_to_input_dict()
input_dict['class'] = str('ChangeKernel')
return input_dict | Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object | GPy_ABCD/Kernels/changeOperators.py | to_dict | juanluislm/GPy-ABCD | 0 | python | def to_dict(self):
'\n Convert the object into a json serializable dictionary.\n Note: It uses the private method _save_to_input_dict of the parent.\n :return dict: json serializable dictionary containing the needed information to instantiate the object\n '
input_dict = super(ChangeKernelBase, self)._save_to_input_dict()
input_dict['class'] = str('ChangeKernel')
return input_dict | def to_dict(self):
'\n Convert the object into a json serializable dictionary.\n Note: It uses the private method _save_to_input_dict of the parent.\n :return dict: json serializable dictionary containing the needed information to instantiate the object\n '
input_dict = super(ChangeKernelBase, self)._save_to_input_dict()
input_dict['class'] = str('ChangeKernel')
return input_dict<|docstring|>Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object<|endoftext|> |
51452e03d667aefbb6f2fb654db607ad4261a6b1368ce5e3cac1825d4206d696 | def process(self):
'\n Process the expression in whatever ways are specified by the state options.\n '
self._execute()
self._post_process()
self.state._inspect('expr', BP_AFTER, expr=self.expr) | Process the expression in whatever ways are specified by the state options. | angr/engines/vex/expressions/base.py | process | cclauss/angr | 2 | python | def process(self):
'\n \n '
self._execute()
self._post_process()
self.state._inspect('expr', BP_AFTER, expr=self.expr) | def process(self):
'\n \n '
self._execute()
self._post_process()
self.state._inspect('expr', BP_AFTER, expr=self.expr)<|docstring|>Process the expression in whatever ways are specified by the state options.<|endoftext|> |
a7a00455e5f5764677204028f74b79f269e5e27bc7bb1cf02b073522c7923cf8 | def _translate_expr(self, expr):
'Translate a single IRExpr, honoring mode and options and so forth. Also updates state...'
e = translate_expr(expr, self.state)
self._record_expr(e)
self.child_exprs.append(e)
return e | Translate a single IRExpr, honoring mode and options and so forth. Also updates state... | angr/engines/vex/expressions/base.py | _translate_expr | cclauss/angr | 2 | python | def _translate_expr(self, expr):
e = translate_expr(expr, self.state)
self._record_expr(e)
self.child_exprs.append(e)
return e | def _translate_expr(self, expr):
e = translate_expr(expr, self.state)
self._record_expr(e)
self.child_exprs.append(e)
return e<|docstring|>Translate a single IRExpr, honoring mode and options and so forth. Also updates state...<|endoftext|> |
89ea995c953b1991c40b08cfa9d53ba4a40365aff60595273f038dfd3e6216ba | def _translate_exprs(self, exprs):
'Translates a sequence of IRExprs into SimIRExprs.'
return [self._translate_expr(e) for e in exprs] | Translates a sequence of IRExprs into SimIRExprs. | angr/engines/vex/expressions/base.py | _translate_exprs | cclauss/angr | 2 | python | def _translate_exprs(self, exprs):
return [self._translate_expr(e) for e in exprs] | def _translate_exprs(self, exprs):
return [self._translate_expr(e) for e in exprs]<|docstring|>Translates a sequence of IRExprs into SimIRExprs.<|endoftext|> |
cfa969d598fb94af19b483b9dcf93a115c224b33770da5657ab2c54df933a466 | def reg_deps(self):
'\n Returns a set of registers that this IRExpr depends on.\n '
if ((len(self.actions) == 0) or (o.ACTION_DEPS not in self.state.options)):
return _nonset
else:
return frozenset.union(*[r.reg_deps for r in self.actions if (type(r) in (SimActionData, SimActionOperation))]) | Returns a set of registers that this IRExpr depends on. | angr/engines/vex/expressions/base.py | reg_deps | cclauss/angr | 2 | python | def reg_deps(self):
'\n \n '
if ((len(self.actions) == 0) or (o.ACTION_DEPS not in self.state.options)):
return _nonset
else:
return frozenset.union(*[r.reg_deps for r in self.actions if (type(r) in (SimActionData, SimActionOperation))]) | def reg_deps(self):
'\n \n '
if ((len(self.actions) == 0) or (o.ACTION_DEPS not in self.state.options)):
return _nonset
else:
return frozenset.union(*[r.reg_deps for r in self.actions if (type(r) in (SimActionData, SimActionOperation))])<|docstring|>Returns a set of registers that this IRExpr depends on.<|endoftext|> |
675c1905aece450469f22da828703ad52bf93289c3872e98e6936f2e50569a71 | def tmp_deps(self):
'\n Returns a set of tmps that this IRExpr depends on\n '
if ((len(self.actions) == 0) or (o.ACTION_DEPS not in self.state.options)):
return _nonset
else:
return frozenset.union(*[r.tmp_deps for r in self.actions if (type(r) in (SimActionData, SimActionOperation))]) | Returns a set of tmps that this IRExpr depends on | angr/engines/vex/expressions/base.py | tmp_deps | cclauss/angr | 2 | python | def tmp_deps(self):
'\n \n '
if ((len(self.actions) == 0) or (o.ACTION_DEPS not in self.state.options)):
return _nonset
else:
return frozenset.union(*[r.tmp_deps for r in self.actions if (type(r) in (SimActionData, SimActionOperation))]) | def tmp_deps(self):
'\n \n '
if ((len(self.actions) == 0) or (o.ACTION_DEPS not in self.state.options)):
return _nonset
else:
return frozenset.union(*[r.tmp_deps for r in self.actions if (type(r) in (SimActionData, SimActionOperation))])<|docstring|>Returns a set of tmps that this IRExpr depends on<|endoftext|> |
b4ee8ef157dc98c9f49f2898f375b0888ee5a381963438ce7e3cd788d4636741 | def sample_address(user, **params):
'Create and return a sample address object'
defaults = {'name': 'Name', 'line1': 'Line1', 'line2': 'Line2', 'city': 'City Name', 'district': 'Disctict Name', 'state': 'State', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
defaults.update(params)
return models.UserAddress.objects.create(user=user, **defaults) | Create and return a sample address object | core/tests/test_models.py | sample_address | abhishekmorya/organic-shop-rest-api | 0 | python | def sample_address(user, **params):
defaults = {'name': 'Name', 'line1': 'Line1', 'line2': 'Line2', 'city': 'City Name', 'district': 'Disctict Name', 'state': 'State', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
defaults.update(params)
return models.UserAddress.objects.create(user=user, **defaults) | def sample_address(user, **params):
defaults = {'name': 'Name', 'line1': 'Line1', 'line2': 'Line2', 'city': 'City Name', 'district': 'Disctict Name', 'state': 'State', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
defaults.update(params)
return models.UserAddress.objects.create(user=user, **defaults)<|docstring|>Create and return a sample address object<|endoftext|> |
65ed6358ca03acfc74781e23b8e5bac2592fbaea451e7b7ae9485c77ed4460ab | def sample_category(user, **params):
'Create and return a sample category object'
defaults = {'name': 'Fruits', 'desc': 'Fresh Fruits'}
defaults.update(params)
return models.Category.objects.create(user=user, **defaults) | Create and return a sample category object | core/tests/test_models.py | sample_category | abhishekmorya/organic-shop-rest-api | 0 | python | def sample_category(user, **params):
defaults = {'name': 'Fruits', 'desc': 'Fresh Fruits'}
defaults.update(params)
return models.Category.objects.create(user=user, **defaults) | def sample_category(user, **params):
defaults = {'name': 'Fruits', 'desc': 'Fresh Fruits'}
defaults.update(params)
return models.Category.objects.create(user=user, **defaults)<|docstring|>Create and return a sample category object<|endoftext|> |
2f82c66ce466ffe6bc861e15c46c19ff27e1fb68adaf0098275d557e77835f67 | def sample_product(user, **params):
'Create and return a sample product object'
category = (params['category'] if ('category' in params.keys()) else sample_category(user))
defaults = {'category': category, 'title': 'Brown Bread', 'desc': 'Healthy Brown bread', 'price': 23.0, 'quantity': 5, 'unit': models.Product.UNIT, 'image': 'image url'}
defaults.update(params)
return models.Product.objects.create(user=user, **defaults) | Create and return a sample product object | core/tests/test_models.py | sample_product | abhishekmorya/organic-shop-rest-api | 0 | python | def sample_product(user, **params):
category = (params['category'] if ('category' in params.keys()) else sample_category(user))
defaults = {'category': category, 'title': 'Brown Bread', 'desc': 'Healthy Brown bread', 'price': 23.0, 'quantity': 5, 'unit': models.Product.UNIT, 'image': 'image url'}
defaults.update(params)
return models.Product.objects.create(user=user, **defaults) | def sample_product(user, **params):
category = (params['category'] if ('category' in params.keys()) else sample_category(user))
defaults = {'category': category, 'title': 'Brown Bread', 'desc': 'Healthy Brown bread', 'price': 23.0, 'quantity': 5, 'unit': models.Product.UNIT, 'image': 'image url'}
defaults.update(params)
return models.Product.objects.create(user=user, **defaults)<|docstring|>Create and return a sample product object<|endoftext|> |
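For illustration, the `sample_*` helpers above are the fixtures the test cases below lean on; a hedged sketch of how they compose inside a single test (the category name, product title, and import path are assumptions) could look like this:

```python
# Illustrative sketch: composing the sample_* helpers above inside a Django test case.
from django.test import TestCase

from core import models  # assumed import path, matching the models.* usage above


class SampleHelperCompositionTests(TestCase):
    def test_product_is_linked_to_its_category(self):
        user = sample_user()                                 # helper defined in this test module
        category = sample_category(user, name='Vegetables')  # hypothetical category name
        product = sample_product(user, category=category, title='Carrot')
        self.assertEqual(product.category, category)
        self.assertEqual(models.Product.objects.count(), 1)
```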
f9dcd04820c34471752362bb89c3a0b8e03cb22ee3e20fb844851d374298ca93 | def test_create_user_with_email_successful(self):
'Test creating user with new email'
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password)) | Test creating user with new email | core/tests/test_models.py | test_create_user_with_email_successful | abhishekmorya/organic-shop-rest-api | 0 | python | def test_create_user_with_email_successful(self):
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password)) | def test_create_user_with_email_successful(self):
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))<|docstring|>Test creating user with new email<|endoftext|> |
b09329b3f44223dedd65ac47b3bd133165ff589b09ef2b6cce77752a292d3418 | def test_new_user_with_email_normalize(self):
'Test the new user created with email normalized'
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email.lower()) | Test the new user created with email normalized | core/tests/test_models.py | test_new_user_with_email_normalize | abhishekmorya/organic-shop-rest-api | 0 | python | def test_new_user_with_email_normalize(self):
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email.lower()) | def test_new_user_with_email_normalize(self):
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email.lower())<|docstring|>Test the new user created with email normalized<|endoftext|> |
ffb24f278ee69a091959831ae3a5b7d129038c5637263524b5f2cb72a5db8107 | def test_new_user_invalid_mail(self):
'Test creating user with no email raise Value error'
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='Testpassword') | Test creating user with no email raise Value error | core/tests/test_models.py | test_new_user_invalid_mail | abhishekmorya/organic-shop-rest-api | 0 | python | def test_new_user_invalid_mail(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='Testpassword') | def test_new_user_invalid_mail(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='Testpassword')<|docstring|>Test creating user with no email raise Value error<|endoftext|> |
ace5ac9606f2aa94f6a9c831b4393cc25f84ebcaeb89e1c78a8c147763a1025c | def test_create_new_superuser(self):
'Test creating a new superuser'
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_superuser(email=email, password=password)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser) | Test creating a new superuser | core/tests/test_models.py | test_create_new_superuser | abhishekmorya/organic-shop-rest-api | 0 | python | def test_create_new_superuser(self):
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_superuser(email=email, password=password)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser) | def test_create_new_superuser(self):
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_superuser(email=email, password=password)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)<|docstring|>Test creating a new superuser<|endoftext|> |
1e1148317effd06cd206724a17558dbdf45470496d2d6150ebe078167d5e72b6 | def test_address_str(self):
'Test the address string representation'
user = sample_user()
payload = {'name': 'Name', 'line1': 'Line1', 'line2': 'Line2', 'city': 'City Name', 'district': 'Disctict Name', 'state': 'State', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
address = models.UserAddress.objects.create(user=user, **payload)
self.assertEqual(str(address), 'name: {}, district: {}, state: {}, pincode: {}'.format(address.name, address.district, address.state, address.pincode)) | Test the address string representation | core/tests/test_models.py | test_address_str | abhishekmorya/organic-shop-rest-api | 0 | python | def test_address_str(self):
user = sample_user()
payload = {'name': 'Name', 'line1': 'Line1', 'line2': 'Line2', 'city': 'City Name', 'district': 'Disctict Name', 'state': 'State', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
address = models.UserAddress.objects.create(user=user, **payload)
self.assertEqual(str(address), 'name: {}, district: {}, state: {}, pincode: {}'.format(address.name, address.district, address.state, address.pincode)) | def test_address_str(self):
user = sample_user()
payload = {'name': 'Name', 'line1': 'Line1', 'line2': 'Line2', 'city': 'City Name', 'district': 'Disctict Name', 'state': 'State', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
address = models.UserAddress.objects.create(user=user, **payload)
self.assertEqual(str(address), 'name: {}, district: {}, state: {}, pincode: {}'.format(address.name, address.district, address.state, address.pincode))<|docstring|>Test the address string representation<|endoftext|> |
e089de9c10923aa0da7a570c9dc712e5ca00f40529c90ed7c67968ca05501672 | def test_category_str(self):
'Test the string representation of category object'
user = sample_user()
payload = {'name': 'Category', 'desc': 'Category desc'}
category = models.Category.objects.create(user=user, **payload)
self.assertEqual(str(category), payload['name']) | Test the string representation of category object | core/tests/test_models.py | test_category_str | abhishekmorya/organic-shop-rest-api | 0 | python | def test_category_str(self):
user = sample_user()
payload = {'name': 'Category', 'desc': 'Category desc'}
category = models.Category.objects.create(user=user, **payload)
self.assertEqual(str(category), payload['name']) | def test_category_str(self):
user = sample_user()
payload = {'name': 'Category', 'desc': 'Category desc'}
category = models.Category.objects.create(user=user, **payload)
self.assertEqual(str(category), payload['name'])<|docstring|>Test the string representation of category object<|endoftext|> |
622388af970c17a822f326ef62d7ceb51f46accfd3d7e2825636cbedb969d26d | def test_unique_categories(self):
'Test no duplicate categories are created'
user = sample_user()
models.Category.objects.create(user=user, name='Category')
with self.assertRaises(utils.IntegrityError):
models.Category.objects.create(user=user, name='Category') | Test no duplicate categories are created | core/tests/test_models.py | test_unique_categories | abhishekmorya/organic-shop-rest-api | 0 | python | def test_unique_categories(self):
user = sample_user()
models.Category.objects.create(user=user, name='Category')
with self.assertRaises(utils.IntegrityError):
models.Category.objects.create(user=user, name='Category') | def test_unique_categories(self):
user = sample_user()
models.Category.objects.create(user=user, name='Category')
with self.assertRaises(utils.IntegrityError):
models.Category.objects.create(user=user, name='Category')<|docstring|>Test no duplicate categories are created<|endoftext|> |
725ee92f9929c9120451d9e27decd75226dea4e9e489a017cd9357817e5f47b0 | def test_product_str(self):
'Test the string representation of product object'
user = sample_user()
category = sample_category(user=user)
payload = {'category': category, 'title': 'Brown Bread', 'desc': 'Healthy Brown bread', 'price': 23.0, 'quantity': 5, 'unit': models.Product.UNIT, 'image': 'image url'}
product = models.Product.objects.create(user=user, **payload)
self.assertEqual(str(product), payload['title']) | Test the string representation of product object | core/tests/test_models.py | test_product_str | abhishekmorya/organic-shop-rest-api | 0 | python | def test_product_str(self):
user = sample_user()
category = sample_category(user=user)
payload = {'category': category, 'title': 'Brown Bread', 'desc': 'Healthy Brown bread', 'price': 23.0, 'quantity': 5, 'unit': models.Product.UNIT, 'image': 'image url'}
product = models.Product.objects.create(user=user, **payload)
self.assertEqual(str(product), payload['title']) | def test_product_str(self):
user = sample_user()
category = sample_category(user=user)
payload = {'category': category, 'title': 'Brown Bread', 'desc': 'Healthy Brown bread', 'price': 23.0, 'quantity': 5, 'unit': models.Product.UNIT, 'image': 'image url'}
product = models.Product.objects.create(user=user, **payload)
self.assertEqual(str(product), payload['title'])<|docstring|>Test the string representation of product object<|endoftext|> |
c32d5ae4ae49883dc378b724cde4ca4e38b007b12bbbbb3637a0751291837d98 | @patch('uuid.uuid4')
def test_product_filename_uuid(self, mock_uuid):
'Test product image is saved to correct path'
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.uploaded_images_for_products(None, 'my-image.jpg')
exp_path = f'uploads/products/{uuid}.jpg'
self.assertEqual(file_path, exp_path) | Test product image is saved to correct path | core/tests/test_models.py | test_product_filename_uuid | abhishekmorya/organic-shop-rest-api | 0 | python | @patch('uuid.uuid4')
def test_product_filename_uuid(self, mock_uuid):
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.uploaded_images_for_products(None, 'my-image.jpg')
exp_path = f'uploads/products/{uuid}.jpg'
self.assertEqual(file_path, exp_path) | @patch('uuid.uuid4')
def test_product_filename_uuid(self, mock_uuid):
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.uploaded_images_for_products(None, 'my-image.jpg')
exp_path = f'uploads/products/{uuid}.jpg'
self.assertEqual(file_path, exp_path)<|docstring|>Test product image is saved to correct path<|endoftext|> |
d6a4f818ceb509013d50d0c040ed667b923f3a566fe0bae588144dd906a32775 | def test_one_to_one_mapping_of_user_address(self):
'Test the one to one mapping of user and UserAddress'
user = sample_user()
address1 = sample_address(user=user)
payload = {'name': 'Abhishek', 'line1': 'Line1', 'line2': 'Line2', 'city': 'Pune', 'district': 'Pune', 'state': 'Maharastra', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
address2 = sample_address(user=user, **payload)
user_details = models.UserDetails.objects.create(user=user, selectedAddress=address1)
user_details.selectedAddress = address2
self.assertEqual(str(user_details.selectedAddress), str(address2)) | Test the one to one mapping of user and UserAddress | core/tests/test_models.py | test_one_to_one_mapping_of_user_address | abhishekmorya/organic-shop-rest-api | 0 | python | def test_one_to_one_mapping_of_user_address(self):
user = sample_user()
address1 = sample_address(user=user)
payload = {'name': 'Abhishek', 'line1': 'Line1', 'line2': 'Line2', 'city': 'Pune', 'district': 'Pune', 'state': 'Maharastra', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
address2 = sample_address(user=user, **payload)
user_details = models.UserDetails.objects.create(user=user, selectedAddress=address1)
user_details.selectedAddress = address2
self.assertEqual(str(user_details.selectedAddress), str(address2)) | def test_one_to_one_mapping_of_user_address(self):
user = sample_user()
address1 = sample_address(user=user)
payload = {'name': 'Abhishek', 'line1': 'Line1', 'line2': 'Line2', 'city': 'Pune', 'district': 'Pune', 'state': 'Maharastra', 'pincode': '123456', 'addressType': models.UserAddress.HOME}
address2 = sample_address(user=user, **payload)
user_details = models.UserDetails.objects.create(user=user, selectedAddress=address1)
user_details.selectedAddress = address2
self.assertEqual(str(user_details.selectedAddress), str(address2))<|docstring|>Test the one to one mapping of user and UserAddress<|endoftext|> |
7f2569b6e0e29f4597c8a0a130311b2cbdaeedcd08cc4948d195f703b66429c2 | def test_shopping_cart_str(self):
'Test string representation of Shopping Cart Object'
user = sample_user()
product = sample_product(user)
payload = {'product': product, 'count': 2}
shoppingCart = models.ShoppingCart.objects.create(user=user, **payload)
models.ShoppingCart.objects.count()
self.assertEqual(f"{str(product)}, {payload['count']}", str(shoppingCart)) | Test string representation of Shopping Cart Object | core/tests/test_models.py | test_shopping_cart_str | abhishekmorya/organic-shop-rest-api | 0 | python | def test_shopping_cart_str(self):
user = sample_user()
product = sample_product(user)
payload = {'product': product, 'count': 2}
shoppingCart = models.ShoppingCart.objects.create(user=user, **payload)
models.ShoppingCart.objects.count()
self.assertEqual(f"{str(product)}, {payload['count']}", str(shoppingCart)) | def test_shopping_cart_str(self):
user = sample_user()
product = sample_product(user)
payload = {'product': product, 'count': 2}
shoppingCart = models.ShoppingCart.objects.create(user=user, **payload)
models.ShoppingCart.objects.count()
self.assertEqual(f"{str(product)}, {payload['count']}", str(shoppingCart))<|docstring|>Test string representation of Shopping Cart Object<|endoftext|> |
16010b87fb0cc528c71cfd0f4c9e6553d2b0618adc1c02ce325eb22603b8509c | def test_payment_mode_str(self):
'Test string representation of Payment mode object'
user = sample_user()
payload = {'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True}
paymentMode = models.PaymentMode.objects.create(user=user, **payload)
self.assertEqual(payload['title'], str(paymentMode)) | Test string representation of Payment mode object | core/tests/test_models.py | test_payment_mode_str | abhishekmorya/organic-shop-rest-api | 0 | python | def test_payment_mode_str(self):
user = sample_user()
payload = {'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True}
paymentMode = models.PaymentMode.objects.create(user=user, **payload)
self.assertEqual(payload['title'], str(paymentMode)) | def test_payment_mode_str(self):
user = sample_user()
payload = {'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True}
paymentMode = models.PaymentMode.objects.create(user=user, **payload)
self.assertEqual(payload['title'], str(paymentMode))<|docstring|>Test string representation of Payment mode object<|endoftext|> |
b5b727cfb391d0a7530954221705796267219176a5b60655000e27a811d2e691 | def test_offer_str(self):
'Test string representation of Offer object'
user = sample_user()
payload = {'title': 'Summer Season', 'percentage': 20.5, 'desc': 'Summer season sale of 2022', 'expiry_date': timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59))}
offer = models.Offer.objects.create(user=user, **payload)
self.assertEqual(payload['title'], str(offer)) | Test string representation of Offer object | core/tests/test_models.py | test_offer_str | abhishekmorya/organic-shop-rest-api | 0 | python | def test_offer_str(self):
user = sample_user()
payload = {'title': 'Summer Season', 'percentage': 20.5, 'desc': 'Summer season sale of 2022', 'expiry_date': timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59))}
offer = models.Offer.objects.create(user=user, **payload)
self.assertEqual(payload['title'], str(offer)) | def test_offer_str(self):
user = sample_user()
payload = {'title': 'Summer Season', 'percentage': 20.5, 'desc': 'Summer season sale of 2022', 'expiry_date': timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59))}
offer = models.Offer.objects.create(user=user, **payload)
self.assertEqual(payload['title'], str(offer))<|docstring|>Test string representation of Offer object<|endoftext|> |
d712530189502c58e3ec2a6ca16fef0082b3f92b71ae219636e195a3e4bb14ff | def test_creating_order(self):
'Test string representation of Order'
user = sample_user()
address = sample_address(user)
payment_mode = models.PaymentMode.objects.create(user=user, **{'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True})
payload = {'shipping_address': address, 'billing_address': address, 'payment_mode': payment_mode}
order = models.Order.objects.create(user=user, **payload)
product1 = sample_product(user)
sc1 = models.ShoppingCart.objects.create(user=user, product=product1, count=4)
product_payload = {'category': sample_category(user, name='Sauce'), 'title': 'Jam', 'desc': 'The sweet sour Jam', 'price': 10.0, 'quantity': 2, 'unit': models.Product.UNIT, 'image': 'image url'}
product2 = sample_product(user, **product_payload)
sc2 = models.ShoppingCart.objects.create(user=user, product=product2, count=3)
order.cartItems.add(sc1, sc2)
offer = models.Offer.objects.create(user=user, title='Summer Sale', percentage=20.5, desc='Summer Sale 2022', expiry_date=timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59)))
order.offers_applied.add(offer)
count = models.Order.objects.count()
self.assertEqual(count, 1)
self.assertEqual(order.cartItems.count(), 2)
self.assertEqual(order.offers_applied.count(), 1) | Test string representation of Order | core/tests/test_models.py | test_creating_order | abhishekmorya/organic-shop-rest-api | 0 | python | def test_creating_order(self):
user = sample_user()
address = sample_address(user)
payment_mode = models.PaymentMode.objects.create(user=user, **{'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True})
payload = {'shipping_address': address, 'billing_address': address, 'payment_mode': payment_mode}
order = models.Order.objects.create(user=user, **payload)
product1 = sample_product(user)
sc1 = models.ShoppingCart.objects.create(user=user, product=product1, count=4)
product_payload = {'category': sample_category(user, name='Sauce'), 'title': 'Jam', 'desc': 'The sweet sour Jam', 'price': 10.0, 'quantity': 2, 'unit': models.Product.UNIT, 'image': 'image url'}
product2 = sample_product(user, **product_payload)
sc2 = models.ShoppingCart.objects.create(user=user, product=product2, count=3)
order.cartItems.add(sc1, sc2)
offer = models.Offer.objects.create(user=user, title='Summer Sale', percentage=20.5, desc='Summer Sale 2022', expiry_date=timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59)))
order.offers_applied.add(offer)
count = models.Order.objects.count()
self.assertEqual(count, 1)
self.assertEqual(order.cartItems.count(), 2)
self.assertEqual(order.offers_applied.count(), 1) | def test_creating_order(self):
user = sample_user()
address = sample_address(user)
payment_mode = models.PaymentMode.objects.create(user=user, **{'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True})
payload = {'shipping_address': address, 'billing_address': address, 'payment_mode': payment_mode}
order = models.Order.objects.create(user=user, **payload)
product1 = sample_product(user)
sc1 = models.ShoppingCart.objects.create(user=user, product=product1, count=4)
product_payload = {'category': sample_category(user, name='Sauce'), 'title': 'Jam', 'desc': 'The sweet sour Jam', 'price': 10.0, 'quantity': 2, 'unit': models.Product.UNIT, 'image': 'image url'}
product2 = sample_product(user, **product_payload)
sc2 = models.ShoppingCart.objects.create(user=user, product=product2, count=3)
order.cartItems.add(sc1, sc2)
offer = models.Offer.objects.create(user=user, title='Summer Sale', percentage=20.5, desc='Summer Sale 2022', expiry_date=timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59)))
order.offers_applied.add(offer)
count = models.Order.objects.count()
self.assertEqual(count, 1)
self.assertEqual(order.cartItems.count(), 2)
self.assertEqual(order.offers_applied.count(), 1)<|docstring|>Test string representation of Order<|endoftext|> |
eba14f5ded88d5c6a0aa9874b4a92b0f863e04ccc18279a4825b32fa9f46539a | def test_creating_price_detail(self):
'Test creating price details'
user = sample_user()
address = sample_address(user)
payment_mode = models.PaymentMode.objects.create(user=user, **{'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True})
order_payload = {'shipping_address': address, 'billing_address': address, 'payment_mode': payment_mode}
order = models.Order.objects.create(user=user, **order_payload)
product1 = sample_product(user)
sc1 = models.ShoppingCart.objects.create(user=user, product=product1, count=4)
product_payload = {'category': sample_category(user, name='Sauce'), 'title': 'Jam', 'desc': 'The sweet sour Jam', 'price': 10.0, 'quantity': 2, 'unit': models.Product.UNIT, 'image': 'image url'}
product2 = sample_product(user, **product_payload)
sc2 = models.ShoppingCart.objects.create(user=user, product=product2, count=3)
order.cartItems.add(sc1, sc2)
offer = models.Offer.objects.create(user=user, title='Summer Sale', percentage=20.5, desc='Summer Sale 2022', expiry_date=timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59)))
order.offers_applied.add(offer)
payload = {'order': order, 'delievery_charges': 50.5}
models.PriceDetail.objects.create(user=user, **payload)
self.assertEqual(1, models.PriceDetail.objects.count()) | Test creating price details | core/tests/test_models.py | test_creating_price_detail | abhishekmorya/organic-shop-rest-api | 0 | python | def test_creating_price_detail(self):
user = sample_user()
address = sample_address(user)
payment_mode = models.PaymentMode.objects.create(user=user, **{'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True})
order_payload = {'shipping_address': address, 'billing_address': address, 'payment_mode': payment_mode}
order = models.Order.objects.create(user=user, **order_payload)
product1 = sample_product(user)
sc1 = models.ShoppingCart.objects.create(user=user, product=product1, count=4)
product_payload = {'category': sample_category(user, name='Sauce'), 'title': 'Jam', 'desc': 'The sweet sour Jam', 'price': 10.0, 'quantity': 2, 'unit': models.Product.UNIT, 'image': 'image url'}
product2 = sample_product(user, **product_payload)
sc2 = models.ShoppingCart.objects.create(user=user, product=product2, count=3)
order.cartItems.add(sc1, sc2)
offer = models.Offer.objects.create(user=user, title='Summer Sale', percentage=20.5, desc='Summer Sale 2022', expiry_date=timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59)))
order.offers_applied.add(offer)
payload = {'order': order, 'delievery_charges': 50.5}
models.PriceDetail.objects.create(user=user, **payload)
self.assertEqual(1, models.PriceDetail.objects.count()) | def test_creating_price_detail(self):
user = sample_user()
address = sample_address(user)
payment_mode = models.PaymentMode.objects.create(user=user, **{'title': 'UPI', 'desc': 'UPI payment', 'charges': 2.2, 'enabled': True})
order_payload = {'shipping_address': address, 'billing_address': address, 'payment_mode': payment_mode}
order = models.Order.objects.create(user=user, **order_payload)
product1 = sample_product(user)
sc1 = models.ShoppingCart.objects.create(user=user, product=product1, count=4)
product_payload = {'category': sample_category(user, name='Sauce'), 'title': 'Jam', 'desc': 'The sweet sour Jam', 'price': 10.0, 'quantity': 2, 'unit': models.Product.UNIT, 'image': 'image url'}
product2 = sample_product(user, **product_payload)
sc2 = models.ShoppingCart.objects.create(user=user, product=product2, count=3)
order.cartItems.add(sc1, sc2)
offer = models.Offer.objects.create(user=user, title='Summer Sale', percentage=20.5, desc='Summer Sale 2022', expiry_date=timezone.make_aware(datetime(2022, 6, 30, 23, 59, 59)))
order.offers_applied.add(offer)
payload = {'order': order, 'delievery_charges': 50.5}
models.PriceDetail.objects.create(user=user, **payload)
self.assertEqual(1, models.PriceDetail.objects.count())<|docstring|>Test creating price details<|endoftext|> |
999c53910143c0a94a6fb64f928c03c28baaa5adf8061822105625de5133c1b1 | def get_template():
"\n Obtain the 'template' plist which also contains things like\n default rules about which files should count\n "
current_dir = os.path.dirname(os.path.abspath(__file__))
template_path = os.path.join(current_dir, TEMPLATE_FILENAME)
fh = open(template_path, 'r')
return plistlib.readPlist(fh) | Obtain the 'template' plist which also contains things like
default rules about which files should count | isign/code_resources.py | get_template | lhcn/isign | 204 | python | def get_template():
"\n Obtain the 'template' plist which also contains things like\n default rules about which files should count\n "
current_dir = os.path.dirname(os.path.abspath(__file__))
template_path = os.path.join(current_dir, TEMPLATE_FILENAME)
fh = open(template_path, 'r')
return plistlib.readPlist(fh) | def get_template():
"\n Obtain the 'template' plist which also contains things like\n default rules about which files should count\n "
current_dir = os.path.dirname(os.path.abspath(__file__))
template_path = os.path.join(current_dir, TEMPLATE_FILENAME)
fh = open(template_path, 'r')
return plistlib.readPlist(fh)<|docstring|>Obtain the 'template' plist which also contains things like
default rules about which files should count<|endoftext|> |
b816a492c7636c755fca84428e3aa0d0f7cc9efd06e2593a0ae2f90d6e3bf127 | @memoize
def get_hash_hex(path, hash_type='sha1'):
' Get the hash of a file at path, encoded as hexadecimal '
if (hash_type == 'sha256'):
hasher = hashlib.sha256()
elif (hash_type == 'sha1'):
hasher = hashlib.sha1()
else:
raise ValueError('Incorrect hash type provided: {}'.format(hash_type))
with open(path, 'rb') as afile:
buf = afile.read(HASH_BLOCKSIZE)
while (len(buf) > 0):
hasher.update(buf)
buf = afile.read(HASH_BLOCKSIZE)
return hasher.hexdigest() | Get the hash of a file at path, encoded as hexadecimal | isign/code_resources.py | get_hash_hex | lhcn/isign | 204 | python | @memoize
def get_hash_hex(path, hash_type='sha1'):
' '
if (hash_type == 'sha256'):
hasher = hashlib.sha256()
elif (hash_type == 'sha1'):
hasher = hashlib.sha1()
else:
raise ValueError('Incorrect hash type provided: {}'.format(hash_type))
with open(path, 'rb') as afile:
buf = afile.read(HASH_BLOCKSIZE)
while (len(buf) > 0):
hasher.update(buf)
buf = afile.read(HASH_BLOCKSIZE)
return hasher.hexdigest() | @memoize
def get_hash_hex(path, hash_type='sha1'):
' '
if (hash_type == 'sha256'):
hasher = hashlib.sha256()
elif (hash_type == 'sha1'):
hasher = hashlib.sha1()
else:
raise ValueError('Incorrect hash type provided: {}'.format(hash_type))
with open(path, 'rb') as afile:
buf = afile.read(HASH_BLOCKSIZE)
while (len(buf) > 0):
hasher.update(buf)
buf = afile.read(HASH_BLOCKSIZE)
return hasher.hexdigest()<|docstring|>Get the hash of a file at path, encoded as hexadecimal<|endoftext|> |
e0dedeaa4e44189c2be18fcfd34e6586a36cab6b5ad3f2040c8630e2b5bd8475 | @memoize
def get_hash_binary(path, hash_type='sha1'):
' Get the hash of a file at path, encoded as binary '
return binascii.a2b_hex(get_hash_hex(path, hash_type)) | Get the hash of a file at path, encoded as binary | isign/code_resources.py | get_hash_binary | lhcn/isign | 204 | python | @memoize
def get_hash_binary(path, hash_type='sha1'):
' '
return binascii.a2b_hex(get_hash_hex(path, hash_type)) | @memoize
def get_hash_binary(path, hash_type='sha1'):
' '
return binascii.a2b_hex(get_hash_hex(path, hash_type))<|docstring|>Get the hash of a file at path, encoded as binary<|endoftext|> |
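The two helpers above pair a streamed hexadecimal digest with its raw-byte form (the CodeResources plist stores digests as binary data). A minimal standalone sketch of the same pattern follows; the 64 KiB block size is an assumption, since the module's HASH_BLOCKSIZE constant is not shown here.

import binascii
import hashlib

HASH_BLOCKSIZE = 65536  # assumed value; the real module constant may differ

def hash_hex(path, hash_type='sha1'):
    # Stream the file in fixed-size blocks so large binaries never have to fit in memory.
    hasher = hashlib.new(hash_type)
    with open(path, 'rb') as afile:
        buf = afile.read(HASH_BLOCKSIZE)
        while buf:
            hasher.update(buf)
            buf = afile.read(HASH_BLOCKSIZE)
    return hasher.hexdigest()

def hash_binary(path, hash_type='sha1'):
    # The seal stores raw digest bytes, hence the hex -> binary conversion.
    return binascii.a2b_hex(hash_hex(path, hash_type))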
bb516347cbe5b319c5a8313a363c45f7c2fb741e42e8e7a50b9a4e41815e3263 | def write_plist(target_dir, plist):
' Write the CodeResources file '
output_dir = os.path.join(target_dir, OUTPUT_DIRECTORY)
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, OUTPUT_FILENAME)
fh = open(output_path, 'w')
plistlib.writePlist(plist, fh)
return output_path | Write the CodeResources file | isign/code_resources.py | write_plist | lhcn/isign | 204 | python | def write_plist(target_dir, plist):
' '
output_dir = os.path.join(target_dir, OUTPUT_DIRECTORY)
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, OUTPUT_FILENAME)
fh = open(output_path, 'w')
plistlib.writePlist(plist, fh)
return output_path | def write_plist(target_dir, plist):
' '
output_dir = os.path.join(target_dir, OUTPUT_DIRECTORY)
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, OUTPUT_FILENAME)
fh = open(output_path, 'w')
plistlib.writePlist(plist, fh)
return output_path<|docstring|>Write the CodeResources file<|endoftext|> |
42b745bd774d1a622a5084503b5cba664dc088ae764e341229a516c93cf0bd4f | def make_seal(source_app_path, target_dir=None):
'\n Given a source app, create a CodeResources file for the\n surrounding directory, and write it into the appropriate path in a target\n directory\n '
if (target_dir is None):
target_dir = os.path.dirname(source_app_path)
template = get_template()
rules = template['rules']
plist = copy.deepcopy(template)
resource_builder = ResourceBuilder(source_app_path, rules, respect_omissions=False)
plist['files'] = resource_builder.scan()
rules2 = template['rules2']
resource_builder2 = ResourceBuilder(source_app_path, rules2, respect_omissions=True, include_sha256=True)
plist['files2'] = resource_builder2.scan()
return write_plist(target_dir, plist) | Given a source app, create a CodeResources file for the
surrounding directory, and write it into the appropriate path in a target
directory | isign/code_resources.py | make_seal | lhcn/isign | 204 | python | def make_seal(source_app_path, target_dir=None):
'\n Given a source app, create a CodeResources file for the\n surrounding directory, and write it into the appropriate path in a target\n directory\n '
if (target_dir is None):
target_dir = os.path.dirname(source_app_path)
template = get_template()
rules = template['rules']
plist = copy.deepcopy(template)
resource_builder = ResourceBuilder(source_app_path, rules, respect_omissions=False)
plist['files'] = resource_builder.scan()
rules2 = template['rules2']
resource_builder2 = ResourceBuilder(source_app_path, rules2, respect_omissions=True, include_sha256=True)
plist['files2'] = resource_builder2.scan()
return write_plist(target_dir, plist) | def make_seal(source_app_path, target_dir=None):
'\n Given a source app, create a CodeResources file for the\n surrounding directory, and write it into the appropriate path in a target\n directory\n '
if (target_dir is None):
target_dir = os.path.dirname(source_app_path)
template = get_template()
rules = template['rules']
plist = copy.deepcopy(template)
resource_builder = ResourceBuilder(source_app_path, rules, respect_omissions=False)
plist['files'] = resource_builder.scan()
rules2 = template['rules2']
resource_builder2 = ResourceBuilder(source_app_path, rules2, respect_omissions=True, include_sha256=True)
plist['files2'] = resource_builder2.scan()
return write_plist(target_dir, plist)<|docstring|>Given a source app, create a CodeResources file for the
surrounding directory, and write it into the appropriate path in a target
directory<|endoftext|> |
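A hedged usage sketch for make_seal; the bundle path is hypothetical and the import assumes the file above is importable as isign.code_resources.

from isign.code_resources import make_seal  # module path taken from the record above

# Scans the bundle with the template's rules/rules2 sections, writes the
# CodeResources plist into the bundle's signature directory, and returns its path.
seal_path = make_seal('/tmp/Example.app')  # hypothetical .app bundle
print(seal_path)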
ec1e199891792ca36020d61be8d50f9a1e04819366d41e7a1ae0520397f813b7 | def scan(self):
'\n Walk entire directory, compile mapping\n path relative to source_dir -> digest and other data\n '
file_entries = {}
for (root, dirs, filenames) in os.walk(self.app_dir):
for filename in filenames:
(rule, path, relative_path) = self.get_rule_and_paths(root, filename)
if ((relative_path == 'CodeResources') and os.path.islink(path)):
continue
if rule.is_exclusion():
continue
if (rule.is_omitted() and (self.respect_omissions is True)):
continue
if (self.app_path == path):
continue
if os.path.islink(path):
if (not self.respect_omissions):
continue
val = {'symlink': os.readlink(path)}
else:
val = {'hash': plistlib.Data(get_hash_binary(path))}
if self.include_sha256:
val['hash2'] = plistlib.Data(get_hash_binary(path, 'sha256'))
if rule.is_optional():
val['optional'] = True
if ((len(val) == 1) and ('hash' in val)):
file_entries[relative_path] = val['hash']
else:
file_entries[relative_path] = val
for dirname in dirs:
(rule, path, relative_path) = self.get_rule_and_paths(root, dirname)
if (rule.is_nested() and ('.' not in path)):
dirs.remove(dirname)
continue
if (relative_path == OUTPUT_DIRECTORY):
dirs.remove(dirname)
return file_entries | Walk entire directory, compile mapping
path relative to source_dir -> digest and other data | isign/code_resources.py | scan | lhcn/isign | 204 | python | def scan(self):
'\n Walk entire directory, compile mapping\n path relative to source_dir -> digest and other data\n '
file_entries = {}
for (root, dirs, filenames) in os.walk(self.app_dir):
for filename in filenames:
(rule, path, relative_path) = self.get_rule_and_paths(root, filename)
if ((relative_path == 'CodeResources') and os.path.islink(path)):
continue
if rule.is_exclusion():
continue
if (rule.is_omitted() and (self.respect_omissions is True)):
continue
if (self.app_path == path):
continue
if os.path.islink(path):
if (not self.respect_omissions):
continue
val = {'symlink': os.readlink(path)}
else:
val = {'hash': plistlib.Data(get_hash_binary(path))}
if self.include_sha256:
val['hash2'] = plistlib.Data(get_hash_binary(path, 'sha256'))
if rule.is_optional():
val['optional'] = True
if ((len(val) == 1) and ('hash' in val)):
file_entries[relative_path] = val['hash']
else:
file_entries[relative_path] = val
for dirname in dirs:
(rule, path, relative_path) = self.get_rule_and_paths(root, dirname)
if (rule.is_nested() and ('.' not in path)):
dirs.remove(dirname)
continue
if (relative_path == OUTPUT_DIRECTORY):
dirs.remove(dirname)
return file_entries | def scan(self):
'\n Walk entire directory, compile mapping\n path relative to source_dir -> digest and other data\n '
file_entries = {}
for (root, dirs, filenames) in os.walk(self.app_dir):
for filename in filenames:
(rule, path, relative_path) = self.get_rule_and_paths(root, filename)
if ((relative_path == 'CodeResources') and os.path.islink(path)):
continue
if rule.is_exclusion():
continue
if (rule.is_omitted() and (self.respect_omissions is True)):
continue
if (self.app_path == path):
continue
if os.path.islink(path):
if (not self.respect_omissions):
continue
val = {'symlink': os.readlink(path)}
else:
val = {'hash': plistlib.Data(get_hash_binary(path))}
if self.include_sha256:
val['hash2'] = plistlib.Data(get_hash_binary(path, 'sha256'))
if rule.is_optional():
val['optional'] = True
if ((len(val) == 1) and ('hash' in val)):
file_entries[relative_path] = val['hash']
else:
file_entries[relative_path] = val
for dirname in dirs:
(rule, path, relative_path) = self.get_rule_and_paths(root, dirname)
if (rule.is_nested() and ('.' not in path)):
dirs.remove(dirname)
continue
if (relative_path == OUTPUT_DIRECTORY):
dirs.remove(dirname)
return file_entries<|docstring|>Walk entire directory, compile mapping
path relative to source_dir -> digest and other data<|endoftext|> |
4142d92502ae6b6138b4390cb74d1215cb9f85fb09e5594a6578df0a4a9c6a0b | def _acquire_changed(self, value=None, old_value=None, **kwargs):
"This is called when the 'acquire' signal changes."
if (self._status is None):
return
if ((old_value == 1) and (value == 0)):
self._status._finished() | This is called when the 'acquire' signal changes. | hxntools/detectors/hxn_xspress3.py | _acquire_changed | NSLS-II-HXN/hxntools | 0 | python | def _acquire_changed(self, value=None, old_value=None, **kwargs):
if (self._status is None):
return
if ((old_value == 1) and (value == 0)):
self._status._finished() | def _acquire_changed(self, value=None, old_value=None, **kwargs):
if (self._status is None):
return
if ((old_value == 1) and (value == 0)):
self._status._finished()<|docstring|>This is called when the 'acquire' signal changes.<|endoftext|> |
30ab2977529e301ccb43734c6b36047d8dd4f30d962b15c8e6f4d988837f802b | def fly_collect_rois(self, rois=None, *, ignore_get_failures=True):
'Read ROI data from the PVs\n\n Parameters\n ----------\n rois : sequence of Xspress3ROI instances, optional\n If unspecified, uses all currently enabled ROIs\n ignore_get_failures : bool, optional\n Ignore pyepics-related failures - will\n '
if (rois is None):
rois = self.enabled_rois
num_points = self.settings.num_images.get()
RoiTuple = namedtuple('Xspress3ROITuple', ['bin_low', 'bin_high', 'ev_low', 'ev_high', 'value', 'value_sum', 'enable'])
for roi in self.enabled_rois:
try:
roi_data = roi.settings.array_data.get(count=num_points, use_monitor=False)
except Exception as ex:
logger.error('Failed to get ROI data', exc_info=ex)
if (not ignore_get_failures):
raise
roi_data = np.zeros(num_points)
try:
roi_data = roi_data[:num_points]
except TypeError as ex:
logger.error('Failed to get ROI data', exc_info=ex)
if (not ignore_get_failures):
raise
roi_data = np.zeros(num_points)
roi_info = RoiTuple(bin_low=roi.bin_low.get(), bin_high=roi.bin_high.get(), ev_low=roi.ev_low.get(), ev_high=roi.ev_high.get(), value=roi_data, value_sum=None, enable=roi.enable.get())
(yield (roi.name, roi_info)) | Read ROI data from the PVs
Parameters
----------
rois : sequence of Xspress3ROI instances, optional
If unspecified, uses all currently enabled ROIs
ignore_get_failures : bool, optional
Ignore pyepics-related failures - will | hxntools/detectors/hxn_xspress3.py | fly_collect_rois | NSLS-II-HXN/hxntools | 0 | python | def fly_collect_rois(self, rois=None, *, ignore_get_failures=True):
'Read ROI data from the PVs\n\n Parameters\n ----------\n rois : sequence of Xspress3ROI instances, optional\n If unspecified, uses all currently enabled ROIs\n ignore_get_failures : bool, optional\n Ignore pyepics-related failures - will\n '
if (rois is None):
rois = self.enabled_rois
num_points = self.settings.num_images.get()
RoiTuple = namedtuple('Xspress3ROITuple', ['bin_low', 'bin_high', 'ev_low', 'ev_high', 'value', 'value_sum', 'enable'])
for roi in self.enabled_rois:
try:
roi_data = roi.settings.array_data.get(count=num_points, use_monitor=False)
except Exception as ex:
logger.error('Failed to get ROI data', exc_info=ex)
if (not ignore_get_failures):
raise
roi_data = np.zeros(num_points)
try:
roi_data = roi_data[:num_points]
except TypeError as ex:
logger.error('Failed to get ROI data', exc_info=ex)
if (not ignore_get_failures):
raise
roi_data = np.zeros(num_points)
roi_info = RoiTuple(bin_low=roi.bin_low.get(), bin_high=roi.bin_high.get(), ev_low=roi.ev_low.get(), ev_high=roi.ev_high.get(), value=roi_data, value_sum=None, enable=roi.enable.get())
(yield (roi.name, roi_info)) | def fly_collect_rois(self, rois=None, *, ignore_get_failures=True):
'Read ROI data from the PVs\n\n Parameters\n ----------\n rois : sequence of Xspress3ROI instances, optional\n If unspecified, uses all currently enabled ROIs\n ignore_get_failures : bool, optional\n Ignore pyepics-related failures - will\n '
if (rois is None):
rois = self.enabled_rois
num_points = self.settings.num_images.get()
RoiTuple = namedtuple('Xspress3ROITuple', ['bin_low', 'bin_high', 'ev_low', 'ev_high', 'value', 'value_sum', 'enable'])
for roi in self.enabled_rois:
try:
roi_data = roi.settings.array_data.get(count=num_points, use_monitor=False)
except Exception as ex:
logger.error('Failed to get ROI data', exc_info=ex)
if (not ignore_get_failures):
raise
roi_data = np.zeros(num_points)
try:
roi_data = roi_data[:num_points]
except TypeError as ex:
logger.error('Failed to get ROI data', exc_info=ex)
if (not ignore_get_failures):
raise
roi_data = np.zeros(num_points)
roi_info = RoiTuple(bin_low=roi.bin_low.get(), bin_high=roi.bin_high.get(), ev_low=roi.ev_low.get(), ev_high=roi.ev_high.get(), value=roi_data, value_sum=None, enable=roi.enable.get())
(yield (roi.name, roi_info))<|docstring|>Read ROI data from the PVs
Parameters
----------
rois : sequence of Xspress3ROI instances, optional
If unspecified, uses all currently enabled ROIs
ignore_get_failures : bool, optional
Ignore pyepics-related failures - will<|endoftext|> |
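A short, hedged sketch of consuming the generator above; `det` stands in for an instance of the detector class this method belongs to, and the names are illustrative only.

roi_arrays = {}
for name, roi_info in det.fly_collect_rois(ignore_get_failures=True):
    # roi_info.value is the per-frame numpy array read back from the ROI PV
    roi_arrays[name] = roi_info.value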
12377409bdb6dea307fd6ad72153af212ac0b88baf89af9a86f65a6bf78bd79c | def _update_shape_widgets(self):
'update controls that depend on the list of available shapes'
model = self.gui.get_object('ToolShapeList')
model.clear()
shapes = list(self.core.get('get_parameter_sets')('tool').values())
shapes.sort(key=(lambda item: item['weight']))
for shape in shapes:
model.append((shape['label'], shape['name']))
shape_names = [shape['name'] for shape in shapes]
for tool in self.get_all():
if (not (tool.get_value('shape').value in shape_names)):
self.get_collection().remove(tool)
self.gui.get_object('ToolNew').set_sensitive((len(model) > 0))
selector_box = self.gui.get_object('ToolSelectorBox')
if (len(model) < 2):
selector_box.hide()
else:
selector_box.show() | update controls that depend on the list of available shapes | pycam/pycam/Plugins/Tools.py | _update_shape_widgets | pschou/py-sdf | 0 | python | def _update_shape_widgets(self):
model = self.gui.get_object('ToolShapeList')
model.clear()
shapes = list(self.core.get('get_parameter_sets')('tool').values())
shapes.sort(key=(lambda item: item['weight']))
for shape in shapes:
model.append((shape['label'], shape['name']))
shape_names = [shape['name'] for shape in shapes]
for tool in self.get_all():
if (not (tool.get_value('shape').value in shape_names)):
self.get_collection().remove(tool)
self.gui.get_object('ToolNew').set_sensitive((len(model) > 0))
selector_box = self.gui.get_object('ToolSelectorBox')
if (len(model) < 2):
selector_box.hide()
else:
selector_box.show() | def _update_shape_widgets(self):
model = self.gui.get_object('ToolShapeList')
model.clear()
shapes = list(self.core.get('get_parameter_sets')('tool').values())
shapes.sort(key=(lambda item: item['weight']))
for shape in shapes:
model.append((shape['label'], shape['name']))
shape_names = [shape['name'] for shape in shapes]
for tool in self.get_all():
if (not (tool.get_value('shape').value in shape_names)):
self.get_collection().remove(tool)
self.gui.get_object('ToolNew').set_sensitive((len(model) > 0))
selector_box = self.gui.get_object('ToolSelectorBox')
if (len(model) < 2):
selector_box.hide()
else:
selector_box.show()<|docstring|>update controls that depend on the list of available shapes<|endoftext|> |
7eaef9fb5e16e07ffc6dce98a32066b83f7cac4de24cd8594c40708e5e2e5601 | def _update_tool_widgets(self, widget=None):
'transfer the content of the currently selected tool to the related widgets'
tool = self.get_selected()
control_box = self.gui.get_object('ToolSettingsControlsBox')
if (tool is None):
control_box.hide()
else:
with self.core.blocked_events({'tool-control-changed'}):
shape_name = tool.get_value('shape').value
self.select_shape(shape_name)
self.core.get('set_parameter_values')('tool', tool.get_dict())
control_box.show()
self.core.emit_event('tool-shape-changed') | transfer the content of the currently selected tool to the related widgets | pycam/pycam/Plugins/Tools.py | _update_tool_widgets | pschou/py-sdf | 0 | python | def _update_tool_widgets(self, widget=None):
tool = self.get_selected()
control_box = self.gui.get_object('ToolSettingsControlsBox')
if (tool is None):
control_box.hide()
else:
with self.core.blocked_events({'tool-control-changed'}):
shape_name = tool.get_value('shape').value
self.select_shape(shape_name)
self.core.get('set_parameter_values')('tool', tool.get_dict())
control_box.show()
self.core.emit_event('tool-shape-changed') | def _update_tool_widgets(self, widget=None):
tool = self.get_selected()
control_box = self.gui.get_object('ToolSettingsControlsBox')
if (tool is None):
control_box.hide()
else:
with self.core.blocked_events({'tool-control-changed'}):
shape_name = tool.get_value('shape').value
self.select_shape(shape_name)
self.core.get('set_parameter_values')('tool', tool.get_dict())
control_box.show()
self.core.emit_event('tool-shape-changed')<|docstring|>transfer the content of the currently selected tool to the related widgets<|endoftext|> |
4d7292700b49383c3818acb110c9dd3ef2ab84a1658fd613049da79e859f643a | def _transfer_controls_to_tool(self):
'the value of a tool-related control was changed by the user\n\n The changed value needs to be transferred to the currently selected tool.\n '
tool = self.get_selected()
shape = self._get_selected_shape()
if (tool and shape):
tool.set_value('shape', shape['name'])
for (key, value) in self.core.get('get_parameter_values')('tool').items():
tool.set_value(key, value) | the value of a tool-related control was changed by the user
The changed value needs to be transferred to the currently selected tool. | pycam/pycam/Plugins/Tools.py | _transfer_controls_to_tool | pschou/py-sdf | 0 | python | def _transfer_controls_to_tool(self):
'the value of a tool-related control was changed by the user\n\n The changed value needs to be transferred to the currently selected tool.\n '
tool = self.get_selected()
shape = self._get_selected_shape()
if (tool and shape):
tool.set_value('shape', shape['name'])
for (key, value) in self.core.get('get_parameter_values')('tool').items():
tool.set_value(key, value) | def _transfer_controls_to_tool(self):
'the value of a tool-related control was changed by by the user\n\n The changed value needs to be transferred to the currently selected tool.\n '
tool = self.get_selected()
shape = self._get_selected_shape()
if (tool and shape):
tool.set_value('shape', shape['name'])
for (key, value) in self.core.get('get_parameter_values')('tool').items():
tool.set_value(key, value)<|docstring|>the value of a tool-related control was changed by the user
The changed value needs to be transferred to the currently selected tool.<|endoftext|> |
37e8fa4bfc641a0c2854ef9db4fe0c7d00638b1bc6a6a4a6c3c8668cf1904506 | def getGeneralInfo(self):
'\n Return a dictionary containing all general info items. Format is <info_item>:<value>, where the type\n of the value is preserved. For CSV format, this will result in conversion to string and quotes where necessary, for\n JSON, the values will be interpreted and stored as JSON strings.\n '
return self.generalInfo | Return a dictionary containing all general info items. Format is <info_item>:<value>, where the type
of the value is preserved. For CSV format, this will result in conversion to string and quotes where necessary, for
JSON, the values will be interpreted and stored as JSON strings. | radiomics/generalinfo.py | getGeneralInfo | Meddebma/pyradiomics | 536 | python | def getGeneralInfo(self):
'\n Return a dictionary containing all general info items. Format is <info_item>:<value>, where the type\n of the value is preserved. For CSV format, this will result in conversion to string and quotes where necessary, for\n JSON, the values will be interpreted and stored as JSON strings.\n '
return self.generalInfo | def getGeneralInfo(self):
'\n Return a dictionary containing all general info items. Format is <info_item>:<value>, where the type\n of the value is preserved. For CSV format, this will result in conversion to string and quotes where necessary, for\n JSON, the values will be interpreted and stored as JSON strings.\n '
return self.generalInfo<|docstring|>Return a dictionary containing all general info items. Format is <info_item>:<value>, where the type
of the value is preserved. For CSV format, this will result in conversion to string and quotes where necessary, for
JSON, the values will be interpreted and stored as JSON strings.<|endoftext|> |
f4ffdc040aa60fac5747244d0a10d59166a67846796d3f433978e2af8d093142 | def addStaticElements(self):
'\n Adds the following elements to the general info:\n\n - Version: current version of PyRadiomics\n - NumpyVersion: version of numpy used\n - SimpleITKVersion: version SimpleITK used\n - PyWaveletVersion: version of PyWavelet used\n - PythonVersion: version of the python interpreter running PyRadiomics\n '
self.generalInfo[(self.generalInfo_prefix + 'Versions_PyRadiomics')] = radiomics.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_Numpy')] = numpy.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_SimpleITK')] = sitk.Version().VersionString()
self.generalInfo[(self.generalInfo_prefix + 'Versions_PyWavelet')] = pywt.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_Python')] = ('%i.%i.%i' % sys.version_info[:3]) | Adds the following elements to the general info:
- Version: current version of PyRadiomics
- NumpyVersion: version of numpy used
- SimpleITKVersion: version SimpleITK used
- PyWaveletVersion: version of PyWavelet used
- PythonVersion: version of the python interpreter running PyRadiomics | radiomics/generalinfo.py | addStaticElements | Meddebma/pyradiomics | 536 | python | def addStaticElements(self):
'\n Adds the following elements to the general info:\n\n - Version: current version of PyRadiomics\n - NumpyVersion: version of numpy used\n - SimpleITKVersion: version SimpleITK used\n - PyWaveletVersion: version of PyWavelet used\n - PythonVersion: version of the python interpreter running PyRadiomics\n '
self.generalInfo[(self.generalInfo_prefix + 'Versions_PyRadiomics')] = radiomics.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_Numpy')] = numpy.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_SimpleITK')] = sitk.Version().VersionString()
self.generalInfo[(self.generalInfo_prefix + 'Versions_PyWavelet')] = pywt.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_Python')] = ('%i.%i.%i' % sys.version_info[:3]) | def addStaticElements(self):
'\n Adds the following elements to the general info:\n\n - Version: current version of PyRadiomics\n - NumpyVersion: version of numpy used\n - SimpleITKVersion: version SimpleITK used\n - PyWaveletVersion: version of PyWavelet used\n - PythonVersion: version of the python interpreter running PyRadiomics\n '
self.generalInfo[(self.generalInfo_prefix + 'Versions_PyRadiomics')] = radiomics.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_Numpy')] = numpy.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_SimpleITK')] = sitk.Version().VersionString()
self.generalInfo[(self.generalInfo_prefix + 'Versions_PyWavelet')] = pywt.__version__
self.generalInfo[(self.generalInfo_prefix + 'Versions_Python')] = ('%i.%i.%i' % sys.version_info[:3])<|docstring|>Adds the following elements to the general info:
- Version: current version of PyRadiomics
- NumpyVersion: version of numpy used
- SimpleITKVersion: version SimpleITK used
- PyWaveletVersion: version of PyWavelet used
- PythonVersion: version of the python interpreter running PyRadiomics<|endoftext|> |
1b5a7f9cd43f346b779fff23f243918cee76577f443bb7cf1c5d166defea23ea | def addImageElements(self, image, prefix='original'):
'\n Calculates provenance info for the image\n\n Adds the following:\n\n - Hash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility\n tests. (Only added when prefix is "original")\n - Dimensionality: Number of dimensions (e.g. 2D, 3D) in the image. (Only added when prefix is "original")\n - Spacing: Pixel spacing (x, y, z) in mm.\n - Size: Dimensions (x, y, z) of the image in number of voxels.\n - Mean: Mean intensity value over all voxels in the image.\n - Minimum: Minimum intensity value among all voxels in the image.\n - Maximum: Maximum intensity value among all voxels in the image.\n\n A prefix is added to indicate what type of image is described:\n\n - original: Image as loaded, without pre-processing.\n - interpolated: Image after it has been resampled to a new spacing (includes cropping).\n '
if (prefix == 'original'):
self.generalInfo[(self.generalInfo_prefix + 'Image-original_Hash')] = sitk.Hash(image)
self.generalInfo[(self.generalInfo_prefix + 'Image-original_Dimensionality')] = ('%iD' % image.GetDimension())
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Spacing')] = image.GetSpacing()
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Size')] = image.GetSize()
im_arr = sitk.GetArrayFromImage(image).astype('float')
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Mean')] = numpy.mean(im_arr)
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Minimum')] = numpy.min(im_arr)
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Maximum')] = numpy.max(im_arr) | Calculates provenance info for the image
Adds the following:
- Hash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility
tests. (Only added when prefix is "original")
- Dimensionality: Number of dimensions (e.g. 2D, 3D) in the image. (Only added when prefix is "original")
- Spacing: Pixel spacing (x, y, z) in mm.
- Size: Dimensions (x, y, z) of the image in number of voxels.
- Mean: Mean intensity value over all voxels in the image.
- Minimum: Minimum intensity value among all voxels in the image.
- Maximum: Maximum intensity value among all voxels in the image.
A prefix is added to indicate what type of image is described:
- original: Image as loaded, without pre-processing.
- interpolated: Image after it has been resampled to a new spacing (includes cropping). | radiomics/generalinfo.py | addImageElements | Meddebma/pyradiomics | 536 | python | def addImageElements(self, image, prefix='original'):
'\n Calculates provenance info for the image\n\n Adds the following:\n\n - Hash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility\n tests. (Only added when prefix is "original")\n - Dimensionality: Number of dimensions (e.g. 2D, 3D) in the image. (Only added when prefix is "original")\n - Spacing: Pixel spacing (x, y, z) in mm.\n - Size: Dimensions (x, y, z) of the image in number of voxels.\n - Mean: Mean intensity value over all voxels in the image.\n - Minimum: Minimum intensity value among all voxels in the image.\n - Maximum: Maximum intensity value among all voxels in the image.\n\n A prefix is added to indicate what type of image is described:\n\n - original: Image as loaded, without pre-processing.\n - interpolated: Image after it has been resampled to a new spacing (includes cropping).\n '
if (prefix == 'original'):
self.generalInfo[(self.generalInfo_prefix + 'Image-original_Hash')] = sitk.Hash(image)
self.generalInfo[(self.generalInfo_prefix + 'Image-original_Dimensionality')] = ('%iD' % image.GetDimension())
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Spacing')] = image.GetSpacing()
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Size')] = image.GetSize()
im_arr = sitk.GetArrayFromImage(image).astype('float')
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Mean')] = numpy.mean(im_arr)
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Minimum')] = numpy.min(im_arr)
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Maximum')] = numpy.max(im_arr) | def addImageElements(self, image, prefix='original'):
'\n Calculates provenance info for the image\n\n Adds the following:\n\n - Hash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility\n tests. (Only added when prefix is "original")\n - Dimensionality: Number of dimensions (e.g. 2D, 3D) in the image. (Only added when prefix is "original")\n - Spacing: Pixel spacing (x, y, z) in mm.\n - Size: Dimensions (x, y, z) of the image in number of voxels.\n - Mean: Mean intensity value over all voxels in the image.\n - Minimum: Minimum intensity value among all voxels in the image.\n - Maximum: Maximum intensity value among all voxels in the image.\n\n A prefix is added to indicate what type of image is described:\n\n - original: Image as loaded, without pre-processing.\n - interpolated: Image after it has been resampled to a new spacing (includes cropping).\n '
if (prefix == 'original'):
self.generalInfo[(self.generalInfo_prefix + 'Image-original_Hash')] = sitk.Hash(image)
self.generalInfo[(self.generalInfo_prefix + 'Image-original_Dimensionality')] = ('%iD' % image.GetDimension())
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Spacing')] = image.GetSpacing()
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Size')] = image.GetSize()
im_arr = sitk.GetArrayFromImage(image).astype('float')
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Mean')] = numpy.mean(im_arr)
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Minimum')] = numpy.min(im_arr)
self.generalInfo[(((self.generalInfo_prefix + 'Image-') + prefix) + '_Maximum')] = numpy.max(im_arr)<|docstring|>Calculates provenance info for the image
Adds the following:
- Hash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility
tests. (Only added when prefix is "original")
- Dimensionality: Number of dimensions (e.g. 2D, 3D) in the image. (Only added when prefix is "original")
- Spacing: Pixel spacing (x, y, z) in mm.
- Size: Dimensions (x, y, z) of the image in number of voxels.
- Mean: Mean intensity value over all voxels in the image.
- Minimum: Minimum intensity value among all voxels in the image.
- Maximum: Maximum intensity value among all voxels in the image.
A prefix is added to indicate what type of image is described:
- original: Image as loaded, without pre-processing.
- interpolated: Image after it has been resampled to a new spacing (includes cropping).<|endoftext|> |
9c8f2ab11d2c63b7c60ccabc2d3e7d2167dae014ba3617dc281e08cf34031140 | def addMaskElements(self, image, mask, label, prefix='original'):
'\n Calculates provenance info for the mask\n\n Adds the following:\n\n - MaskHash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility\n tests. (Only added when prefix is "original")\n - BoundingBox: bounding box of the ROI defined by the specified label:\n Elements 0, 1 and 2 are the x, y and z coordinates of the lower bound, respectively.\n Elements 3, 4 and 5 are the size of the bounding box in x, y and z direction, respectively.\n - VoxelNum: Number of voxels included in the ROI defined by the specified label.\n - VolumeNum: Number of fully connected (26-connectivity) volumes in the ROI defined by the specified label.\n - CenterOfMassIndex: x, y and z coordinates of the center of mass of the ROI in terms of the image coordinate space\n (continuous index).\n - CenterOfMass: the real-world x, y and z coordinates of the center of mass of the ROI\n - ROIMean: Mean intensity value over all voxels in the ROI defined by the specified label.\n - ROIMinimum: Minimum intensity value among all voxels in the ROI defined by the specified label.\n - ROIMaximum: Maximum intensity value among all voxels in the ROI defined by the specified label.\n\n A prefix is added to indicate what type of mask is described:\n\n - original: Mask as loaded, without pre-processing.\n - corrected: Mask after it has been corrected by :py:func:`imageoperations.checkMask`.\n - interpolated: Mask after it has been resampled to a new spacing (includes cropping).\n - resegmented: Mask after resegmentation has been applied.\n '
if (mask is None):
return
if (prefix == 'original'):
self.generalInfo[(self.generalInfo_prefix + 'Mask-original_Hash')] = sitk.Hash(mask)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Spacing')] = mask.GetSpacing()
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Size')] = mask.GetSize()
lssif = sitk.LabelShapeStatisticsImageFilter()
lssif.Execute(mask)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_BoundingBox')] = lssif.GetBoundingBox(label)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_VoxelNum')] = lssif.GetNumberOfPixels(label)
labelMap = (mask == label)
ccif = sitk.ConnectedComponentImageFilter()
ccif.FullyConnectedOn()
ccif.Execute(labelMap)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_VolumeNum')] = ccif.GetObjectCount()
ma_arr = (sitk.GetArrayFromImage(labelMap) == 1)
maskCoordinates = numpy.array(numpy.where(ma_arr))
center_index = tuple(numpy.mean(maskCoordinates, axis=1)[::(- 1)])
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_CenterOfMassIndex')] = center_index
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_CenterOfMass')] = mask.TransformContinuousIndexToPhysicalPoint(center_index)
if (image is None):
return
im_arr = sitk.GetArrayFromImage(image)
targetvoxels = im_arr[ma_arr].astype('float')
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Mean')] = numpy.mean(targetvoxels)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Minimum')] = numpy.min(targetvoxels)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Maximum')] = numpy.max(targetvoxels) | Calculates provenance info for the mask
Adds the following:
- MaskHash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility
tests. (Only added when prefix is "original")
- BoundingBox: bounding box of the ROI defined by the specified label:
Elements 0, 1 and 2 are the x, y and z coordinates of the lower bound, respectively.
Elements 3, 4 and 5 are the size of the bounding box in x, y and z direction, respectively.
- VoxelNum: Number of voxels included in the ROI defined by the specified label.
- VolumeNum: Number of fully connected (26-connectivity) volumes in the ROI defined by the specified label.
- CenterOfMassIndex: x, y and z coordinates of the center of mass of the ROI in terms of the image coordinate space
(continuous index).
- CenterOfMass: the real-world x, y and z coordinates of the center of mass of the ROI
- ROIMean: Mean intensity value over all voxels in the ROI defined by the specified label.
- ROIMinimum: Minimum intensity value among all voxels in the ROI defined by the specified label.
- ROIMaximum: Maximum intensity value among all voxels in the ROI defined by the specified label.
A prefix is added to indicate what type of mask is described:
- original: Mask as loaded, without pre-processing.
- corrected: Mask after it has been corrected by :py:func:`imageoperations.checkMask`.
- interpolated: Mask after it has been resampled to a new spacing (includes cropping).
- resegmented: Mask after resegmentation has been applied. | radiomics/generalinfo.py | addMaskElements | Meddebma/pyradiomics | 536 | python | def addMaskElements(self, image, mask, label, prefix='original'):
'\n Calculates provenance info for the mask\n\n Adds the following:\n\n - MaskHash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility\n tests. (Only added when prefix is "original")\n - BoundingBox: bounding box of the ROI defined by the specified label:\n Elements 0, 1 and 2 are the x, y and z coordinates of the lower bound, respectively.\n Elements 3, 4 and 5 are the size of the bounding box in x, y and z direction, respectively.\n - VoxelNum: Number of voxels included in the ROI defined by the specified label.\n - VolumeNum: Number of fully connected (26-connectivity) volumes in the ROI defined by the specified label.\n - CenterOfMassIndex: x, y and z coordinates of the center of mass of the ROI in terms of the image coordinate space\n (continuous index).\n - CenterOfMass: the real-world x, y and z coordinates of the center of mass of the ROI\n - ROIMean: Mean intensity value over all voxels in the ROI defined by the specified label.\n - ROIMinimum: Minimum intensity value among all voxels in the ROI defined by the specified label.\n - ROIMaximum: Maximum intensity value among all voxels in the ROI defined by the specified label.\n\n A prefix is added to indicate what type of mask is described:\n\n - original: Mask as loaded, without pre-processing.\n - corrected: Mask after it has been corrected by :py:func:`imageoperations.checkMask`.\n - interpolated: Mask after it has been resampled to a new spacing (includes cropping).\n - resegmented: Mask after resegmentation has been applied.\n '
if (mask is None):
return
if (prefix == 'original'):
self.generalInfo[(self.generalInfo_prefix + 'Mask-original_Hash')] = sitk.Hash(mask)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Spacing')] = mask.GetSpacing()
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Size')] = mask.GetSize()
lssif = sitk.LabelShapeStatisticsImageFilter()
lssif.Execute(mask)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_BoundingBox')] = lssif.GetBoundingBox(label)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_VoxelNum')] = lssif.GetNumberOfPixels(label)
labelMap = (mask == label)
ccif = sitk.ConnectedComponentImageFilter()
ccif.FullyConnectedOn()
ccif.Execute(labelMap)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_VolumeNum')] = ccif.GetObjectCount()
ma_arr = (sitk.GetArrayFromImage(labelMap) == 1)
maskCoordinates = numpy.array(numpy.where(ma_arr))
center_index = tuple(numpy.mean(maskCoordinates, axis=1)[::(- 1)])
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_CenterOfMassIndex')] = center_index
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_CenterOfMass')] = mask.TransformContinuousIndexToPhysicalPoint(center_index)
if (image is None):
return
im_arr = sitk.GetArrayFromImage(image)
targetvoxels = im_arr[ma_arr].astype('float')
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Mean')] = numpy.mean(targetvoxels)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Minimum')] = numpy.min(targetvoxels)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Maximum')] = numpy.max(targetvoxels) | def addMaskElements(self, image, mask, label, prefix='original'):
'\n Calculates provenance info for the mask\n\n Adds the following:\n\n - MaskHash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility\n tests. (Only added when prefix is "original")\n - BoundingBox: bounding box of the ROI defined by the specified label:\n Elements 0, 1 and 2 are the x, y and z coordinates of the lower bound, respectively.\n Elements 3, 4 and 5 are the size of the bounding box in x, y and z direction, respectively.\n - VoxelNum: Number of voxels included in the ROI defined by the specified label.\n - VolumeNum: Number of fully connected (26-connectivity) volumes in the ROI defined by the specified label.\n - CenterOfMassIndex: x, y and z coordinates of the center of mass of the ROI in terms of the image coordinate space\n (continuous index).\n - CenterOfMass: the real-world x, y and z coordinates of the center of mass of the ROI\n - ROIMean: Mean intensity value over all voxels in the ROI defined by the specified label.\n - ROIMinimum: Minimum intensity value among all voxels in the ROI defined by the specified label.\n - ROIMaximum: Maximum intensity value among all voxels in the ROI defined by the specified label.\n\n A prefix is added to indicate what type of mask is described:\n\n - original: Mask as loaded, without pre-processing.\n - corrected: Mask after it has been corrected by :py:func:`imageoperations.checkMask`.\n - interpolated: Mask after it has been resampled to a new spacing (includes cropping).\n - resegmented: Mask after resegmentation has been applied.\n '
if (mask is None):
return
if (prefix == 'original'):
self.generalInfo[(self.generalInfo_prefix + 'Mask-original_Hash')] = sitk.Hash(mask)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Spacing')] = mask.GetSpacing()
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Size')] = mask.GetSize()
lssif = sitk.LabelShapeStatisticsImageFilter()
lssif.Execute(mask)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_BoundingBox')] = lssif.GetBoundingBox(label)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_VoxelNum')] = lssif.GetNumberOfPixels(label)
labelMap = (mask == label)
ccif = sitk.ConnectedComponentImageFilter()
ccif.FullyConnectedOn()
ccif.Execute(labelMap)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_VolumeNum')] = ccif.GetObjectCount()
ma_arr = (sitk.GetArrayFromImage(labelMap) == 1)
maskCoordinates = numpy.array(numpy.where(ma_arr))
center_index = tuple(numpy.mean(maskCoordinates, axis=1)[::(- 1)])
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_CenterOfMassIndex')] = center_index
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_CenterOfMass')] = mask.TransformContinuousIndexToPhysicalPoint(center_index)
if (image is None):
return
im_arr = sitk.GetArrayFromImage(image)
targetvoxels = im_arr[ma_arr].astype('float')
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Mean')] = numpy.mean(targetvoxels)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Minimum')] = numpy.min(targetvoxels)
self.generalInfo[(((self.generalInfo_prefix + 'Mask-') + prefix) + '_Maximum')] = numpy.max(targetvoxels)<|docstring|>Calculates provenance info for the mask
Adds the following:
- MaskHash: sha1 hash of the mask, which can be used to check if the same mask was used during reproducibility
tests. (Only added when prefix is "original")
- BoundingBox: bounding box of the ROI defined by the specified label:
Elements 0, 1 and 2 are the x, y and z coordinates of the lower bound, respectively.
Elements 3, 4 and 5 are the size of the bounding box in x, y and z direction, respectively.
- VoxelNum: Number of voxels included in the ROI defined by the specified label.
- VolumeNum: Number of fully connected (26-connectivity) volumes in the ROI defined by the specified label.
- CenterOfMassIndex: x, y and z coordinates of the center of mass of the ROI in terms of the image coordinate space
(continuous index).
- CenterOfMass: the real-world x, y and z coordinates of the center of mass of the ROI
- ROIMean: Mean intensity value over all voxels in the ROI defined by the specified label.
- ROIMinimum: Minimum intensity value among all voxels in the ROI defined by the specified label.
- ROIMaximum: Maximum intensity value among all voxels in the ROI defined by the specified label.
A prefix is added to indicate what type of mask is described:
- original: Mask as loaded, without pre-processing.
- corrected: Mask after it has been corrected by :py:func:`imageoperations.checkMask`.
- interpolated: Mask after it has been resampled to a new spacing (includes cropping).
- resegmented: Mask after resegmentation has been applied.<|endoftext|> |
9fbdbb1bc92c210ab7502e7634f3aefe56b62d566b8ae94b30cee92f249178ae | def addGeneralSettings(self, settings):
'\n Add a string representation of the general settings.\n Format is {<settings_name>:<value>, ...}.\n '
self.generalInfo[(self.generalInfo_prefix + 'Configuration_Settings')] = settings | Add a string representation of the general settings.
Format is {<settings_name>:<value>, ...}. | radiomics/generalinfo.py | addGeneralSettings | Meddebma/pyradiomics | 536 | python | def addGeneralSettings(self, settings):
'\n Add a string representation of the general settings.\n Format is {<settings_name>:<value>, ...}.\n '
self.generalInfo[(self.generalInfo_prefix + 'Configuration_Settings')] = settings | def addGeneralSettings(self, settings):
'\n Add a string representation of the general settings.\n Format is {<settings_name>:<value>, ...}.\n '
self.generalInfo[(self.generalInfo_prefix + 'Configuration_Settings')] = settings<|docstring|>Add a string representation of the general settings.
Format is {<settings_name>:<value>, ...}.<|endoftext|> |
20d29ae9a1b57aa9890a87711525eac5e073b9ee45e3d7340809f47373e54a62 | def addEnabledImageTypes(self, enabledImageTypes):
'\n Add a string representation of the enabled image types and any custom settings for each image type.\n Format is {<imageType_name>:{<setting_name>:<value>, ...}, ...}.\n '
self.generalInfo[(self.generalInfo_prefix + 'Configuration_EnabledImageTypes')] = enabledImageTypes | Add a string representation of the enabled image types and any custom settings for each image type.
Format is {<imageType_name>:{<setting_name>:<value>, ...}, ...}. | radiomics/generalinfo.py | addEnabledImageTypes | Meddebma/pyradiomics | 536 | python | def addEnabledImageTypes(self, enabledImageTypes):
'\n Add a string representation of the enabled image types and any custom settings for each image type.\n Format is {<imageType_name>:{<setting_name>:<value>, ...}, ...}.\n '
self.generalInfo[(self.generalInfo_prefix + 'Configuration_EnabledImageTypes')] = enabledImageTypes | def addEnabledImageTypes(self, enabledImageTypes):
'\n Add a string representation of the enabled image types and any custom settings for each image type.\n Format is {<imageType_name>:{<setting_name>:<value>, ...}, ...}.\n '
self.generalInfo[(self.generalInfo_prefix + 'Configuration_EnabledImageTypes')] = enabledImageTypes<|docstring|>Add a string representation of the enabled image types and any custom settings for each image type.
Format is {<imageType_name>:{<setting_name>:<value>, ...}, ...}.<|endoftext|> |
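Taken together, the methods above assemble a provenance dictionary for an image/mask pair. A sketch of the typical call sequence, assuming the enclosing class is instantiable as GeneralInfo from radiomics.generalinfo (the class name and input file names are assumptions):

import SimpleITK as sitk
from radiomics import generalinfo

image = sitk.ReadImage('image.nrrd')   # hypothetical input files
mask = sitk.ReadImage('mask.nrrd')

info = generalinfo.GeneralInfo()       # class name assumed
info.addStaticElements()               # PyRadiomics / numpy / SimpleITK / PyWavelet / Python versions
info.addImageElements(image, prefix='original')
info.addMaskElements(image, mask, label=1, prefix='original')
provenance = info.getGeneralInfo()     # dict of prefixed provenance items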
8bd7ccbfd765723132e9e70babe27e075362b1a9a889751b056dca01b51d73b3 | def __call__(self, inputs: Image.Image) -> List[Dict[(str, Any)]]:
'\n Args:\n inputs (:obj:`PIL.Image`):\n The raw image representation as PIL.\n No transformation made whatsoever from the input. Make all necessary transformations here.\n Return:\n A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}\n It is preferred if the returned list is in decreasing `score` order\n '
img = inputs.convert('RGB')
config = self.model.default_cfg
if isinstance(config['input_size'], tuple):
img_size = config['input_size'][(- 2):]
else:
img_size = config['input_size']
transform = timm.data.transforms_factory.transforms_imagenet_eval(img_size=img_size, interpolation=config['interpolation'], mean=config['mean'], std=config['std'])
input_tensor = transform(img)
input_tensor = input_tensor.unsqueeze(0)
with torch.no_grad():
output = self.model(input_tensor)
probs = output.squeeze(0).softmax(dim=0)
(values, indices) = torch.topk(probs, k=5)
labels = [{'label': IMAGENET_LABELS[i], 'score': v.item()} for (i, v) in zip(indices, values)]
return labels | Args:
inputs (:obj:`PIL.Image`):
The raw image representation as PIL.
No transformation made whatsoever from the input. Make all necessary transformations here.
Return:
A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}
It is preferred if the returned list is in decreasing `score` order | api-inference-community/docker_images/timm/app/pipelines/image_classification.py | __call__ | abidlabs/huggingface_hub | 0 | python | def __call__(self, inputs: Image.Image) -> List[Dict[(str, Any)]]:
'\n Args:\n inputs (:obj:`PIL.Image`):\n The raw image representation as PIL.\n No transformation made whatsoever from the input. Make all necessary transformations here.\n Return:\n A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}\n It is preferred if the returned list is in decreasing `score` order\n '
img = inputs.convert('RGB')
config = self.model.default_cfg
if isinstance(config['input_size'], tuple):
img_size = config['input_size'][(- 2):]
else:
img_size = config['input_size']
transform = timm.data.transforms_factory.transforms_imagenet_eval(img_size=img_size, interpolation=config['interpolation'], mean=config['mean'], std=config['std'])
input_tensor = transform(img)
input_tensor = input_tensor.unsqueeze(0)
with torch.no_grad():
output = self.model(input_tensor)
probs = output.squeeze(0).softmax(dim=0)
(values, indices) = torch.topk(probs, k=5)
labels = [{'label': IMAGENET_LABELS[i], 'score': v.item()} for (i, v) in zip(indices, values)]
return labels | def __call__(self, inputs: Image.Image) -> List[Dict[(str, Any)]]:
'\n Args:\n inputs (:obj:`PIL.Image`):\n The raw image representation as PIL.\n No transformation made whatsoever from the input. Make all necessary transformations here.\n Return:\n A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}\n It is preferred if the returned list is in decreasing `score` order\n '
img = inputs.convert('RGB')
config = self.model.default_cfg
if isinstance(config['input_size'], tuple):
img_size = config['input_size'][(- 2):]
else:
img_size = config['input_size']
transform = timm.data.transforms_factory.transforms_imagenet_eval(img_size=img_size, interpolation=config['interpolation'], mean=config['mean'], std=config['std'])
input_tensor = transform(img)
input_tensor = input_tensor.unsqueeze(0)
with torch.no_grad():
output = self.model(input_tensor)
probs = output.squeeze(0).softmax(dim=0)
(values, indices) = torch.topk(probs, k=5)
labels = [{'label': IMAGENET_LABELS[i], 'score': v.item()} for (i, v) in zip(indices, values)]
return labels<|docstring|>Args:
inputs (:obj:`PIL.Image`):
The raw image representation as PIL.
No transformation made whatsoever from the input. Make all necessary transformations here.
Return:
A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}
It is preferred if the returned list is in decreasing `score` order<|endoftext|> |
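A standalone sketch of the same preprocessing and inference flow outside the pipeline class; the checkpoint name and input file are arbitrary examples, and timm.create_model is assumed as the model-loading entry point.

import timm
import torch
from PIL import Image
from timm.data.transforms_factory import transforms_imagenet_eval

model = timm.create_model('resnet50', pretrained=True).eval()  # example checkpoint
cfg = model.default_cfg
size = cfg['input_size'][-2:] if isinstance(cfg['input_size'], tuple) else cfg['input_size']
transform = transforms_imagenet_eval(img_size=size, interpolation=cfg['interpolation'],
                                     mean=cfg['mean'], std=cfg['std'])

img = Image.open('cat.jpg').convert('RGB')  # hypothetical input image
with torch.no_grad():
    probs = model(transform(img).unsqueeze(0)).squeeze(0).softmax(dim=0)
values, indices = torch.topk(probs, k=5)    # top-5 scores and class indices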
a5cd479c27bee0f75c81cb152593c5d45b7856fe33d25918cff1ba6d4922f21e | @j.baseclasses.actor_method
def file_get(self, doctype, guid1, guid2, schema_out=None, user_session=None):
'\n ```in\n doctype = "" (S)\n guid1 = (S)\n guid2 = "" (S)\n ```\n ```out\n res = "" (S)\n error_message = "" (S)\n error_code = 0 (I)\n ```\n :param collection:\n :param bucket:\n :param text:\n :return:\n '
out = schema_out.new()
if (not (doctype in doctypes_map)):
out.error_code = (- 1)
allowed_types = ', '.join(doctypes_map.keys())
out.error_message = f"invalid document type of '{doctype}', allowed types are {allowed_types}."
return out
service_name = doctypes_map[doctype]
try:
parent_dir = j.sal.fs.joinPaths(STATIC_DIR, doctype)
if (not j.sal.fs.exists(parent_dir)):
j.sal.fs.createDir(parent_dir)
cl = j.clients.gdrive.get('main')
if (doctype in ['document', 'spreadsheets', 'presentation']):
path = j.sal.fs.joinPaths(parent_dir, '{}.pdf'.format(guid1))
cl.exportFile(guid1, destpath=path, service_name=service_name, service_version='v3')
out.res = '/gdrive_static/{}/{}.pdf'.format(doctype, guid1)
elif (doctype == 'slide'):
cl.exportSlides(guid1, parent_dir)
if j.sal.fs.exists('{}/{}/{}.png'.format(parent_dir, guid1, guid2), followlinks=True):
out.res = '/gdrive_static/slide/{}/{}.png'.format(guid1, guid2)
else:
meta = cl.get_presentation_meta('{}/presentations.meta.json'.format(parent_dir), guid1)
if (guid2 in meta):
guid2 = meta[guid2]
guid2 = guid2.split('_', maxsplit=1)[1]
out.res = '/gdrive_static/slide/{}/{}'.format(guid1, guid2)
except GoogleApiHTTPError as api_http_error:
error = j.data.serializers.json.loads(api_http_error.content)['error']
out.error_code = error['code']
out.error_message = error['message']
return out | ```in
doctype = "" (S)
guid1 = (S)
guid2 = "" (S)
```
```out
res = "" (S)
error_message = "" (S)
error_code = 0 (I)
```
:param collection:
:param bucket:
:param text:
:return: | ThreeBotPackages/zerobot/webinterface/actors/wiki_gdrive_manager.py | file_get | grimpy/jumpscaleX_threebot | 0 | python | @j.baseclasses.actor_method
def file_get(self, doctype, guid1, guid2, schema_out=None, user_session=None):
'\n ```in\n doctype = (S)\n guid1 = (S)\n guid2 = (S)\n ```\n ```out\n res = (S)\n error_message = (S)\n error_code = 0 (I)\n ```\n :param collection:\n :param bucket:\n :param text:\n :return:\n '
out = schema_out.new()
if (not (doctype in doctypes_map)):
out.error_code = (- 1)
allowed_types = ', '.join(doctypes_map.keys())
out.error_message = f"invalid document type of '{doctype}', allowed types are {allowed_types}."
return out
service_name = doctypes_map[doctype]
try:
parent_dir = j.sal.fs.joinPaths(STATIC_DIR, doctype)
if (not j.sal.fs.exists(parent_dir)):
j.sal.fs.createDir(parent_dir)
cl = j.clients.gdrive.get('main')
if (doctype in ['document', 'spreadsheets', 'presentation']):
path = j.sal.fs.joinPaths(parent_dir, '{}.pdf'.format(guid1))
cl.exportFile(guid1, destpath=path, service_name=service_name, service_version='v3')
out.res = '/gdrive_static/{}/{}.pdf'.format(doctype, guid1)
elif (doctype == 'slide'):
cl.exportSlides(guid1, parent_dir)
if j.sal.fs.exists('{}/{}/{}.png'.format(parent_dir, guid1, guid2), followlinks=True):
out.res = '/gdrive_static/slide/{}/{}.png'.format(guid1, guid2)
else:
meta = cl.get_presentation_meta('{}/presentations.meta.json'.format(parent_dir), guid1)
if (guid2 in meta):
guid2 = meta[guid2]
guid2 = guid2.split('_', maxsplit=1)[1]
out.res = '/gdrive_static/slide/{}/{}'.format(guid1, guid2)
except GoogleApiHTTPError as api_http_error:
error = j.data.serializers.json.loads(api_http_error.content)['error']
out.error_code = error['code']
out.error_message = error['message']
return out | @j.baseclasses.actor_method
def file_get(self, doctype, guid1, guid2, schema_out=None, user_session=None):
'\n ```in\n doctype = (S)\n guid1 = (S)\n guid2 = (S)\n ```\n ```out\n res = (S)\n error_message = (S)\n error_code = 0 (I)\n ```\n :param collection:\n :param bucket:\n :param text:\n :return:\n '
out = schema_out.new()
if (not (doctype in doctypes_map)):
out.error_code = (- 1)
allowed_types = ', '.join(doctypes_map.keys())
out.error_message = f"invalid document type of '{doctype}', allowed types are {allowed_types}."
return out
service_name = doctypes_map[doctype]
try:
parent_dir = j.sal.fs.joinPaths(STATIC_DIR, doctype)
if (not j.sal.fs.exists(parent_dir)):
j.sal.fs.createDir(parent_dir)
cl = j.clients.gdrive.get('main')
if (doctype in ['document', 'spreadsheets', 'presentation']):
path = j.sal.fs.joinPaths(parent_dir, '{}.pdf'.format(guid1))
cl.exportFile(guid1, destpath=path, service_name=service_name, service_version='v3')
out.res = '/gdrive_static/{}/{}.pdf'.format(doctype, guid1)
elif (doctype == 'slide'):
cl.exportSlides(guid1, parent_dir)
if j.sal.fs.exists('{}/{}/{}.png'.format(parent_dir, guid1, guid2), followlinks=True):
out.res = '/gdrive_static/slide/{}/{}.png'.format(guid1, guid2)
else:
meta = cl.get_presentation_meta('{}/presentations.meta.json'.format(parent_dir), guid1)
if (guid2 in meta):
guid2 = meta[guid2]
guid2 = guid2.split('_', maxsplit=1)[1]
out.res = '/gdrive_static/slide/{}/{}'.format(guid1, guid2)
except GoogleApiHTTPError as api_http_error:
error = j.data.serializers.json.loads(api_http_error.content)['error']
out.error_code = error['code']
out.error_message = error['message']
return out<|docstring|>```in
doctype = "" (S)
guid1 = (S)
guid2 = "" (S)
```
```out
res = "" (S)
error_message = "" (S)
error_code = 0 (I)
```
:param collection:
:param bucket:
:param text:
:return:<|endoftext|> |
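The file_get actor above validates the requested document type against doctypes_map, exports the document through the gdrive client, and returns either a static URL or an error payload. A self-contained sketch of that validate-then-build-URL pattern, with a stand-in doctypes_map and without the Jumpscale (`j`) or Google client dependencies the real actor assumes:

```python
# Illustrative only: mirrors the doctype validation and response shape of file_get.
doctypes_map = {"document": "drive", "spreadsheets": "drive",
                "presentation": "drive", "slide": "slides"}  # stand-in mapping

def file_url(doctype: str, guid1: str) -> dict:
    out = {"res": "", "error_message": "", "error_code": 0}
    if doctype not in doctypes_map:
        out["error_code"] = -1
        out["error_message"] = (
            f"invalid document type of '{doctype}', "
            f"allowed types are {', '.join(doctypes_map.keys())}."
        )
        return out
    # The real actor exports the file to STATIC_DIR first; here only the path is built.
    out["res"] = "/gdrive_static/{}/{}.pdf".format(doctype, guid1)
    return out

print(file_url("document", "abc123"))   # {'res': '/gdrive_static/document/abc123.pdf', ...}
print(file_url("unknown", "abc123"))    # error_code == -1
```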
89d57b42afc4e38a9c8f34f24db40f2f556b093c6beb351041faa6045196432e | def fetch(dataset='mi', datadir=datadir):
'Fetch example dataset.\n\n If the requested dataset is not found in the location specified by\n `datadir`, the function attempts to download it.\n\n Parameters\n ----------\n dataset : str\n Which dataset to load. Currently only \'mi\' is supported.\n datadir : str\n Path to the storage location of example datasets. Datasets are\n downloaded to this location if they cannot be found. If the directory\n does not exist it is created.\n\n Returns\n -------\n data : list of dicts\n The data set is stored in a list, where each list element\n corresponds to data from one subject. Each list element is a\n dictionary with the following keys:\n "eeg" ... EEG signals\n "triggers" ... Trigger latencies\n "labels" ... Class labels\n "fs" ... Sample rate\n "locations" ... Channel locations\n '
if (dataset not in datasets):
raise ValueError("Example data '{}' not available.".format(dataset))
else:
files = datasets[dataset]['files']
url = datasets[dataset]['url']
md5 = datasets[dataset]['md5']
if (not isdir(datadir)):
makedirs(datadir)
data = []
for (n, filename) in enumerate(files):
fullfile = join(datadir, filename)
if (not isfile(fullfile)):
with open(fullfile, 'wb') as f:
response = get(join(url, filename))
f.write(response.content)
with open(fullfile, 'rb') as f:
hash = hashlib.md5(f.read()).hexdigest()
if (hash != md5[n]):
raise MD5MismatchError('MD5 hash of {} does not match {}.'.format(fullfile, md5[n]))
data.append(convert(dataset, loadmat(fullfile)))
return data | Fetch example dataset.
If the requested dataset is not found in the location specified by
`datadir`, the function attempts to download it.
Parameters
----------
dataset : str
Which dataset to load. Currently only 'mi' is supported.
datadir : str
Path to the storage location of example datasets. Datasets are
downloaded to this location if they cannot be found. If the directory
does not exist it is created.
Returns
-------
data : list of dicts
The data set is stored in a list, where each list element
corresponds to data from one subject. Each list element is a
dictionary with the following keys:
"eeg" ... EEG signals
"triggers" ... Trigger latencies
"labels" ... Class labels
"fs" ... Sample rate
"locations" ... Channel locations | scot/datasets.py | fetch | cle1109/scot | 41 | python | def fetch(dataset='mi', datadir=datadir):
'Fetch example dataset.\n\n If the requested dataset is not found in the location specified by\n `datadir`, the function attempts to download it.\n\n Parameters\n ----------\n dataset : str\n Which dataset to load. Currently only \'mi\' is supported.\n datadir : str\n Path to the storage location of example datasets. Datasets are\n downloaded to this location if they cannot be found. If the directory\n does not exist it is created.\n\n Returns\n -------\n data : list of dicts\n The data set is stored in a list, where each list element\n corresponds to data from one subject. Each list element is a\n dictionary with the following keys:\n "eeg" ... EEG signals\n "triggers" ... Trigger latencies\n "labels" ... Class labels\n "fs" ... Sample rate\n "locations" ... Channel locations\n '
if (dataset not in datasets):
raise ValueError("Example data '{}' not available.".format(dataset))
else:
files = datasets[dataset]['files']
url = datasets[dataset]['url']
md5 = datasets[dataset]['md5']
if (not isdir(datadir)):
makedirs(datadir)
data = []
for (n, filename) in enumerate(files):
fullfile = join(datadir, filename)
if (not isfile(fullfile)):
with open(fullfile, 'wb') as f:
response = get(join(url, filename))
f.write(response.content)
with open(fullfile, 'rb') as f:
hash = hashlib.md5(f.read()).hexdigest()
if (hash != md5[n]):
raise MD5MismatchError('MD5 hash of {} does not match {}.'.format(fullfile, md5[n]))
data.append(convert(dataset, loadmat(fullfile)))
return data | def fetch(dataset='mi', datadir=datadir):
'Fetch example dataset.\n\n If the requested dataset is not found in the location specified by\n `datadir`, the function attempts to download it.\n\n Parameters\n ----------\n dataset : str\n Which dataset to load. Currently only \'mi\' is supported.\n datadir : str\n Path to the storage location of example datasets. Datasets are\n downloaded to this location if they cannot be found. If the directory\n does not exist it is created.\n\n Returns\n -------\n data : list of dicts\n The data set is stored in a list, where each list element\n corresponds to data from one subject. Each list element is a\n dictionary with the following keys:\n "eeg" ... EEG signals\n "triggers" ... Trigger latencies\n "labels" ... Class labels\n "fs" ... Sample rate\n "locations" ... Channel locations\n '
if (dataset not in datasets):
raise ValueError("Example data '{}' not available.".format(dataset))
else:
files = datasets[dataset]['files']
url = datasets[dataset]['url']
md5 = datasets[dataset]['md5']
if (not isdir(datadir)):
makedirs(datadir)
data = []
for (n, filename) in enumerate(files):
fullfile = join(datadir, filename)
if (not isfile(fullfile)):
with open(fullfile, 'wb') as f:
response = get(join(url, filename))
f.write(response.content)
with open(fullfile, 'rb') as f:
hash = hashlib.md5(f.read()).hexdigest()
if (hash != md5[n]):
raise MD5MismatchError('MD5 hash of {} does not match {}.'.format(fullfile, md5[n]))
data.append(convert(dataset, loadmat(fullfile)))
return data<|docstring|>Fetch example dataset.
If the requested dataset is not found in the location specified by
`datadir`, the function attempts to download it.
Parameters
----------
dataset : str
Which dataset to load. Currently only 'mi' is supported.
datadir : str
Path to the storage location of example datasets. Datasets are
downloaded to this location if they cannot be found. If the directory
does not exist it is created.
Returns
-------
data : list of dicts
The data set is stored in a list, where each list element
corresponds to data from one subject. Each list element is a
dictionary with the following keys:
"eeg" ... EEG signals
"triggers" ... Trigger latencies
"labels" ... Class labels
"fs" ... Sample rate
"locations" ... Channel locations<|endoftext|> |
7a459e569e6632b5ca9098db56934be30697372950fd358dd259366784b369cd | def invert_timer():
'\n Invert the timer as an hourglass would, according to the timeout.\n '
now = time()
return ((now - (((gs.timeout * 60) + gs.start_time) - now)) - 1) | Invert the timer as an hourglass would, according to the timeout. | src/timer.py | invert_timer | CypElf/Magic-maze | 1 | python | def invert_timer():
'\n \n '
now = time()
return ((now - (((gs.timeout * 60) + gs.start_time) - now)) - 1) | def invert_timer():
'\n \n '
now = time()
return ((now - (((gs.timeout * 60) + gs.start_time) - now)) - 1)<|docstring|>Invert the timer as an hourglass would, according to the timeout.<|endoftext|> |
c934a827125a9df2428f05e9f7aca48c6ad5433e8fed33bc277ae956d08c58ca | def get_timer():
'\n Return True if the time has elapsed, and False otherwise.\n '
return (((gs.timeout * 60) + gs.start_time) - time()) | Return True if the time has elapsed, and False otherwise. | src/timer.py | get_timer | CypElf/Magic-maze | 1 | python | def get_timer():
'\n \n '
return (((gs.timeout * 60) + gs.start_time) - time()) | def get_timer():
'\n \n '
return (((gs.timeout * 60) + gs.start_time) - time())<|docstring|>Return True if the time has elapsed, and False otherwise.<|endoftext|> |
291e3e89e5210b7201d72b74a7bcd3d339c26cf1b1f6d671c7d61c3afb77b50c | def adjust_timer(previous_start_time, save_time, offset=0):
'\n Restore the start_time to a previous state, to restore the time elapsed since this previous time. You can add bonus seconds to the restored timer with the offset parameter. Defaults to 0.\n '
gs.start_time = ((previous_start_time + (time() - save_time)) + offset) | Restore the start_time to a previous state, to restore the time elapsed since this previous time. You can add bonus seconds to the restored timer with the offset parameter. Defaults to 0. | src/timer.py | adjust_timer | CypElf/Magic-maze | 1 | python | def adjust_timer(previous_start_time, save_time, offset=0):
'\n \n '
gs.start_time = ((previous_start_time + (time() - save_time)) + offset) | def adjust_timer(previous_start_time, save_time, offset=0):
'\n \n '
gs.start_time = ((previous_start_time + (time() - save_time)) + offset)<|docstring|>Restore the start_time to a previous state, to restore the time elapsed since this previous time. You can add bonus seconds to the restored timer with the offset parameter. Defaults to 0.<|endoftext|> |
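All three timer helpers reduce to arithmetic on gs.start_time and gs.timeout. A small worked sketch with a stand-in gs namespace (the real module-level game state is not shown here) makes the hourglass inversion concrete:

```python
# Stand-in for the shared game-state module (gs): a 3-minute timer that started 60 s ago.
import types
from time import time

gs = types.SimpleNamespace(timeout=3, start_time=time() - 60)

remaining = (gs.timeout * 60 + gs.start_time) - time()                       # get_timer(): ~120 s left
gs.start_time = time() - ((gs.timeout * 60 + gs.start_time) - time()) - 1    # invert_timer()

# After inversion the elapsed and remaining portions are (almost) swapped, like an hourglass.
print(round(remaining), round((gs.timeout * 60 + gs.start_time) - time()))   # ~120, ~59
```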
4850bc5571fb49cbcf4307a54280139999d5e818b45e88e21f74601310f19af7 | def unet(IMAGE_HEIGHT, IMAGE_WIDTH, n_levels=4, initial_features=64, n_conv=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1, activation='sigmoid'):
"\n U-Net is a convolutional neural network that was developed for biomedical image segmentation.The network consists of a contracting path and an expansive path, which gives it the u-shaped architecture. The contracting path is a typical convolutional network that consists of repeated application of convolutions, each followed by a rectified linear unit (ReLU) and a max pooling operation. During the contraction, the spatial information is reduced while feature information is increased. The expansive pathway combines the feature and spatial information through a sequence of up-convolutions and concatenations with high-resolution features from the contracting path.\n\n Parameters\n ----------\n IMAGE_HEIGHT : int\n height of the images.\n IMAGE_WIDTH : int\n width of the images.\n n_levels : int, optional\n number of contracting levels, by default 4.\n initial_features : int, optional\n number of initial convolutional layers, by default 64.\n n_conv : int, optional\n number of performed convolutions, by default 2.\n kernel_size : int, optional\n size of the kernel, by default 3.\n pooling_size : int, optional\n size of pooling, by default 2.\n in_channels : int, optional\n number of input channels, by default 1.\n out_channels : int, optional\n number of output channels, by default 1.\n activation: str\n keras activation function name.\n\n Returns\n -------\n keras Model class object\n U-net model.\n\n References\n -----------\n - Wiki: https://en.wikipedia.org/wiki/U-Net\n - U-Net architecture : '../extras/U-Net arch.jpeg'\n - U-Net paper: https://arxiv.org/pdf/1505.04597.pdf\n\n "
inputs = Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
x = inputs
convpars_down = dict(kernel_size=kernel_size, activation='relu', padding='same')
convpars_up = dict(kernel_size=kernel_size, activation='relu', padding='same')
skips = {}
for level in range(n_levels):
for _ in range(n_conv):
x = Conv2D((initial_features * (2 ** level)), **convpars_down)(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
if (level < (n_levels - 1)):
skips[level] = x
x = MaxPool2D(pooling_size)(x)
for level in reversed(range((n_levels - 1))):
x = Conv2DTranspose((initial_features * (2 ** level)), strides=pooling_size, **convpars_up)(x)
x = Concatenate()([x, skips[level]])
for _ in range(n_conv):
x = Conv2D((initial_features * (2 ** level)), **convpars_up)(x)
x = Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)
return tf.keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}') | U-Net is a convolutional neural network that was developed for biomedical image segmentation.The network consists of a contracting path and an expansive path, which gives it the u-shaped architecture. The contracting path is a typical convolutional network that consists of repeated application of convolutions, each followed by a rectified linear unit (ReLU) and a max pooling operation. During the contraction, the spatial information is reduced while feature information is increased. The expansive pathway combines the feature and spatial information through a sequence of up-convolutions and concatenations with high-resolution features from the contracting path.
Parameters
----------
IMAGE_HEIGHT : int
height of the images.
IMAGE_WIDTH : int
width of the images.
n_levels : int, optional
number of contracting levels, by default 4.
initial_features : int, optional
number of initial convolutional layers, by default 64.
n_conv : int, optional
number of performed convolutions, by default 2.
kernel_size : int, optional
size of the kernel, by default 3.
pooling_size : int, optional
size of pooling, by default 2.
in_channels : int, optional
number of input channels, by default 1.
out_channels : int, optional
number of output channels, by default 1.
activation: str
keras activation function name.
Returns
-------
keras Model class object
U-net model.
References
-----------
- Wiki: https://en.wikipedia.org/wiki/U-Net
- U-Net architecture : '../extras/U-Net arch.jpeg'
- U-Net paper: https://arxiv.org/pdf/1505.04597.pdf | MRIsegm/models.py | unet | giuseppefilitto/img-segm | 3 | python | def unet(IMAGE_HEIGHT, IMAGE_WIDTH, n_levels=4, initial_features=64, n_conv=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1, activation='sigmoid'):
"\n U-Net is a convolutional neural network that was developed for biomedical image segmentation.The network consists of a contracting path and an expansive path, which gives it the u-shaped architecture. The contracting path is a typical convolutional network that consists of repeated application of convolutions, each followed by a rectified linear unit (ReLU) and a max pooling operation. During the contraction, the spatial information is reduced while feature information is increased. The expansive pathway combines the feature and spatial information through a sequence of up-convolutions and concatenations with high-resolution features from the contracting path.\n\n Parameters\n ----------\n IMAGE_HEIGHT : int\n height of the images.\n IMAGE_WIDTH : int\n width of the images.\n n_levels : int, optional\n number of contracting levels, by default 4.\n initial_features : int, optional\n number of initial convolutional layers, by default 64.\n n_conv : int, optional\n number of performed convolutions, by default 2.\n kernel_size : int, optional\n size of the kernel, by default 3.\n pooling_size : int, optional\n size of pooling, by default 2.\n in_channels : int, optional\n number of input channels, by default 1.\n out_channels : int, optional\n number of output channels, by default 1.\n activation: str\n keras activation function name.\n\n Returns\n -------\n keras Model class object\n U-net model.\n\n References\n -----------\n - Wiki: https://en.wikipedia.org/wiki/U-Net\n - U-Net architecture : '../extras/U-Net arch.jpeg'\n - U-Net paper: https://arxiv.org/pdf/1505.04597.pdf\n\n "
inputs = Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
x = inputs
convpars_down = dict(kernel_size=kernel_size, activation='relu', padding='same')
convpars_up = dict(kernel_size=kernel_size, activation='relu', padding='same')
skips = {}
for level in range(n_levels):
for _ in range(n_conv):
x = Conv2D((initial_features * (2 ** level)), **convpars_down)(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
if (level < (n_levels - 1)):
skips[level] = x
x = MaxPool2D(pooling_size)(x)
for level in reversed(range((n_levels - 1))):
x = Conv2DTranspose((initial_features * (2 ** level)), strides=pooling_size, **convpars_up)(x)
x = Concatenate()([x, skips[level]])
for _ in range(n_conv):
x = Conv2D((initial_features * (2 ** level)), **convpars_up)(x)
x = Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)
return tf.keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}') | def unet(IMAGE_HEIGHT, IMAGE_WIDTH, n_levels=4, initial_features=64, n_conv=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1, activation='sigmoid'):
"\n U-Net is a convolutional neural network that was developed for biomedical image segmentation.The network consists of a contracting path and an expansive path, which gives it the u-shaped architecture. The contracting path is a typical convolutional network that consists of repeated application of convolutions, each followed by a rectified linear unit (ReLU) and a max pooling operation. During the contraction, the spatial information is reduced while feature information is increased. The expansive pathway combines the feature and spatial information through a sequence of up-convolutions and concatenations with high-resolution features from the contracting path.\n\n Parameters\n ----------\n IMAGE_HEIGHT : int\n height of the images.\n IMAGE_WIDTH : int\n width of the images.\n n_levels : int, optional\n number of contracting levels, by default 4.\n initial_features : int, optional\n number of initial convolutional layers, by default 64.\n n_conv : int, optional\n number of performed convolutions, by default 2.\n kernel_size : int, optional\n size of the kernel, by default 3.\n pooling_size : int, optional\n size of pooling, by default 2.\n in_channels : int, optional\n number of input channels, by default 1.\n out_channels : int, optional\n number of output channels, by default 1.\n activation: str\n keras activation function name.\n\n Returns\n -------\n keras Model class object\n U-net model.\n\n References\n -----------\n - Wiki: https://en.wikipedia.org/wiki/U-Net\n - U-Net architecture : '../extras/U-Net arch.jpeg'\n - U-Net paper: https://arxiv.org/pdf/1505.04597.pdf\n\n "
inputs = Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
x = inputs
convpars_down = dict(kernel_size=kernel_size, activation='relu', padding='same')
convpars_up = dict(kernel_size=kernel_size, activation='relu', padding='same')
skips = {}
for level in range(n_levels):
for _ in range(n_conv):
x = Conv2D((initial_features * (2 ** level)), **convpars_down)(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
if (level < (n_levels - 1)):
skips[level] = x
x = MaxPool2D(pooling_size)(x)
for level in reversed(range((n_levels - 1))):
x = Conv2DTranspose((initial_features * (2 ** level)), strides=pooling_size, **convpars_up)(x)
x = Concatenate()([x, skips[level]])
for _ in range(n_conv):
x = Conv2D((initial_features * (2 ** level)), **convpars_up)(x)
x = Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)
return tf.keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')<|docstring|>U-Net is a convolutional neural network that was developed for biomedical image segmentation.The network consists of a contracting path and an expansive path, which gives it the u-shaped architecture. The contracting path is a typical convolutional network that consists of repeated application of convolutions, each followed by a rectified linear unit (ReLU) and a max pooling operation. During the contraction, the spatial information is reduced while feature information is increased. The expansive pathway combines the feature and spatial information through a sequence of up-convolutions and concatenations with high-resolution features from the contracting path.
Parameters
----------
IMAGE_HEIGHT : int
height of the images.
IMAGE_WIDTH : int
width of the images.
n_levels : int, optional
number of contracting levels, by default 4.
initial_features : int, optional
number of initial convolutional layers, by default 64.
n_conv : int, optional
number of performed convolutions, by default 2.
kernel_size : int, optional
size of the kernel, by default 3.
pooling_size : int, optional
size of pooling, by default 2.
in_channels : int, optional
number of input channels, by default 1.
out_channels : int, optional
number of output channels, by default 1.
activation: str
keras activation function name.
Returns
-------
keras Model class object
U-net model.
References
-----------
- Wiki: https://en.wikipedia.org/wiki/U-Net
- U-Net architecture : '../extras/U-Net arch.jpeg'
- U-Net paper: https://arxiv.org/pdf/1505.04597.pdf<|endoftext|> |
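Assuming TensorFlow/Keras and the layer imports the function body relies on (Input, Conv2D, MaxPool2D, Conv2DTranspose, Concatenate, BatchNormalization, Dropout), a small instance of this U-Net can be built and compiled as follows; the 128x128 size and reduced feature count are only for illustration:

```python
# Hedged usage sketch; import path taken from the row metadata above.
from MRIsegm.models import unet

model = unet(IMAGE_HEIGHT=128, IMAGE_WIDTH=128,
             n_levels=3, initial_features=16,
             in_channels=1, out_channels=1, activation="sigmoid")
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.summary()   # model name will be UNET-L3-F16
```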
d0f2cf81507b65cfec0c22badd20e8ece2005b5c8a15401296ad0289ee7e3ff9 | def test_create_from_data(self):
'Testing DiffCommitManager.create_from_data'
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
raw_date = '2000-01-01 00:00:00-0600'
parsed_date = parse_date(raw_date)
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parsed_date, committer_name='Committer', committer_email='[email protected]', committer_date=parsed_date, commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(commit.files.count(), 1)
self.assertEqual(diffset.files.count(), commit.files.count())
self.assertEqual(diffset.commit_count, 1)
self.assertEqual(commit.author_date, parsed_date)
self.assertEqual(commit.committer_date, parsed_date)
self.assertEqual(commit.committer_date_utc, parsed_date)
self.assertEqual(commit.committer_date_offset, (- 21600.0))
self.assertEqual(commit.author_date.strftime(DiffCommit.ISO_DATE_FORMAT), raw_date)
self.assertEqual(commit.committer_date.strftime(DiffCommit.ISO_DATE_FORMAT), raw_date) | Testing DiffCommitManager.create_from_data | reviewboard/diffviewer/tests/test_diffcommit_manager.py | test_create_from_data | prateeja/reviewboard | 921 | python | def test_create_from_data(self):
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
raw_date = '2000-01-01 00:00:00-0600'
parsed_date = parse_date(raw_date)
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parsed_date, committer_name='Committer', committer_email='[email protected]', committer_date=parsed_date, commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(commit.files.count(), 1)
self.assertEqual(diffset.files.count(), commit.files.count())
self.assertEqual(diffset.commit_count, 1)
self.assertEqual(commit.author_date, parsed_date)
self.assertEqual(commit.committer_date, parsed_date)
self.assertEqual(commit.committer_date_utc, parsed_date)
self.assertEqual(commit.committer_date_offset, (- 21600.0))
self.assertEqual(commit.author_date.strftime(DiffCommit.ISO_DATE_FORMAT), raw_date)
self.assertEqual(commit.committer_date.strftime(DiffCommit.ISO_DATE_FORMAT), raw_date) | def test_create_from_data(self):
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
raw_date = '2000-01-01 00:00:00-0600'
parsed_date = parse_date(raw_date)
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parsed_date, committer_name='Committer', committer_email='[email protected]', committer_date=parsed_date, commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(commit.files.count(), 1)
self.assertEqual(diffset.files.count(), commit.files.count())
self.assertEqual(diffset.commit_count, 1)
self.assertEqual(commit.author_date, parsed_date)
self.assertEqual(commit.committer_date, parsed_date)
self.assertEqual(commit.committer_date_utc, parsed_date)
self.assertEqual(commit.committer_date_offset, (- 21600.0))
self.assertEqual(commit.author_date.strftime(DiffCommit.ISO_DATE_FORMAT), raw_date)
self.assertEqual(commit.committer_date.strftime(DiffCommit.ISO_DATE_FORMAT), raw_date)<|docstring|>Testing DiffCommitManager.create_from_data<|endoftext|> |
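The assertions above hinge on how the -0600 suffix of raw_date becomes a committer_date_offset of -21600 seconds and round-trips through DiffCommit.ISO_DATE_FORMAT. A standard-library sketch of that arithmetic, with an assumed '%Y-%m-%d %H:%M:%S%z' format standing in for the helpers the test actually imports:

```python
# Worked example of the UTC-offset arithmetic asserted above; the format string is an assumption.
from datetime import datetime

raw_date = "2000-01-01 00:00:00-0600"
parsed = datetime.strptime(raw_date, "%Y-%m-%d %H:%M:%S%z")

print(parsed.utcoffset().total_seconds())          # -21600.0
print(parsed.strftime("%Y-%m-%d %H:%M:%S%z"))      # round-trips to the raw string
```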
14cf96e6f5a96df208a40e92ce541b3b611df51d427401bd75e523f8ad1e9c0e | def test_create_from_data_with_author_date_none(self):
'Testing DiffCommitManager.create_from_data with author_date=None'
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
with self.assertRaises(IntegrityError):
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=None, committer_name='Committer', committer_email='[email protected]', committer_date=parse_date('2000-01-01 00:00:00-0600'), commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(diffset.commit_count, 0) | Testing DiffCommitManager.create_from_data with author_date=None | reviewboard/diffviewer/tests/test_diffcommit_manager.py | test_create_from_data_with_author_date_none | prateeja/reviewboard | 921 | python | def test_create_from_data_with_author_date_none(self):
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
with self.assertRaises(IntegrityError):
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=None, committer_name='Committer', committer_email='[email protected]', committer_date=parse_date('2000-01-01 00:00:00-0600'), commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(diffset.commit_count, 0) | def test_create_from_data_with_author_date_none(self):
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
with self.assertRaises(IntegrityError):
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=None, committer_name='Committer', committer_email='[email protected]', committer_date=parse_date('2000-01-01 00:00:00-0600'), commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(diffset.commit_count, 0)<|docstring|>Testing DiffCommitManager.create_from_data with author_date=None<|endoftext|> |
78eb7658a80dfade693948577a2662c33ab7a67f8440543c159d768ced56f95f | def test_create_from_data_with_committer_date_none(self):
'Testing DiffCommitManager.create_from_data with committer_date=None\n '
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parse_date('2000-01-01 00:00:00-0600'), committer_name='Committer', committer_email='[email protected]', committer_date=None, commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(commit.files.count(), 1)
self.assertEqual(diffset.files.count(), commit.files.count())
self.assertEqual(diffset.commit_count, 1)
self.assertIsNone(commit.committer_date)
self.assertIsNone(commit.committer_date_utc)
self.assertIsNone(commit.committer_date_offset) | Testing DiffCommitManager.create_from_data with committer_date=None | reviewboard/diffviewer/tests/test_diffcommit_manager.py | test_create_from_data_with_committer_date_none | prateeja/reviewboard | 921 | python | def test_create_from_data_with_committer_date_none(self):
'\n '
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parse_date('2000-01-01 00:00:00-0600'), committer_name='Committer', committer_email='[email protected]', committer_date=None, commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(commit.files.count(), 1)
self.assertEqual(diffset.files.count(), commit.files.count())
self.assertEqual(diffset.commit_count, 1)
self.assertIsNone(commit.committer_date)
self.assertIsNone(commit.committer_date_utc)
self.assertIsNone(commit.committer_date_offset) | def test_create_from_data_with_committer_date_none(self):
'\n '
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
commit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.commit_test_diff, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parse_date('2000-01-01 00:00:00-0600'), committer_name='Committer', committer_email='[email protected]', committer_date=None, commit_message='Description', diffset=diffset, validation_info={})
self.assertEqual(commit.files.count(), 1)
self.assertEqual(diffset.files.count(), commit.files.count())
self.assertEqual(diffset.commit_count, 1)
self.assertIsNone(commit.committer_date)
self.assertIsNone(commit.committer_date_utc)
self.assertIsNone(commit.committer_date_offset)<|docstring|>Testing DiffCommitManager.create_from_data with committer_date=None<|endoftext|> |
a0185b0ea499ea1eb287201e094f032c6c0ab85d4d1bd8b6d22f48a9a97b9714 | def test_create_from_data_custom_parser_extra_data(self):
'Testing DiffSetManager.create_from_data with a custom diff parser\n that sets extra_data\n '
repository = self.create_repository(tool_name='Test')
class CustomParser(DiffParser):
def parse(self):
result = super(CustomParser, self).parse()
self.parsed_diff.extra_data = {'key1': 'value1'}
self.parsed_diff_change.extra_data = {'key2': 'value2'}
return result
def parse_diff_header(self, linenum, parsed_file):
parsed_file.extra_data = {'key3': 'value3'}
return super(CustomParser, self).parse_diff_header(linenum, parsed_file)
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
tool = repository.get_scmtool()
self.spy_on(repository.get_scmtool, op=kgb.SpyOpReturn(tool))
self.spy_on(tool.get_parser, call_fake=(lambda repo, diff: CustomParser(diff)))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
diffcommit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.DEFAULT_FILEDIFF_DATA_DIFF, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parse_date('2000-01-01 00:00:00-0600'), committer_name='Committer', committer_email='[email protected]', committer_date=None, commit_message='Description', diffset=diffset, validation_info={})
diffset.refresh_from_db()
diffcommit.refresh_from_db()
self.assertEqual(diffset.extra_data, {'key1': 'value1'})
self.assertEqual(diffcommit.extra_data, {'key2': 'value2'})
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.all()[0]
self.assertEqual(filediff.extra_data, {'is_symlink': False, 'key3': 'value3', 'raw_delete_count': 1, 'raw_insert_count': 1}) | Testing DiffSetManager.create_from_data with a custom diff parser
that sets extra_data | reviewboard/diffviewer/tests/test_diffcommit_manager.py | test_create_from_data_custom_parser_extra_data | prateeja/reviewboard | 921 | python | def test_create_from_data_custom_parser_extra_data(self):
'Testing DiffSetManager.create_from_data with a custom diff parser\n that sets extra_data\n '
repository = self.create_repository(tool_name='Test')
class CustomParser(DiffParser):
def parse(self):
result = super(CustomParser, self).parse()
self.parsed_diff.extra_data = {'key1': 'value1'}
self.parsed_diff_change.extra_data = {'key2': 'value2'}
return result
def parse_diff_header(self, linenum, parsed_file):
parsed_file.extra_data = {'key3': 'value3'}
return super(CustomParser, self).parse_diff_header(linenum, parsed_file)
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
tool = repository.get_scmtool()
self.spy_on(repository.get_scmtool, op=kgb.SpyOpReturn(tool))
self.spy_on(tool.get_parser, call_fake=(lambda repo, diff: CustomParser(diff)))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
diffcommit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.DEFAULT_FILEDIFF_DATA_DIFF, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parse_date('2000-01-01 00:00:00-0600'), committer_name='Committer', committer_email='[email protected]', committer_date=None, commit_message='Description', diffset=diffset, validation_info={})
diffset.refresh_from_db()
diffcommit.refresh_from_db()
self.assertEqual(diffset.extra_data, {'key1': 'value1'})
self.assertEqual(diffcommit.extra_data, {'key2': 'value2'})
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.all()[0]
self.assertEqual(filediff.extra_data, {'is_symlink': False, 'key3': 'value3', 'raw_delete_count': 1, 'raw_insert_count': 1}) | def test_create_from_data_custom_parser_extra_data(self):
'Testing DiffSetManager.create_from_data with a custom diff parser\n that sets extra_data\n '
repository = self.create_repository(tool_name='Test')
class CustomParser(DiffParser):
def parse(self):
result = super(CustomParser, self).parse()
self.parsed_diff.extra_data = {'key1': 'value1'}
self.parsed_diff_change.extra_data = {'key2': 'value2'}
return result
def parse_diff_header(self, linenum, parsed_file):
parsed_file.extra_data = {'key3': 'value3'}
return super(CustomParser, self).parse_diff_header(linenum, parsed_file)
self.spy_on(repository.get_file_exists, op=kgb.SpyOpReturn(True))
tool = repository.get_scmtool()
self.spy_on(repository.get_scmtool, op=kgb.SpyOpReturn(tool))
self.spy_on(tool.get_parser, call_fake=(lambda repo, diff: CustomParser(diff)))
diffset = DiffSet.objects.create_empty(repository=repository, basedir='', revision=1)
diffcommit = DiffCommit.objects.create_from_data(repository=repository, diff_file_name='diff', diff_file_contents=self.DEFAULT_FILEDIFF_DATA_DIFF, parent_diff_file_name=None, parent_diff_file_contents=b'', request=None, commit_id='r1', parent_id='r0', author_name='Author', author_email='[email protected]', author_date=parse_date('2000-01-01 00:00:00-0600'), committer_name='Committer', committer_email='[email protected]', committer_date=None, commit_message='Description', diffset=diffset, validation_info={})
diffset.refresh_from_db()
diffcommit.refresh_from_db()
self.assertEqual(diffset.extra_data, {'key1': 'value1'})
self.assertEqual(diffcommit.extra_data, {'key2': 'value2'})
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.all()[0]
self.assertEqual(filediff.extra_data, {'is_symlink': False, 'key3': 'value3', 'raw_delete_count': 1, 'raw_insert_count': 1})<|docstring|>Testing DiffSetManager.create_from_data with a custom diff parser
that sets extra_data<|endoftext|> |
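These tests stub out repository access with kgb spies rather than touching a real SCM. A minimal standalone illustration of that pattern, assuming kgb's SpyAgency and SpyOpReturn API as used above:

```python
# SpyOpReturn forces a fixed return value, so no real repository lookup happens.
import kgb

def get_file_exists(path, revision):
    raise RuntimeError("would normally query the real repository")

agency = kgb.SpyAgency()
agency.spy_on(get_file_exists, op=kgb.SpyOpReturn(True))

print(get_file_exists("README", "r1"))   # True, without any repository access
```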
e2c70c958b23ed1c2bc3d1c484e263e670e1660952f997709b09b01d52cce4a0 | def get_visits(pattern):
"\n A utility function to group existing files into a set of visits. \n\n Attributes\n ----------\n pattern: a string to match to files, e.g. '*_flt.fits', '*_ima.fits', or '*_raw.fits'\n\n\n Output\n ------\n A dictionary with filter names as labels and visit names (6 letters) as sub-labels.\n "
files = glob.glob(pattern)
dic = {}
for f in files:
(p, name) = os.path.split(f)
obs_id = name[0:9]
visit = obs_id[0:6]
try:
filt = fits.open(f)[0].header['FILTER']
except KeyError:
print('FILTER keyword not found in {}'.format(f))
continue
if (filt not in list(dic.keys())):
dic[filt] = {}
if (visit not in list(dic[filt].keys())):
dic[filt][visit] = []
dic[filt][visit].append(obs_id)
return dic | A utility function to group existing files into a set of visits.
Attributes
----------
pattern: a string to match to files, e.g. '*_flt.fits', '*_ima.fits', or '*_raw.fits'
Output
------
A dictionary with filter names as labels and visit names (6 letters) as sub-labels. | WFC3_Back_Sub/back_sub.py | get_visits | npirzkal/WFC3_Back_sub | 0 | python | def get_visits(pattern):
"\n A utility function to group existing files into a set of visits. \n\n Attributes\n ----------\n pattern: a string to match to files, e.g. '*_flt.fits', '*_ima.fits', or '*_raw.fits'\n\n\n Output\n ------\n A dictionary with filter names as labels and visit names (6 letters) as sub-labels.\n "
files = glob.glob(pattern)
dic = {}
for f in files:
(p, name) = os.path.split(f)
obs_id = name[0:9]
visit = obs_id[0:6]
try:
filt = fits.open(f)[0].header['FILTER']
except KeyError:
print('FILTER keyword not found in {}'.format(f))
continue
if (filt not in list(dic.keys())):
dic[filt] = {}
if (visit not in list(dic[filt].keys())):
dic[filt][visit] = []
dic[filt][visit].append(obs_id)
return dic | def get_visits(pattern):
"\n A utility function to group existing files into a set of visits. \n\n Attributes\n ----------\n pattern: a string to match to files, e.g. '*_flt.fits', '*_ima.fits', or '*_raw.fits'\n\n\n Output\n ------\n A dictionary with filter names as labels and visit names (6 letters) as sub-labels.\n "
files = glob.glob(pattern)
dic = {}
for f in files:
(p, name) = os.path.split(f)
obs_id = name[0:9]
visit = obs_id[0:6]
try:
filt = fits.open(f)[0].header['FILTER']
except KeyError:
print('FILTER keyword not found in {}'.format(f))
continue
if (filt not in list(dic.keys())):
dic[filt] = {}
if (visit not in list(dic[filt].keys())):
dic[filt][visit] = []
dic[filt][visit].append(obs_id)
return dic<|docstring|>A utility function to group existing files into a set of visits.
Attributes
----------
pattern: a string to match to files, e.g. '*_flt.fits', '*_ima.fits', or '*_raw.fits'
Output
------
A dictionary with filter names as labels and visit names (6 letters) as sub-labels.<|endoftext|> |
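Given a working directory of WFC3 exposures, the returned nesting is filter, then visit, then observation IDs; a usage sketch (the import path follows the row metadata and the glob pattern is illustrative):

```python
# Usage sketch; assumes the current directory holds WFC3 FLT files with FILTER keywords.
from WFC3_Back_Sub.back_sub import get_visits

visits = get_visits("*_flt.fits")
for filt, by_visit in visits.items():
    for visit, obs_ids in by_visit.items():
        print(filt, visit, len(obs_ids), "exposures")
```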
899af7dad8d88e60171f391f949e29f2fa0f3d15bb5cbd6bf1c012f1c2541292 | def get_data(obs_id):
"\n A helper function to download a raw dataset from MAST using a direct URL\n\n Attributes\n ----------\n obs_id: string containing a 9 letter dataset name, e.g. 'idn604snq'\n\n Output\n ------\n string containing the name of the raw file that was downloaded\n "
url = 'https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HST/product/{}_raw.fits'.format(obs_id)
raw_name = './{}_raw.fits'.format(obs_id)
if os.path.isfile(raw_name):
os.unlink(raw_name)
with urllib.request.urlopen(url) as response, open(raw_name, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
return raw_name | A helper function to download a raw dataset from MAST using a direct URL
Attributes
----------
obs_id: string containing a 9 letter dataset name, e.g. 'idn604snq'
Output
------
string containing the name of the raw file that was downloaded | WFC3_Back_Sub/back_sub.py | get_data | npirzkal/WFC3_Back_sub | 0 | python | def get_data(obs_id):
"\n A helper function to download a raw dataset from MAST using a direct URL\n\n Attributes\n ----------\n obs_id: string containing a 9 letter dataset name, e.g. 'idn604snq'\n\n Output\n ------\n string containing the name of the raw file that was downloaded\n "
url = 'https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HST/product/{}_raw.fits'.format(obs_id)
raw_name = './{}_raw.fits'.format(obs_id)
if os.path.isfile(raw_name):
os.unlink(raw_name)
with urllib.request.urlopen(url) as response, open(raw_name, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
return raw_name | def get_data(obs_id):
"\n A helper function to download a raw dataset from MAST using a direct URL\n\n Attributes\n ----------\n obs_id: string containing a 9 letter dataset name, e.g. 'idn604snq'\n\n Output\n ------\n string containing the name of the raw file that was downloaded\n "
url = 'https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HST/product/{}_raw.fits'.format(obs_id)
raw_name = './{}_raw.fits'.format(obs_id)
if os.path.isfile(raw_name):
os.unlink(raw_name)
with urllib.request.urlopen(url) as response, open(raw_name, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
return raw_name<|docstring|>A helper function to download a raw dataset from MAST using a direct URL
Attributes
----------
obs_id: string containing a 9 letter dataset name, e.g. 'idn604snq'
Output
------
string containing the name of the raw file that was downloaded<|endoftext|> |
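A standalone download using the same MAST product URL pattern; the observation ID is the example from the docstring, and the call fetches the file over the network:

```python
# Mirrors get_data() above without the wrapper; obs_id is illustrative.
import shutil
import urllib.request

obs_id = "idn604snq"
url = ("https://mast.stsci.edu/api/v0.1/Download/file"
       "?uri=mast:HST/product/{}_raw.fits".format(obs_id))
with urllib.request.urlopen(url) as response, open("./{}_raw.fits".format(obs_id), "wb") as out:
    shutil.copyfileobj(response, out)
```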
e6fdc19856ac4a4cf64a6886c16d63088b66818e5ef75cc1fec29805971291d8 | def process_obs_ids(self, backup=False):
'\n Function to perform all the required steps to remove the time varying HeI and Scattered light component as well as the Zodi\n component from a group of WFC3 IR G102 or G141 RAW files.\n This runs CALWF3 on each input RAW files, create a mask MSK file, estimates the HeI and Scattered light levels during the course\n of each observations and removes those contributions, runs CALWF3 on the result to produce an FLT file and, finally, estimates and\n remove the Zodi light from the final FLT file to generate a set of background subtracted FLT files.\n '
self.load_backgrounds()
raw_names = ['{}_raw.fits'.format(x) for x in self.obs_ids]
flt_names = [self.raw_to_flt(x) for x in raw_names]
if (backup == True):
for flt_name in flt_names:
print('Saving orginal version of FLT {}', flt_name)
shutil.copy(flt_name, '{}_flt0.fits'.format(flt_name.split('_flt.fits')[0]))
[self.create_msk('{}_flt.fits'.format(x), thr=self.thr) for x in self.obs_ids]
self.Get_HeI_Zodi_Scatter_Levels()
self.sub_HeI_Scat()
self.flt_names = [self.ima_to_flt('{}_ima.fits'.format(x)) for x in self.obs_ids]
[self.sub_Zodi(x) for x in self.flt_names]
self.log_levels()
if self.plot:
self.diagnostic_plots()
return flt_names | Function to perform all the required steps to remove the time varying HeI and Scattered light component as well as the Zodi
component from a group of WFC3 IR G102 or G141 RAW files.
This runs CALWF3 on each input RAW file, creates a mask MSK file, estimates the HeI and Scattered light levels during the course
of each observation and removes those contributions, runs CALWF3 on the result to produce an FLT file and, finally, estimates and
removes the Zodi light from the final FLT file to generate a set of background-subtracted FLT files. | WFC3_Back_Sub/back_sub.py | process_obs_ids | npirzkal/WFC3_Back_sub | 0 | python | def process_obs_ids(self, backup=False):
'\n Function to perform all the required steps to remove the time varying HeI and Scattered light component as well as the Zodi\n component from a group of WFC3 IR G102 or G141 RAW files.\n This runs CALWF3 on each input RAW files, create a mask MSK file, estimates the HeI and Scattered light levels during the course\n of each observations and removes those contributions, runs CALWF3 on the result to produce an FLT file and, finally, estimates and\n remove the Zodi light from the final FLT file to generate a set of background subtracted FLT files.\n '
self.load_backgrounds()
raw_names = ['{}_raw.fits'.format(x) for x in self.obs_ids]
flt_names = [self.raw_to_flt(x) for x in raw_names]
if (backup == True):
for flt_name in flt_names:
print('Saving orginal version of FLT {}', flt_name)
shutil.copy(flt_name, '{}_flt0.fits'.format(flt_name.split('_flt.fits')[0]))
[self.create_msk('{}_flt.fits'.format(x), thr=self.thr) for x in self.obs_ids]
self.Get_HeI_Zodi_Scatter_Levels()
self.sub_HeI_Scat()
self.flt_names = [self.ima_to_flt('{}_ima.fits'.format(x)) for x in self.obs_ids]
[self.sub_Zodi(x) for x in self.flt_names]
self.log_levels()
if self.plot:
self.diagnostic_plots()
return flt_names | def process_obs_ids(self, backup=False):
'\n Function to perform all the required steps to remove the time varying HeI and Scattered light component as well as the Zodi\n component from a group of WFC3 IR G102 or G141 RAW files.\n This runs CALWF3 on each input RAW files, create a mask MSK file, estimates the HeI and Scattered light levels during the course\n of each observations and removes those contributions, runs CALWF3 on the result to produce an FLT file and, finally, estimates and\n remove the Zodi light from the final FLT file to generate a set of background subtracted FLT files.\n '
self.load_backgrounds()
raw_names = ['{}_raw.fits'.format(x) for x in self.obs_ids]
flt_names = [self.raw_to_flt(x) for x in raw_names]
if (backup == True):
for flt_name in flt_names:
print('Saving orginal version of FLT {}', flt_name)
shutil.copy(flt_name, '{}_flt0.fits'.format(flt_name.split('_flt.fits')[0]))
[self.create_msk('{}_flt.fits'.format(x), thr=self.thr) for x in self.obs_ids]
self.Get_HeI_Zodi_Scatter_Levels()
self.sub_HeI_Scat()
self.flt_names = [self.ima_to_flt('{}_ima.fits'.format(x)) for x in self.obs_ids]
[self.sub_Zodi(x) for x in self.flt_names]
self.log_levels()
if self.plot:
self.diagnostic_plots()
return flt_names<|docstring|>Function to perform all the required steps to remove the time varying HeI and Scattered light component as well as the Zodi
component from a group of WFC3 IR G102 or G141 RAW files.
This runs CALWF3 on each input RAW file, creates a mask MSK file, estimates the HeI and Scattered light levels during the course
of each observation and removes those contributions, runs CALWF3 on the result to produce an FLT file and, finally, estimates and
removes the Zodi light from the final FLT file to generate a set of background-subtracted FLT files.<|endoftext|>
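A hedged driver sketch for the workflow above; the BackSub class name and its constructor arguments are hypothetical, since only the methods appear in this section, and the call order is the point:

```python
# 'BackSub' and its constructor are assumptions; backup=True keeps *_flt0.fits copies.
from WFC3_Back_Sub import back_sub

sub = back_sub.BackSub(obs_ids=["idn604snq"], filt="G141")   # hypothetical constructor
flt_names = sub.process_obs_ids(backup=True)
print(flt_names)                                             # background-subtracted FLT files
```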
63fe0fe5386764940c573150b01ad1155dca15f5b8ee7eb4affce1c845e232d1 | def log_levels(self):
'Copy all the information from IMA into our final FLT file.\n Keywords added are: BSAMP (number of reads), HeI_#, Scat_#, HeI_#, TIME (ROUTIME), and DTIME (IMSET exposure)'
for flt_name in self.flt_names:
ima_name = (flt_name.split('_flt.fits')[0] + '_ima.fits')
print(ima_name)
with fits.open(flt_name, mode='update') as fflt:
with fits.open(ima_name) as fima:
extnum = fima[0].header['NSAMP']
fflt[1].header['BSAMP'] = extnum
for NSAMP in range(1, extnum):
HeI = fima[('SCI', NSAMP)].header['HeI_{}'.format(NSAMP)]
Scat = fima[('SCI', NSAMP)].header['Scat_{}'.format(NSAMP)]
ROUTTIME = fima[('SCI', NSAMP)].header['ROUTTIME']
DELTATIM = fima[('SCI', NSAMP)].header['DELTATIM']
print(NSAMP, ROUTTIME, HeI, Scat)
fflt[1].header['HeI_{}'.format(NSAMP)] = HeI
fflt[1].header['Scat_{}'.format(NSAMP)] = Scat
fflt[1].header['TIME_{}'.format(NSAMP)] = ROUTTIME
fflt[1].header['DTIME_{}'.format(NSAMP)] = DELTATIM | Copy all the information from IMA into our final FLT file.
Keywords added are: BSAMP (number of reads), HeI_#, Scat_#, HeI_#, TIME (ROUTIME), and DTIME (IMSET exposure) | WFC3_Back_Sub/back_sub.py | log_levels | npirzkal/WFC3_Back_sub | 0 | python | def log_levels(self):
'Copy all the information from IMA into our final FLT file.\n Keywords added are: BSAMP (number of reads), HeI_#, Scat_#, HeI_#, TIME (ROUTIME), and DTIME (IMSET exposure)'
for flt_name in self.flt_names:
ima_name = (flt_name.split('_flt.fits')[0] + '_ima.fits')
print(ima_name)
with fits.open(flt_name, mode='update') as fflt:
with fits.open(ima_name) as fima:
extnum = fima[0].header['NSAMP']
fflt[1].header['BSAMP'] = extnum
for NSAMP in range(1, extnum):
HeI = fima[('SCI', NSAMP)].header['HeI_{}'.format(NSAMP)]
Scat = fima[('SCI', NSAMP)].header['Scat_{}'.format(NSAMP)]
ROUTTIME = fima[('SCI', NSAMP)].header['ROUTTIME']
DELTATIM = fima[('SCI', NSAMP)].header['DELTATIM']
print(NSAMP, ROUTTIME, HeI, Scat)
fflt[1].header['HeI_{}'.format(NSAMP)] = HeI
fflt[1].header['Scat_{}'.format(NSAMP)] = Scat
fflt[1].header['TIME_{}'.format(NSAMP)] = ROUTTIME
fflt[1].header['DTIME_{}'.format(NSAMP)] = DELTATIM | def log_levels(self):
'Copy all the information from IMA into our final FLT file.\n Keywords added are: BSAMP (number of reads), HeI_#, Scat_#, HeI_#, TIME (ROUTIME), and DTIME (IMSET exposure)'
for flt_name in self.flt_names:
ima_name = (flt_name.split('_flt.fits')[0] + '_ima.fits')
print(ima_name)
with fits.open(flt_name, mode='update') as fflt:
with fits.open(ima_name) as fima:
extnum = fima[0].header['NSAMP']
fflt[1].header['BSAMP'] = extnum
for NSAMP in range(1, extnum):
HeI = fima[('SCI', NSAMP)].header['HeI_{}'.format(NSAMP)]
Scat = fima[('SCI', NSAMP)].header['Scat_{}'.format(NSAMP)]
ROUTTIME = fima[('SCI', NSAMP)].header['ROUTTIME']
DELTATIM = fima[('SCI', NSAMP)].header['DELTATIM']
print(NSAMP, ROUTTIME, HeI, Scat)
fflt[1].header['HeI_{}'.format(NSAMP)] = HeI
fflt[1].header['Scat_{}'.format(NSAMP)] = Scat
fflt[1].header['TIME_{}'.format(NSAMP)] = ROUTTIME
fflt[1].header['DTIME_{}'.format(NSAMP)] = DELTATIM<|docstring|>Copy all the information from IMA into our final FLT file.
Keywords added are: BSAMP (number of reads), HeI_#, Scat_#, HeI_#, TIME (ROUTIME), and DTIME (IMSET exposure)<|endoftext|> |
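The per-read keywords written by log_levels can be read back from the FLT header with astropy; the file name below is illustrative and BSAMP gives the number of reads recorded:

```python
# Reading back the background bookkeeping that log_levels() stores in the FLT header.
from astropy.io import fits

with fits.open("idn604snq_flt.fits") as hdul:
    hdr = hdul[1].header
    for nsamp in range(1, hdr["BSAMP"]):
        print(nsamp, hdr["TIME_{}".format(nsamp)],
              hdr["HeI_{}".format(nsamp)], hdr["Scat_{}".format(nsamp)])
```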
f71c4eca6f290551afec28ad80f00fdd8151ed62066421e4ca3153edb3b23ab9 | def raw_to_flt(self, raw_name):
'\n Function to run CALWF3 on a raw dataset. CRCORR is set to OMIT and FLATCORR is set to perform.\n If available, crds is ran to download and updat the RAW file header to point to the latest calibration files.\n The flat-field calibration file names are replaced with the ones included in this package and pointed to by\n G102_FF and G141_FF.\n\n Attributes\n ----------\n None\n\n Output\n ------\n string containing the name of the FLT file that was created\n\n '
CRCORR = 'OMIT'
FLATCORR = 'PERFORM'
obs_id = raw_name.split('_raw')[0]
files = ['{}_flt.fits'.format(obs_id), '{}_ima.fits'.format(obs_id)]
for ff in files:
if os.path.isfile(ff):
os.unlink(ff)
print('Processing ', raw_name)
res = os.system('crds bestrefs --files {} --sync-references=1 --update-bestrefs '.format(raw_name))
if (res != 0):
print('CRDS did not run.')
fin = fits.open(raw_name, mode='update')
fin[0].header['CRCORR'] = CRCORR
fin[0].header['FLATCORR'] = FLATCORR
filt = fin[0].header['FILTER']
self.org_FF_file = fin[0].header['PFLTFILE']
fin[0].header['PFLTFILE'] = self.FF_file
fin.close()
calwf3(raw_name)
flt_name = (raw_name.split('_raw.fits')[0] + '_flt.fits')
if (not os.path.isfile(flt_name)):
print('raw_to_flt() failed to generate ', flt_name)
sys.exit(1)
return flt_name | Function to run CALWF3 on a raw dataset. CRCORR is set to OMIT and FLATCORR is set to perform.
If available, crds is run to download and update the RAW file header to point to the latest calibration files.
The flat-field calibration file names are replaced with the ones included in this package and pointed to by
G102_FF and G141_FF.
Attributes
----------
None
Output
------
string containing the name of the FLT file that was created | WFC3_Back_Sub/back_sub.py | raw_to_flt | npirzkal/WFC3_Back_sub | 0 | python | def raw_to_flt(self, raw_name):
'\n Function to run CALWF3 on a raw dataset. CRCORR is set to OMIT and FLATCORR is set to perform.\n If available, crds is ran to download and updat the RAW file header to point to the latest calibration files.\n The flat-field calibration file names are replaced with the ones included in this package and pointed to by\n G102_FF and G141_FF.\n\n Attributes\n ----------\n None\n\n Output\n ------\n string containing the name of the FLT file that was created\n\n '
CRCORR = 'OMIT'
FLATCORR = 'PERFORM'
obs_id = raw_name.split('_raw')[0]
files = ['{}_flt.fits'.format(obs_id), '{}_ima.fits'.format(obs_id)]
for ff in files:
if os.path.isfile(ff):
os.unlink(ff)
print('Processing ', raw_name)
res = os.system('crds bestrefs --files {} --sync-references=1 --update-bestrefs '.format(raw_name))
if (res != 0):
print('CRDS did not run.')
fin = fits.open(raw_name, mode='update')
fin[0].header['CRCORR'] = CRCORR
fin[0].header['FLATCORR'] = FLATCORR
filt = fin[0].header['FILTER']
self.org_FF_file = fin[0].header['PFLTFILE']
fin[0].header['PFLTFILE'] = self.FF_file
fin.close()
calwf3(raw_name)
flt_name = (raw_name.split('_raw.fits')[0] + '_flt.fits')
if (not os.path.isfile(flt_name)):
print('raw_to_flt() failed to generate ', flt_name)
sys.exit(1)
return flt_name | def raw_to_flt(self, raw_name):
'\n Function to run CALWF3 on a raw dataset. CRCORR is set to OMIT and FLATCORR is set to perform.\n If available, crds is ran to download and updat the RAW file header to point to the latest calibration files.\n The flat-field calibration file names are replaced with the ones included in this package and pointed to by\n G102_FF and G141_FF.\n\n Attributes\n ----------\n None\n\n Output\n ------\n string containing the name of the FLT file that was created\n\n '
CRCORR = 'OMIT'
FLATCORR = 'PERFORM'
obs_id = raw_name.split('_raw')[0]
files = ['{}_flt.fits'.format(obs_id), '{}_ima.fits'.format(obs_id)]
for ff in files:
if os.path.isfile(ff):
os.unlink(ff)
print('Processing ', raw_name)
res = os.system('crds bestrefs --files {} --sync-references=1 --update-bestrefs '.format(raw_name))
if (res != 0):
print('CRDS did not run.')
fin = fits.open(raw_name, mode='update')
fin[0].header['CRCORR'] = CRCORR
fin[0].header['FLATCORR'] = FLATCORR
filt = fin[0].header['FILTER']
self.org_FF_file = fin[0].header['PFLTFILE']
fin[0].header['PFLTFILE'] = self.FF_file
fin.close()
calwf3(raw_name)
flt_name = (raw_name.split('_raw.fits')[0] + '_flt.fits')
if (not os.path.isfile(flt_name)):
print('raw_to_flt() failed to generate ', flt_name)
sys.exit(1)
return flt_name<|docstring|>Function to run CALWF3 on a raw dataset. CRCORR is set to OMIT and FLATCORR is set to perform.
If available, crds is run to download and update the RAW file header to point to the latest calibration files.
The flat-field calibration file names are replaced with the ones included in this package and pointed to by
G102_FF and G141_FF.
Attributes
----------
None
Output
------
string containing the name of the FLT file that was created<|endoftext|> |
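A hedged sketch of just the header preparation that raw_to_flt performs before calling CALWF3; the RAW file name and the replacement flat-field name are assumptions, and the actual pipeline call is left to wfc3tools as in the record.

from astropy.io import fits

raw_name = 'my_raw.fits'                        # placeholder RAW exposure
with fits.open(raw_name, mode='update') as fin:
    fin[0].header['CRCORR'] = 'OMIT'            # keep the up-the-ramp reads; reject CRs later
    fin[0].header['FLATCORR'] = 'PERFORM'       # flat-field during CALWF3
    original_flat = fin[0].header['PFLTFILE']   # remember the pipeline flat
    fin[0].header['PFLTFILE'] = 'tref$my_custom_flat.fits'  # hypothetical replacement flat
# wfc3tools.calwf3(raw_name) would then produce the IMA and FLT files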
384ed5d5a08f822485114e63501cbbc13e0b807576464015918a53c3a86a79a9 | def ima_to_flt(self, ima_name):
'\n Function to run CALWF3 on an exisiting IMA file. CRCORR is set to PERFORM.\n\n Attributes\n ----------\n ima_name string containing the name of the IMA file to process\n\n Output\n ------\n string containing the name of the FLT file that has been created\n '
import wfc3tools
from wfc3tools import wf3ir
CRCORR = 'PERFORM'
fin = fits.open(ima_name, mode='update')
fin[0].header['CRCORR'] = CRCORR
fin.close()
obs_id = ima_name.split('_ima.fits')[0]
flt_name = ('%s_flt.fits' % obs_id)
if os.path.isfile(flt_name):
os.unlink(flt_name)
tmp_name = ('%s_ima_ima.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
tmp_name = ('%s_ima_flt.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
wf3ir(ima_name)
shutil.move(tmp_name, flt_name)
tmp_name = ('%s_ima_ima.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
tmp = fits.open(flt_name)[1].data
if (not os.path.isfile(flt_name)):
print('raw_to_flt() failed to generate ', flt_name)
sys.exit(1)
return flt_name | Function to run CALWF3 on an existing IMA file. CRCORR is set to PERFORM.
Attributes
----------
ima_name string containing the name of the IMA file to process
Output
------
string containing the name of the FLT file that has been created | WFC3_Back_Sub/back_sub.py | ima_to_flt | npirzkal/WFC3_Back_sub | 0 | python | def ima_to_flt(self, ima_name):
'\n Function to run CALWF3 on an exisiting IMA file. CRCORR is set to PERFORM.\n\n Attributes\n ----------\n ima_name string containing the name of the IMA file to process\n\n Output\n ------\n string containing the name of the FLT file that has been created\n '
import wfc3tools
from wfc3tools import wf3ir
CRCORR = 'PERFORM'
fin = fits.open(ima_name, mode='update')
fin[0].header['CRCORR'] = CRCORR
fin.close()
obs_id = ima_name.split('_ima.fits')[0]
flt_name = ('%s_flt.fits' % obs_id)
if os.path.isfile(flt_name):
os.unlink(flt_name)
tmp_name = ('%s_ima_ima.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
tmp_name = ('%s_ima_flt.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
wf3ir(ima_name)
shutil.move(tmp_name, flt_name)
tmp_name = ('%s_ima_ima.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
tmp = fits.open(flt_name)[1].data
if (not os.path.isfile(flt_name)):
print('raw_to_flt() failed to generate ', flt_name)
sys.exit(1)
return flt_name | def ima_to_flt(self, ima_name):
'\n Function to run CALWF3 on an exisiting IMA file. CRCORR is set to PERFORM.\n\n Attributes\n ----------\n ima_name string containing the name of the IMA file to process\n\n Output\n ------\n string containing the name of the FLT file that has been created\n '
import wfc3tools
from wfc3tools import wf3ir
CRCORR = 'PERFORM'
fin = fits.open(ima_name, mode='update')
fin[0].header['CRCORR'] = CRCORR
fin.close()
obs_id = ima_name.split('_ima.fits')[0]
flt_name = ('%s_flt.fits' % obs_id)
if os.path.isfile(flt_name):
os.unlink(flt_name)
tmp_name = ('%s_ima_ima.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
tmp_name = ('%s_ima_flt.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
wf3ir(ima_name)
shutil.move(tmp_name, flt_name)
tmp_name = ('%s_ima_ima.fits' % obs_id)
if os.path.isfile(tmp_name):
os.unlink(tmp_name)
tmp = fits.open(flt_name)[1].data
if (not os.path.isfile(flt_name)):
print('raw_to_flt() failed to generate ', flt_name)
sys.exit(1)
return flt_name<|docstring|>Function to run CALWF3 on an existing IMA file. CRCORR is set to PERFORM.
Attributes
----------
ima_name string containing the name of the IMA file to process
Output
------
string containing the name of the FLT file that has been created<|endoftext|> |
db226ff3cbc1aa9d315df609adfb42fe597ec7d8245959eab08dea6c9eee8063 | def create_msk(self, flt_name, kernel_fwhm=1.25, background_box=((1014 // 6), 2), thr=0.05, npixels=80):
' \n This function will create a FITS files ipppssoot_msk.fits \n\n Attributes\n ----------\n flt_name string containing the name of the FLT name to create a mask for\n kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)\n background_box Int The saie fo the background box when estimating the background (default = (1014//6,1) pixels) \n thr Float Threshold above noise to detect signal (default = 0.05)\n npixels Int number of pixels for a spectrum to be detected (default = 100) \n\n Output\n ______\n String containing the name of the MSK file\n '
segm = self.get_mask(flt_name, kernel_fwhm=kernel_fwhm, background_box=background_box, thr=thr, npixels=npixels)
dq3 = fits.open(flt_name)['DQ'].data.astype(int)
dq3 = dq3.astype(int)
DQ = np.bitwise_and(dq3, (np.zeros(np.shape(dq3), np.int) + self.bit_mask))
kernel = Gaussian2DKernel(x_stddev=1)
segm = (segm * 1.0)
segm[(segm > 1e-05)] = 1.0
segm[(segm <= 1e-05)] = 0.0
segm[(DQ > 0)] = 1.0
msk_name = (flt_name.split('_flt.fits')[0] + '_msk.fits')
fits.writeto(msk_name, segm, overwrite=True)
return segm | This function will create a FITS file ipppssoot_msk.fits
Attributes
----------
flt_name string containing the name of the FLT file to create a mask for
kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)
background_box Int The size of the background box when estimating the background (default = (1014//6, 2) pixels)
thr Float Threshold above noise to detect signal (default = 0.05)
npixels Int number of pixels for a spectrum to be detected (default = 80)
Output
______
Numpy array containing the mask, which is also written to the ipppssoot_msk.fits file | WFC3_Back_Sub/back_sub.py | create_msk | npirzkal/WFC3_Back_sub | 0 | python | def create_msk(self, flt_name, kernel_fwhm=1.25, background_box=((1014 // 6), 2), thr=0.05, npixels=80):
' \n This function will create a FITS files ipppssoot_msk.fits \n\n Attributes\n ----------\n flt_name string containing the name of the FLT name to create a mask for\n kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)\n background_box Int The saie fo the background box when estimating the background (default = (1014//6,1) pixels) \n thr Float Threshold above noise to detect signal (default = 0.05)\n npixels Int number of pixels for a spectrum to be detected (default = 100) \n\n Output\n ______\n String containing the name of the MSK file\n '
segm = self.get_mask(flt_name, kernel_fwhm=kernel_fwhm, background_box=background_box, thr=thr, npixels=npixels)
dq3 = fits.open(flt_name)['DQ'].data.astype(int)
dq3 = dq3.astype(int)
DQ = np.bitwise_and(dq3, (np.zeros(np.shape(dq3), np.int) + self.bit_mask))
kernel = Gaussian2DKernel(x_stddev=1)
segm = (segm * 1.0)
segm[(segm > 1e-05)] = 1.0
segm[(segm <= 1e-05)] = 0.0
segm[(DQ > 0)] = 1.0
msk_name = (flt_name.split('_flt.fits')[0] + '_msk.fits')
fits.writeto(msk_name, segm, overwrite=True)
return segm | def create_msk(self, flt_name, kernel_fwhm=1.25, background_box=((1014 // 6), 2), thr=0.05, npixels=80):
' \n This function will create a FITS files ipppssoot_msk.fits \n\n Attributes\n ----------\n flt_name string containing the name of the FLT name to create a mask for\n kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)\n background_box Int The saie fo the background box when estimating the background (default = (1014//6,1) pixels) \n thr Float Threshold above noise to detect signal (default = 0.05)\n npixels Int number of pixels for a spectrum to be detected (default = 100) \n\n Output\n ______\n String containing the name of the MSK file\n '
segm = self.get_mask(flt_name, kernel_fwhm=kernel_fwhm, background_box=background_box, thr=thr, npixels=npixels)
dq3 = fits.open(flt_name)['DQ'].data.astype(int)
dq3 = dq3.astype(int)
DQ = np.bitwise_and(dq3, (np.zeros(np.shape(dq3), np.int) + self.bit_mask))
kernel = Gaussian2DKernel(x_stddev=1)
segm = (segm * 1.0)
segm[(segm > 1e-05)] = 1.0
segm[(segm <= 1e-05)] = 0.0
segm[(DQ > 0)] = 1.0
msk_name = (flt_name.split('_flt.fits')[0] + '_msk.fits')
fits.writeto(msk_name, segm, overwrite=True)
return segm<|docstring|>This function will create a FITS file ipppssoot_msk.fits
Attributes
----------
flt_name string containing the name of the FLT file to create a mask for
kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)
background_box Int The size of the background box when estimating the background (default = (1014//6, 2) pixels)
thr Float Threshold above noise to detect signal (default = 0.05)
npixels Int number of pixels for a spectrum to be detected (default = 80)
Output
______
Numpy array containing the mask, which is also written to the ipppssoot_msk.fits file<|endoftext|>
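The DQ handling in create_msk boils down to a bitwise AND of the data-quality array against a chosen set of flag bits; a small self-contained numpy illustration follows (the bit values are assumptions, not the package defaults).

import numpy as np

bit_mask = 256 + 512                           # hypothetical set of DQ flags to reject
dq = np.array([[0, 256], [4, 768]], dtype=np.int32)
flagged = np.bitwise_and(dq, bit_mask) > 0     # True wherever a rejected flag is set
mask = np.zeros(dq.shape)
mask[flagged] = 1.0                            # 1 = exclude this pixel from the fit
print(mask)                                    # [[0. 1.] [0. 1.]]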
59a2b6d4c380b37ce43c9ee9985547c0a3e14daf594f6095429aaa01ad36e622 | def get_mask(self, flt_name, kernel_fwhm=1.25, background_box=20, thr=0.05, npixels=100):
'\n Function to create a mask (set to 0 for no detection and 1 for detection) appropriate to mask WFC3 slitless data. \n Attributes\n ----------\n flt_name string containing the name of the FLT name to create a mask for\n kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)\n background_box Int The saie fo the background box when estimating the background (default = 20 pixels) \n thr Float Threshold above noise to detect signal (default = 0.25)\n npixels Int number of pixels for a spectrum to be detected (default = 15) \n\n Output\n ------\n A numpy array containing the mask\n '
h = fits.open(flt_name)[0].header
filt = h['FILTER']
fin = fits.open(flt_name)
image = fin['SCI'].data
err = fin['ERR'].data
dq = fin['DQ'].data
dq = np.bitwise_and(dq, (np.zeros(np.shape(dq), np.int16) + self.bit_mask))
g = Gaussian1D(mean=0.0, stddev=(kernel_fwhm / 2.35))
x = (np.arange(16.0) - 8)
a = g(x)
kernel = np.tile(a, ((16 * int((kernel_fwhm + 1))), 1)).T
kernel = (kernel / np.sum(kernel))
b = Background2D(image, background_box)
image = (image - b.background)
threshold = (thr * err)
image[(dq > 0)] = 0.0
mask = detect_sources(image, threshold, npixels=npixels, filter_kernel=kernel).data
ok = ((mask == 0.0) & (dq == 0))
mask[(~ ok)] = 1.0
return mask | Function to create a mask (set to 0 for no detection and 1 for detection) appropriate to mask WFC3 slitless data.
Attributes
----------
flt_name string containing the name of the FLT file to create a mask for
kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)
background_box Int The size of the background box when estimating the background (default = 20 pixels)
thr Float Threshold above noise to detect signal (default = 0.05)
npixels Int number of pixels for a spectrum to be detected (default = 100)
Output
------
A numpy array containing the mask | WFC3_Back_Sub/back_sub.py | get_mask | npirzkal/WFC3_Back_sub | 0 | python | def get_mask(self, flt_name, kernel_fwhm=1.25, background_box=20, thr=0.05, npixels=100):
'\n Function to create a mask (set to 0 for no detection and 1 for detection) appropriate to mask WFC3 slitless data. \n Attributes\n ----------\n flt_name string containing the name of the FLT name to create a mask for\n kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)\n background_box Int The saie fo the background box when estimating the background (default = 20 pixels) \n thr Float Threshold above noise to detect signal (default = 0.25)\n npixels Int number of pixels for a spectrum to be detected (default = 15) \n\n Output\n ------\n A numpy array containing the mask\n '
h = fits.open(flt_name)[0].header
filt = h['FILTER']
fin = fits.open(flt_name)
image = fin['SCI'].data
err = fin['ERR'].data
dq = fin['DQ'].data
dq = np.bitwise_and(dq, (np.zeros(np.shape(dq), np.int16) + self.bit_mask))
g = Gaussian1D(mean=0.0, stddev=(kernel_fwhm / 2.35))
x = (np.arange(16.0) - 8)
a = g(x)
kernel = np.tile(a, ((16 * int((kernel_fwhm + 1))), 1)).T
kernel = (kernel / np.sum(kernel))
b = Background2D(image, background_box)
image = (image - b.background)
threshold = (thr * err)
image[(dq > 0)] = 0.0
mask = detect_sources(image, threshold, npixels=npixels, filter_kernel=kernel).data
ok = ((mask == 0.0) & (dq == 0))
mask[(~ ok)] = 1.0
return mask | def get_mask(self, flt_name, kernel_fwhm=1.25, background_box=20, thr=0.05, npixels=100):
'\n Function to create a mask (set to 0 for no detection and 1 for detection) appropriate to mask WFC3 slitless data. \n Attributes\n ----------\n flt_name string containing the name of the FLT name to create a mask for\n kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)\n background_box Int The saie fo the background box when estimating the background (default = 20 pixels) \n thr Float Threshold above noise to detect signal (default = 0.25)\n npixels Int number of pixels for a spectrum to be detected (default = 15) \n\n Output\n ------\n A numpy array containing the mask\n '
h = fits.open(flt_name)[0].header
filt = h['FILTER']
fin = fits.open(flt_name)
image = fin['SCI'].data
err = fin['ERR'].data
dq = fin['DQ'].data
dq = np.bitwise_and(dq, (np.zeros(np.shape(dq), np.int16) + self.bit_mask))
g = Gaussian1D(mean=0.0, stddev=(kernel_fwhm / 2.35))
x = (np.arange(16.0) - 8)
a = g(x)
kernel = np.tile(a, ((16 * int((kernel_fwhm + 1))), 1)).T
kernel = (kernel / np.sum(kernel))
b = Background2D(image, background_box)
image = (image - b.background)
threshold = (thr * err)
image[(dq > 0)] = 0.0
mask = detect_sources(image, threshold, npixels=npixels, filter_kernel=kernel).data
ok = ((mask == 0.0) & (dq == 0))
mask[(~ ok)] = 1.0
return mask<|docstring|>Function to create a mask (set to 0 for no detection and 1 for detection) appropriate to mask WFC3 slitless data.
Attributes
----------
flt_name string containing the name of the FLT file to create a mask for
kernel_fwhm Float The size of the detection kernel (default = 1.25 pixel)
background_box Int The size of the background box when estimating the background (default = 20 pixels)
thr Float Threshold above noise to detect signal (default = 0.05)
npixels Int number of pixels for a spectrum to be detected (default = 100)
Output
------
A numpy array containing the mask<|endoftext|> |
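The detection kernel assembled in get_mask is a 1-D Gaussian cross-dispersion profile replicated along the dispersion axis, so that long, narrow spectra are favoured by the source finder; a numpy-only sketch of the same construction, using the default FWHM as an assumption.

import numpy as np

kernel_fwhm = 1.25                              # assumed, matching the default above
sigma = kernel_fwhm / 2.35
y = np.arange(16.0) - 8
profile = np.exp(-0.5 * (y / sigma) ** 2)       # cross-dispersion Gaussian profile
width = 16 * int(kernel_fwhm + 1)               # extent along the dispersion direction
kernel = np.tile(profile, (width, 1)).T         # shape (16, 32): narrow in y, long in x
kernel = kernel / kernel.sum()                  # normalise, as in the record
print(kernel.shape)                             # (16, 32)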
4e7d20a45d733069d738e7a044d90a6691548575bf0fe35c68931ac224656af8 | def Get_HeI_Zodi_Scatter_Levels(self):
'\n Function to estimate the Zodi, HeI, and Scatter levels in each IMSET of an IMA file.\n A set of IMA files can be processed at once and the Zodi level is assumed to be identical in all of them. The HeI and Scatter\n levels are allowed to vary freely. See code by R. Ryan in Appendix of ISR WFC3 2015-17 for details.\n\n Atributes\n ---------\n ima_names List A list containing the names of IMA files to process together. \n\n Output\n ------\n \n Zodi Float The value of Zodi scale\n HeIs Dic A dictionary containing all the HeI scale value for each IMSET in each IMA file\n Scats Dic A dictionary containing all the Scatter scale value for each IMSET in each IMA file\n\n '
ima_names = ['{}_ima.fits'.format(x) for x in self.obs_ids]
nimas = len(ima_names)
nexts = [fits.open(ima_name)[(- 1)].header['EXTVER'] for ima_name in ima_names]
filt = fits.open(ima_names[0])[0].header['FILTER']
print('NEXTS:', nexts)
zodi = (self.zodi * 1)
HeI = (self.HeI * 1)
Scatter = (self.Scatter * 1)
data0s = []
err0s = []
samp0s = []
dq0s = []
dqm0s = []
masks = []
for j in range(nimas):
obs_id = ima_names[j][0:9]
mask = fits.open('{}_msk.fits'.format(obs_id))[0].data
masks.append([mask for ext in range(1, nexts[j])])
data0s.append([(fits.open(ima_names[j])[('SCI', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
err0s.append([(fits.open(ima_names[j])[('ERR', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
dq0s.append([(fits.open(ima_names[j])[('DQ', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
dqm0s = [[np.bitwise_and(dq0, (np.zeros(np.shape(dq0), np.int16) + self.bit_mask)) for dq0 in dq0s[j]] for j in range(nimas)]
ok = ((np.isfinite(zodi) & np.isfinite(HeI)) & np.isfinite(Scatter))
zodi[(~ ok)] = 0.0
HeI[(~ ok)] = 0.0
Scatter[(~ ok)] = 0.0
whts = []
for j in range(len(ima_names)):
whts_j = []
for i in range(len(err0s[j])):
err = err0s[j][i]
err[(err <= 1e-06)] = 1e-06
w = (1.0 / (err ** 2))
w[(~ ok)] = 0.0
whts_j.append(w)
whts.append(whts_j)
nflt = sum(nexts)
npar = ((2 * nflt) + 1)
print('We are solving for ', npar, ' HeI values')
v = np.zeros(npar, np.float)
m = np.zeros([npar, npar], np.float)
ii = (- 1)
for j in range(len(ima_names)):
whts[j] = np.array(whts[j])
data0s[j] = np.array(data0s[j])
masks[j] = np.array(masks[j])
dqm0s[j] = np.array(dqm0s[j])
whts[j][(~ np.isfinite(data0s[j]))] = 0.0
data0s[j][(~ np.isfinite(data0s[j]))] = 0.0
whts[j][(masks[j] > 0)] = 0.0
whts[j][(dqm0s[j] != 0)] = 0.0
for i in range(len(data0s[j])):
ii = (ii + 1)
print('name:', ima_names[j], 'imset:', (i + 1), ii)
img = data0s[j][i]
wht = whts[j][i]
if (self.border > 0):
wht[0:self.border] = 0.0
wht[(- self.border):0] = 0.0
wht[(:, 0:self.border)] = 0.0
wht[(:, (- self.border):0)] = 0.0
v[ii] = np.sum(((wht * data0s[j][i]) * HeI))
v[(- 1)] += np.sum(((wht * data0s[j][i]) * zodi))
m[(ii, ii)] = np.sum(((wht * HeI) * HeI))
m[(ii, (- 1))] = np.sum(((wht * HeI) * zodi))
m[((- 1), ii)] = m[(ii, (- 1))]
m[((- 1), (- 1))] += np.sum(((wht * zodi) * zodi))
v[(ii + nflt)] = np.sum(((wht * data0s[j][i]) * Scatter))
m[((ii + nflt), (ii + nflt))] = np.sum(((wht * Scatter) * Scatter))
m[(ii, (ii + nflt))] = np.sum(((wht * HeI) * Scatter))
m[((ii + nflt), (- 1))] = np.sum(((wht * zodi) * Scatter))
m[((ii + nflt), ii)] = m[(ii, (ii + nflt))]
m[((- 1), (ii + nflt))] = m[((ii + nflt), (- 1))]
if self.use_nnls:
res = optimize.nnls(m, v)
x = res[0]
else:
res = optimize.lsq_linear(m, v)
x = res.x
Zodi = x[(- 1)]
HeIs = {}
Scats = {}
ii = (- 1)
for j in range(len(data0s)):
HeIs[ima_names[j]] = {}
Scats[ima_names[j]] = {}
for i in range(len(data0s[j])):
ii = (ii + 1)
print(('%s %d Zodi: %3.3f He: %3.3f S: %3.3f' % (ima_names[j], i, x[(- 1)], x[ii], x[(ii + nflt)])))
HeIs[ima_names[j]][(i + 1)] = x[ii]
Scats[ima_names[j]][(i + 1)] = x[(ii + nflt)]
self.Zodi = Zodi
self.HeIs = HeIs
self.Scats = Scats | Function to estimate the Zodi, HeI, and Scatter levels in each IMSET of an IMA file.
A set of IMA files can be processed at once and the Zodi level is assumed to be identical in all of them. The HeI and Scatter
levels are allowed to vary freely. See code by R. Ryan in Appendix of ISR WFC3 2015-17 for details.
Attributes
---------
ima_names List A list containing the names of IMA files to process together.
Output
------
Zodi Float The value of Zodi scale
HeIs Dic A dictionary containing all the HeI scale value for each IMSET in each IMA file
Scats Dic A dictionary containing all the Scatter scale value for each IMSET in each IMA file | WFC3_Back_Sub/back_sub.py | Get_HeI_Zodi_Scatter_Levels | npirzkal/WFC3_Back_sub | 0 | python | def Get_HeI_Zodi_Scatter_Levels(self):
'\n Function to estimate the Zodi, HeI, and Scatter levels in each IMSET of an IMA file.\n A set of IMA files can be processed at once and the Zodi level is assumed to be identical in all of them. The HeI and Scatter\n levels are allowed to vary freely. See code by R. Ryan in Appendix of ISR WFC3 2015-17 for details.\n\n Atributes\n ---------\n ima_names List A list containing the names of IMA files to process together. \n\n Output\n ------\n \n Zodi Float The value of Zodi scale\n HeIs Dic A dictionary containing all the HeI scale value for each IMSET in each IMA file\n Scats Dic A dictionary containing all the Scatter scale value for each IMSET in each IMA file\n\n '
ima_names = ['{}_ima.fits'.format(x) for x in self.obs_ids]
nimas = len(ima_names)
nexts = [fits.open(ima_name)[(- 1)].header['EXTVER'] for ima_name in ima_names]
filt = fits.open(ima_names[0])[0].header['FILTER']
print('NEXTS:', nexts)
zodi = (self.zodi * 1)
HeI = (self.HeI * 1)
Scatter = (self.Scatter * 1)
data0s = []
err0s = []
samp0s = []
dq0s = []
dqm0s = []
masks = []
for j in range(nimas):
obs_id = ima_names[j][0:9]
mask = fits.open('{}_msk.fits'.format(obs_id))[0].data
masks.append([mask for ext in range(1, nexts[j])])
data0s.append([(fits.open(ima_names[j])[('SCI', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
err0s.append([(fits.open(ima_names[j])[('ERR', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
dq0s.append([(fits.open(ima_names[j])[('DQ', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
dqm0s = [[np.bitwise_and(dq0, (np.zeros(np.shape(dq0), np.int16) + self.bit_mask)) for dq0 in dq0s[j]] for j in range(nimas)]
ok = ((np.isfinite(zodi) & np.isfinite(HeI)) & np.isfinite(Scatter))
zodi[(~ ok)] = 0.0
HeI[(~ ok)] = 0.0
Scatter[(~ ok)] = 0.0
whts = []
for j in range(len(ima_names)):
whts_j = []
for i in range(len(err0s[j])):
err = err0s[j][i]
err[(err <= 1e-06)] = 1e-06
w = (1.0 / (err ** 2))
w[(~ ok)] = 0.0
whts_j.append(w)
whts.append(whts_j)
nflt = sum(nexts)
npar = ((2 * nflt) + 1)
print('We are solving for ', npar, ' HeI values')
v = np.zeros(npar, np.float)
m = np.zeros([npar, npar], np.float)
ii = (- 1)
for j in range(len(ima_names)):
whts[j] = np.array(whts[j])
data0s[j] = np.array(data0s[j])
masks[j] = np.array(masks[j])
dqm0s[j] = np.array(dqm0s[j])
whts[j][(~ np.isfinite(data0s[j]))] = 0.0
data0s[j][(~ np.isfinite(data0s[j]))] = 0.0
whts[j][(masks[j] > 0)] = 0.0
whts[j][(dqm0s[j] != 0)] = 0.0
for i in range(len(data0s[j])):
ii = (ii + 1)
print('name:', ima_names[j], 'imset:', (i + 1), ii)
img = data0s[j][i]
wht = whts[j][i]
if (self.border > 0):
wht[0:self.border] = 0.0
wht[(- self.border):0] = 0.0
wht[(:, 0:self.border)] = 0.0
wht[(:, (- self.border):0)] = 0.0
v[ii] = np.sum(((wht * data0s[j][i]) * HeI))
v[(- 1)] += np.sum(((wht * data0s[j][i]) * zodi))
m[(ii, ii)] = np.sum(((wht * HeI) * HeI))
m[(ii, (- 1))] = np.sum(((wht * HeI) * zodi))
m[((- 1), ii)] = m[(ii, (- 1))]
m[((- 1), (- 1))] += np.sum(((wht * zodi) * zodi))
v[(ii + nflt)] = np.sum(((wht * data0s[j][i]) * Scatter))
m[((ii + nflt), (ii + nflt))] = np.sum(((wht * Scatter) * Scatter))
m[(ii, (ii + nflt))] = np.sum(((wht * HeI) * Scatter))
m[((ii + nflt), (- 1))] = np.sum(((wht * zodi) * Scatter))
m[((ii + nflt), ii)] = m[(ii, (ii + nflt))]
m[((- 1), (ii + nflt))] = m[((ii + nflt), (- 1))]
if self.use_nnls:
res = optimize.nnls(m, v)
x = res[0]
else:
res = optimize.lsq_linear(m, v)
x = res.x
Zodi = x[(- 1)]
HeIs = {}
Scats = {}
ii = (- 1)
for j in range(len(data0s)):
HeIs[ima_names[j]] = {}
Scats[ima_names[j]] = {}
for i in range(len(data0s[j])):
ii = (ii + 1)
print(('%s %d Zodi: %3.3f He: %3.3f S: %3.3f' % (ima_names[j], i, x[(- 1)], x[ii], x[(ii + nflt)])))
HeIs[ima_names[j]][(i + 1)] = x[ii]
Scats[ima_names[j]][(i + 1)] = x[(ii + nflt)]
self.Zodi = Zodi
self.HeIs = HeIs
self.Scats = Scats | def Get_HeI_Zodi_Scatter_Levels(self):
'\n Function to estimate the Zodi, HeI, and Scatter levels in each IMSET of an IMA file.\n A set of IMA files can be processed at once and the Zodi level is assumed to be identical in all of them. The HeI and Scatter\n levels are allowed to vary freely. See code by R. Ryan in Appendix of ISR WFC3 2015-17 for details.\n\n Atributes\n ---------\n ima_names List A list containing the names of IMA files to process together. \n\n Output\n ------\n \n Zodi Float The value of Zodi scale\n HeIs Dic A dictionary containing all the HeI scale value for each IMSET in each IMA file\n Scats Dic A dictionary containing all the Scatter scale value for each IMSET in each IMA file\n\n '
ima_names = ['{}_ima.fits'.format(x) for x in self.obs_ids]
nimas = len(ima_names)
nexts = [fits.open(ima_name)[(- 1)].header['EXTVER'] for ima_name in ima_names]
filt = fits.open(ima_names[0])[0].header['FILTER']
print('NEXTS:', nexts)
zodi = (self.zodi * 1)
HeI = (self.HeI * 1)
Scatter = (self.Scatter * 1)
data0s = []
err0s = []
samp0s = []
dq0s = []
dqm0s = []
masks = []
for j in range(nimas):
obs_id = ima_names[j][0:9]
mask = fits.open('{}_msk.fits'.format(obs_id))[0].data
masks.append([mask for ext in range(1, nexts[j])])
data0s.append([(fits.open(ima_names[j])[('SCI', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
err0s.append([(fits.open(ima_names[j])[('ERR', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
dq0s.append([(fits.open(ima_names[j])[('DQ', ext)].data[(5:(1014 + 5), 5:(1014 + 5))] * 1) for ext in range(1, nexts[j])])
dqm0s = [[np.bitwise_and(dq0, (np.zeros(np.shape(dq0), np.int16) + self.bit_mask)) for dq0 in dq0s[j]] for j in range(nimas)]
ok = ((np.isfinite(zodi) & np.isfinite(HeI)) & np.isfinite(Scatter))
zodi[(~ ok)] = 0.0
HeI[(~ ok)] = 0.0
Scatter[(~ ok)] = 0.0
whts = []
for j in range(len(ima_names)):
whts_j = []
for i in range(len(err0s[j])):
err = err0s[j][i]
err[(err <= 1e-06)] = 1e-06
w = (1.0 / (err ** 2))
w[(~ ok)] = 0.0
whts_j.append(w)
whts.append(whts_j)
nflt = sum(nexts)
npar = ((2 * nflt) + 1)
print('We are solving for ', npar, ' HeI values')
v = np.zeros(npar, np.float)
m = np.zeros([npar, npar], np.float)
ii = (- 1)
for j in range(len(ima_names)):
whts[j] = np.array(whts[j])
data0s[j] = np.array(data0s[j])
masks[j] = np.array(masks[j])
dqm0s[j] = np.array(dqm0s[j])
whts[j][(~ np.isfinite(data0s[j]))] = 0.0
data0s[j][(~ np.isfinite(data0s[j]))] = 0.0
whts[j][(masks[j] > 0)] = 0.0
whts[j][(dqm0s[j] != 0)] = 0.0
for i in range(len(data0s[j])):
ii = (ii + 1)
print('name:', ima_names[j], 'imset:', (i + 1), ii)
img = data0s[j][i]
wht = whts[j][i]
if (self.border > 0):
wht[0:self.border] = 0.0
wht[(- self.border):0] = 0.0
wht[(:, 0:self.border)] = 0.0
wht[(:, (- self.border):0)] = 0.0
v[ii] = np.sum(((wht * data0s[j][i]) * HeI))
v[(- 1)] += np.sum(((wht * data0s[j][i]) * zodi))
m[(ii, ii)] = np.sum(((wht * HeI) * HeI))
m[(ii, (- 1))] = np.sum(((wht * HeI) * zodi))
m[((- 1), ii)] = m[(ii, (- 1))]
m[((- 1), (- 1))] += np.sum(((wht * zodi) * zodi))
v[(ii + nflt)] = np.sum(((wht * data0s[j][i]) * Scatter))
m[((ii + nflt), (ii + nflt))] = np.sum(((wht * Scatter) * Scatter))
m[(ii, (ii + nflt))] = np.sum(((wht * HeI) * Scatter))
m[((ii + nflt), (- 1))] = np.sum(((wht * zodi) * Scatter))
m[((ii + nflt), ii)] = m[(ii, (ii + nflt))]
m[((- 1), (ii + nflt))] = m[((ii + nflt), (- 1))]
if self.use_nnls:
res = optimize.nnls(m, v)
x = res[0]
else:
res = optimize.lsq_linear(m, v)
x = res.x
Zodi = x[(- 1)]
HeIs = {}
Scats = {}
ii = (- 1)
for j in range(len(data0s)):
HeIs[ima_names[j]] = {}
Scats[ima_names[j]] = {}
for i in range(len(data0s[j])):
ii = (ii + 1)
print(('%s %d Zodi: %3.3f He: %3.3f S: %3.3f' % (ima_names[j], i, x[(- 1)], x[ii], x[(ii + nflt)])))
HeIs[ima_names[j]][(i + 1)] = x[ii]
Scats[ima_names[j]][(i + 1)] = x[(ii + nflt)]
self.Zodi = Zodi
self.HeIs = HeIs
self.Scats = Scats<|docstring|>Function to estimate the Zodi, HeI, and Scatter levels in each IMSET of an IMA file.
A set of IMA files can be processed at once and the Zodi level is assumed to be identical in all of them. The HeI and Scatter
levels are allowed to vary freely. See code by R. Ryan in Appendix of ISR WFC3 2015-17 for details.
Attributes
---------
ima_names List A list containing the names of IMA files to process together.
Output
------
Zodi Float The value of Zodi scale
HeIs Dic A dictionary containing all the HeI scale value for each IMSET in each IMA file
Scats Dic A dictionary containing all the Scatter scale value for each IMSET in each IMA file<|endoftext|> |
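The decomposition carried out above is a weighted linear least-squares fit of every masked read to a sum of fixed background templates (zodiacal light, HeI, scattered light), solved through the normal equations with an optional non-negativity constraint; a toy two-template illustration of the same solve, with invented data.

import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
zodi = np.ones(1000)                      # toy stand-ins for the reference background images
hei = rng.random(1000)
data = 0.8 * zodi + 0.3 * hei + rng.normal(0, 0.01, 1000)
wht = np.full(1000, 1.0)                  # inverse-variance weights (uniform here)

templates = np.vstack([hei, zodi])        # unknown scales ordered as [HeI, zodi]
m = (templates * wht) @ templates.T       # normal matrix, built element by element in the record
v = (templates * wht) @ data              # right-hand side
scales, _ = optimize.nnls(m, v)           # non-negative solution, as in the use_nnls branch
print(scales)                             # approximately [0.3, 0.8]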
3ef90790a1dfef0e7c0e3e5c13c1aa175f6767a1af38acdf9d5f2466131b46e5 | def sub_HeI_Scat(self):
'\n Function to subtract the appropriately scaled HeI and Scatter light models from each of the IMSET of the IMA files \n included in the HeIs and Scats dictionaries. Header keywords are populated to reflect the amount of HeI and Scattered light\n subtracted. Function will fail to run a second time on a dataset.\n\n '
for f in self.HeIs.keys():
print('Updating ', f)
fin = fits.open(f, mode='update')
filt = fin[0].header['FILTER']
zodi = (self.zodi * 1)
HeI = (self.HeI * 1)
Scatter = (self.Scatter * 1)
for extver in self.HeIs[f].keys():
print('EXTVER:', extver)
try:
val = fin[('SCI', extver)].header['HeI']
print('HeI found in ', f, 'Aborting..')
continue
except:
pass
print('IMSET:', extver, 'subtracting', self.HeIs[f][extver], self.Scats[f][extver])
print('Before:', np.nanmedian(fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))]))
fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))] = ((fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))] - (self.HeIs[f][extver] * HeI)) - (self.Scats[f][extver] * Scatter))
print('After:', np.nanmedian(fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))]))
fin[('SCI', extver)].header['HeI_{}'.format(extver)] = (self.HeIs[f][extver], 'HeI level subtracted (e-)')
fin[('SCI', extver)].header['Scat_{}'.format(extver)] = (self.Scats[f][extver], 'Scat level estimated (e-)')
fin.close() | Function to subtract the appropriately scaled HeI and Scatter light models from each IMSET of the IMA files
included in the HeIs and Scats dictionaries. Header keywords are populated to reflect the amount of HeI and Scattered light
subtracted. The function will not subtract the levels a second time if re-run on a dataset. | WFC3_Back_Sub/back_sub.py | sub_HeI_Scat | npirzkal/WFC3_Back_sub | 0 | python | def sub_HeI_Scat(self):
'\n Function to subtract the appropriately scaled HeI and Scatter light models from each of the IMSET of the IMA files \n included in the HeIs and Scats dictionaries. Header keywords are populated to reflect the amount of HeI and Scattered light\n subtracted. Function will fail to run a second time on a dataset.\n\n '
for f in self.HeIs.keys():
print('Updating ', f)
fin = fits.open(f, mode='update')
filt = fin[0].header['FILTER']
zodi = (self.zodi * 1)
HeI = (self.HeI * 1)
Scatter = (self.Scatter * 1)
for extver in self.HeIs[f].keys():
print('EXTVER:', extver)
try:
val = fin[('SCI', extver)].header['HeI']
print('HeI found in ', f, 'Aborting..')
continue
except:
pass
print('IMSET:', extver, 'subtracting', self.HeIs[f][extver], self.Scats[f][extver])
print('Before:', np.nanmedian(fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))]))
fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))] = ((fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))] - (self.HeIs[f][extver] * HeI)) - (self.Scats[f][extver] * Scatter))
print('After:', np.nanmedian(fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))]))
fin[('SCI', extver)].header['HeI_{}'.format(extver)] = (self.HeIs[f][extver], 'HeI level subtracted (e-)')
fin[('SCI', extver)].header['Scat_{}'.format(extver)] = (self.Scats[f][extver], 'Scat level estimated (e-)')
fin.close() | def sub_HeI_Scat(self):
'\n Function to subtract the appropriately scaled HeI and Scatter light models from each of the IMSET of the IMA files \n included in the HeIs and Scats dictionaries. Header keywords are populated to reflect the amount of HeI and Scattered light\n subtracted. Function will fail to run a second time on a dataset.\n\n '
for f in self.HeIs.keys():
print('Updating ', f)
fin = fits.open(f, mode='update')
filt = fin[0].header['FILTER']
zodi = (self.zodi * 1)
HeI = (self.HeI * 1)
Scatter = (self.Scatter * 1)
for extver in self.HeIs[f].keys():
print('EXTVER:', extver)
try:
val = fin[('SCI', extver)].header['HeI']
print('HeI found in ', f, 'Aborting..')
continue
except:
pass
print('IMSET:', extver, 'subtracting', self.HeIs[f][extver], self.Scats[f][extver])
print('Before:', np.nanmedian(fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))]))
fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))] = ((fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))] - (self.HeIs[f][extver] * HeI)) - (self.Scats[f][extver] * Scatter))
print('After:', np.nanmedian(fin[('SCI', extver)].data[(5:(1014 + 5), 5:(1014 + 5))]))
fin[('SCI', extver)].header['HeI_{}'.format(extver)] = (self.HeIs[f][extver], 'HeI level subtracted (e-)')
fin[('SCI', extver)].header['Scat_{}'.format(extver)] = (self.Scats[f][extver], 'Scat level estimated (e-)')
fin.close()<|docstring|>Function to subtract the appropriately scaled HeI and Scatter light models from each IMSET of the IMA files
included in the HeIs and Scats dictionaries. Header keywords are populated to reflect the amount of HeI and Scattered light
subtracted. The function will not subtract the levels a second time if re-run on a dataset.<|endoftext|>
89e703e70e4df2d4ca19d3f6aa3a9d0429e662a0aabe09094dd6503c6deb1dfc | def sub_Zodi(self, flt_name):
'\n Function to re-compute and subtract the Zodi component from an FLT file. A match MSK file is assumed to exist.\n\n Attributes\n ----------\n flt_name String containing the name of the FLT file to process\n\n Output\n ------\n None\n\n '
obs_id = os.path.split(flt_name)[(- 1)][0:9]
fin = fits.open(flt_name, mode='update')
try:
val = fin['SCI'].header['Zodi']
print('Subtracted Zodi level found in ', flt_name, 'Aborting..')
return
except:
pass
if (not os.path.isfile('{}_msk.fits'.format(obs_id))):
print('sub_Zodi could not find the MSK file ', '{}_msk.fits'.format(obs_id))
sys.exit(1)
filt = fin[0].header['FILTER']
if self.recompute_Zodi:
d = fin['SCI'].data
dq0 = fin['DQ'].data
dq = np.bitwise_and(dq0, (np.zeros(np.shape(dq0), np.int16) + self.bit_mask))
msk = fits.open('{}_msk.fits'.format(obs_id))[0].data
ok = ((msk == 0) & (dq == 0))
tmp = (d * 1.0)
tmp[(~ ok)] = np.nan
zodi = np.nanmedian((tmp / self.zodi))
print('=================> Zodi, Scale', self.Zodi, 'this scale:', zodi)
else:
zodi = (self.Zodi * 1)
fin['SCI'].data = (fin['SCI'].data - (zodi * self.zodi))
fin['SCI'].header['Zodi'] = (self.Zodi, 'Zodi level estimated (e-)')
fin.close(output_verify='ignore') | Function to re-compute and subtract the Zodi component from an FLT file. A matching MSK file is assumed to exist.
Attributes
----------
flt_name String containing the name of the FLT file to process
Output
------
None | WFC3_Back_Sub/back_sub.py | sub_Zodi | npirzkal/WFC3_Back_sub | 0 | python | def sub_Zodi(self, flt_name):
'\n Function to re-compute and subtract the Zodi component from an FLT file. A match MSK file is assumed to exist.\n\n Attributes\n ----------\n flt_name String containing the name of the FLT file to process\n\n Output\n ------\n None\n\n '
obs_id = os.path.split(flt_name)[(- 1)][0:9]
fin = fits.open(flt_name, mode='update')
try:
val = fin['SCI'].header['Zodi']
print('Subtracted Zodi level found in ', flt_name, 'Aborting..')
return
except:
pass
if (not os.path.isfile('{}_msk.fits'.format(obs_id))):
print('sub_Zodi could not find the MSK file ', '{}_msk.fits'.format(obs_id))
sys.exit(1)
filt = fin[0].header['FILTER']
if self.recompute_Zodi:
d = fin['SCI'].data
dq0 = fin['DQ'].data
dq = np.bitwise_and(dq0, (np.zeros(np.shape(dq0), np.int16) + self.bit_mask))
msk = fits.open('{}_msk.fits'.format(obs_id))[0].data
ok = ((msk == 0) & (dq == 0))
tmp = (d * 1.0)
tmp[(~ ok)] = np.nan
zodi = np.nanmedian((tmp / self.zodi))
print('=================> Zodi, Scale', self.Zodi, 'this scale:', zodi)
else:
zodi = (self.Zodi * 1)
fin['SCI'].data = (fin['SCI'].data - (zodi * self.zodi))
fin['SCI'].header['Zodi'] = (self.Zodi, 'Zodi level estimated (e-)')
fin.close(output_verify='ignore') | def sub_Zodi(self, flt_name):
'\n Function to re-compute and subtract the Zodi component from an FLT file. A match MSK file is assumed to exist.\n\n Attributes\n ----------\n flt_name String containing the name of the FLT file to process\n\n Output\n ------\n None\n\n '
obs_id = os.path.split(flt_name)[(- 1)][0:9]
fin = fits.open(flt_name, mode='update')
try:
val = fin['SCI'].header['Zodi']
print('Subtracted Zodi level found in ', flt_name, 'Aborting..')
return
except:
pass
if (not os.path.isfile('{}_msk.fits'.format(obs_id))):
print('sub_Zodi could not find the MSK file ', '{}_msk.fits'.format(obs_id))
sys.exit(1)
filt = fin[0].header['FILTER']
if self.recompute_Zodi:
d = fin['SCI'].data
dq0 = fin['DQ'].data
dq = np.bitwise_and(dq0, (np.zeros(np.shape(dq0), np.int16) + self.bit_mask))
msk = fits.open('{}_msk.fits'.format(obs_id))[0].data
ok = ((msk == 0) & (dq == 0))
tmp = (d * 1.0)
tmp[(~ ok)] = np.nan
zodi = np.nanmedian((tmp / self.zodi))
print('=================> Zodi, Scale', self.Zodi, 'this scale:', zodi)
else:
zodi = (self.Zodi * 1)
fin['SCI'].data = (fin['SCI'].data - (zodi * self.zodi))
fin['SCI'].header['Zodi'] = (self.Zodi, 'Zodi level estimated (e-)')
fin.close(output_verify='ignore')<|docstring|>Function to re-compute and subtract the Zodi component from an FLT file. A matching MSK file is assumed to exist.
Attributes
----------
flt_name String containing the name of the FLT file to process
Output
------
None<|endoftext|> |
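When recompute_Zodi is enabled, the scale applied above is simply a masked median of the science data divided, pixel by pixel, by the zodiacal reference image; a small numpy-only illustration of that estimator with invented values.

import numpy as np

data = np.array([[2.0, 2.2], [40.0, 1.8]])     # 40.0 stands in for an object pixel
zodi_ref = np.array([[2.0, 2.0], [2.0, 2.0]])  # zodiacal reference background image
mask = np.array([[0, 0], [1, 0]])              # 1 = pixel belongs to a detected source
dq = np.zeros_like(mask)

tmp = data.copy()
tmp[(mask > 0) | (dq > 0)] = np.nan            # drop masked and flagged pixels
scale = np.nanmedian(tmp / zodi_ref)           # robust per-pixel ratio
print(scale)                                   # 1.0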
4f06af9a677bbc7ecc0bcb78bf1c80cbc4de72a1b38fd2ef58e6f7f9b9044c0a | def plot_levels(self):
'Generate a plot showing the Zodi, HeI, and Scattered light levels for each IMSET read'
from astropy.io import fits
import matplotlib.pyplot as plt
for (n, f) in enumerate(self.flt_names):
print(f)
with fits.open(f) as fin:
h = fin[('SCI', 1)].header
zodi = h['ZODI']
xs = []
ys1 = []
ys2 = []
for i in range(1, h['BSAMP']):
TIME = h['TIME_{}'.format(i)]
DTIME = h['DTIME_{}'.format(i)]
STIME = (TIME - ((DTIME / 24) / 3600))
HeI = h['HeI_{}'.format(i)]
Scat = h['Scat_{}'.format(i)]
xs.append(STIME)
ys1.append(HeI)
ys2.append(Scat)
plt.axvspan(STIME, TIME, alpha=0.2)
print(xs, ys1)
plt.text(TIME, (- 0.2), f[0:9])
label = None
if (n == 0):
label = 'Zodiacal'
plt.axhline(zodi, label=label)
label = None
if (n == 0):
label = 'HeI'
plt.scatter(xs, ys1, color='g', label=label)
label = None
if (n == 0):
label = 'Scattered'
plt.scatter(xs, ys2, color='r', label=label)
(bottom, top) = plt.ylim()
plt.ylim(bottom=(- 0.25), top=top)
plt.legend()
plt.grid()
plt.xlabel('UT Time (MJD)')
plt.ylabel('$e^-/s$') | Generate a plot showing the Zodi, HeI, and Scattered light levels for each IMSET read | WFC3_Back_Sub/back_sub.py | plot_levels | npirzkal/WFC3_Back_sub | 0 | python | def plot_levels(self):
from astropy.io import fits
import matplotlib.pyplot as plt
for (n, f) in enumerate(self.flt_names):
print(f)
with fits.open(f) as fin:
h = fin[('SCI', 1)].header
zodi = h['ZODI']
xs = []
ys1 = []
ys2 = []
for i in range(1, h['BSAMP']):
TIME = h['TIME_{}'.format(i)]
DTIME = h['DTIME_{}'.format(i)]
STIME = (TIME - ((DTIME / 24) / 3600))
HeI = h['HeI_{}'.format(i)]
Scat = h['Scat_{}'.format(i)]
xs.append(STIME)
ys1.append(HeI)
ys2.append(Scat)
plt.axvspan(STIME, TIME, alpha=0.2)
print(xs, ys1)
plt.text(TIME, (- 0.2), f[0:9])
label = None
if (n == 0):
label = 'Zodiacal'
plt.axhline(zodi, label=label)
label = None
if (n == 0):
label = 'HeI'
plt.scatter(xs, ys1, color='g', label=label)
label = None
if (n == 0):
label = 'Scattered'
plt.scatter(xs, ys2, color='r', label=label)
(bottom, top) = plt.ylim()
plt.ylim(bottom=(- 0.25), top=top)
plt.legend()
plt.grid()
plt.xlabel('UT Time (MJD)')
plt.ylabel('$e^-/s$') | def plot_levels(self):
from astropy.io import fits
import matplotlib.pyplot as plt
for (n, f) in enumerate(self.flt_names):
print(f)
with fits.open(f) as fin:
h = fin[('SCI', 1)].header
zodi = h['ZODI']
xs = []
ys1 = []
ys2 = []
for i in range(1, h['BSAMP']):
TIME = h['TIME_{}'.format(i)]
DTIME = h['DTIME_{}'.format(i)]
STIME = (TIME - ((DTIME / 24) / 3600))
HeI = h['HeI_{}'.format(i)]
Scat = h['Scat_{}'.format(i)]
xs.append(STIME)
ys1.append(HeI)
ys2.append(Scat)
plt.axvspan(STIME, TIME, alpha=0.2)
print(xs, ys1)
plt.text(TIME, (- 0.2), f[0:9])
label = None
if (n == 0):
label = 'Zodiacal'
plt.axhline(zodi, label=label)
label = None
if (n == 0):
label = 'HeI'
plt.scatter(xs, ys1, color='g', label=label)
label = None
if (n == 0):
label = 'Scattered'
plt.scatter(xs, ys2, color='r', label=label)
(bottom, top) = plt.ylim()
plt.ylim(bottom=(- 0.25), top=top)
plt.legend()
plt.grid()
plt.xlabel('UT Time (MJD)')
plt.ylabel('$e^-/s$')<|docstring|>Generate a plot showing the Zodi, HeI, and Scattered light levels for each IMSET read<|endoftext|> |
b4771487f2bf811660546fde2761fe2e7d37f3a334a14afcbed0b9fddf257c00 | def diagnostic_plots(self):
'\n Function to output diagnostic plots for each of the processed observations, plotting the median residuals in the\n final background subtracted FLT files (after applying the detection mask).\n Attributes\n ----------\n obs_ids List containing the IDs of the FLT files to process\n\n Output\n ------\n Name of the plot file\n '
from astropy.io import fits
import numpy as np
import scipy
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10, (2.5 * (len(self.obs_ids) + 1)))
fig = plt.figure()
plt.clf()
plt.subplot((len(self.obs_ids) + 1), 1, 1)
self.plot_levels()
for (i, obs) in enumerate(self.obs_ids):
plt.subplot((len(self.obs_ids) + 1), 1, (i + 2))
f = '{}_flt.fits'.format(obs)
d = fits.open(f)[1].data
dq = fits.open(f)['DQ'].data
dq = np.bitwise_and(dq, (np.zeros(np.shape(dq), np.int16) + self.bit_mask))
f = '{}_msk.fits'.format(obs)
m = fits.open(f)[0].data
ok = ((m == 0) & np.isfinite(d))
d[(m > 0)] = np.nan
plt.plot(np.nanmedian(d, axis=0), label=obs)
plt.grid()
plt.ylabel('e-/s')
plt.xlabel('col')
plt.xlim(0, 1014)
plt.ylim((- 0.02), 0.02)
plt.legend()
plt.tight_layout()
oname = '{}_diag.png'.format(self.obs_ids[0][0:6])
plt.savefig(oname)
return '{}_diag.png'.format(self.obs_ids[0][0:6]) | Function to output diagnostic plots for each of the processed observations, plotting the median residuals in the
final background subtracted FLT files (after applying the detection mask).
Attributes
----------
obs_ids List containing the IDs of the FLT files to process
Output
------
Name of the plot file | WFC3_Back_Sub/back_sub.py | diagnostic_plots | npirzkal/WFC3_Back_sub | 0 | python | def diagnostic_plots(self):
'\n Function to output diagnostic plots for each of the processed observations, plotting the median residuals in the\n final background subtracted FLT files (after applying the detection mask).\n Attributes\n ----------\n obs_ids List containing the IDs of the FLT files to process\n\n Output\n ------\n Name of the plot file\n '
from astropy.io import fits
import numpy as np
import scipy
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10, (2.5 * (len(self.obs_ids) + 1)))
fig = plt.figure()
plt.clf()
plt.subplot((len(self.obs_ids) + 1), 1, 1)
self.plot_levels()
for (i, obs) in enumerate(self.obs_ids):
plt.subplot((len(self.obs_ids) + 1), 1, (i + 2))
f = '{}_flt.fits'.format(obs)
d = fits.open(f)[1].data
dq = fits.open(f)['DQ'].data
dq = np.bitwise_and(dq, (np.zeros(np.shape(dq), np.int16) + self.bit_mask))
f = '{}_msk.fits'.format(obs)
m = fits.open(f)[0].data
ok = ((m == 0) & np.isfinite(d))
d[(m > 0)] = np.nan
plt.plot(np.nanmedian(d, axis=0), label=obs)
plt.grid()
plt.ylabel('e-/s')
plt.xlabel('col')
plt.xlim(0, 1014)
plt.ylim((- 0.02), 0.02)
plt.legend()
plt.tight_layout()
oname = '{}_diag.png'.format(self.obs_ids[0][0:6])
plt.savefig(oname)
return '{}_diag.png'.format(self.obs_ids[0][0:6]) | def diagnostic_plots(self):
'\n Function to output diagnostic plots for each of the processed observations, plotting the median residuals in the\n final background subtracted FLT files (after applying the detection mask).\n Attributes\n ----------\n obs_ids List containing the IDs of the FLT files to process\n\n Output\n ------\n Name of the plot file\n '
from astropy.io import fits
import numpy as np
import scipy
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10, (2.5 * (len(self.obs_ids) + 1)))
fig = plt.figure()
plt.clf()
plt.subplot((len(self.obs_ids) + 1), 1, 1)
self.plot_levels()
for (i, obs) in enumerate(self.obs_ids):
plt.subplot((len(self.obs_ids) + 1), 1, (i + 2))
f = '{}_flt.fits'.format(obs)
d = fits.open(f)[1].data
dq = fits.open(f)['DQ'].data
dq = np.bitwise_and(dq, (np.zeros(np.shape(dq), np.int16) + self.bit_mask))
f = '{}_msk.fits'.format(obs)
m = fits.open(f)[0].data
ok = ((m == 0) & np.isfinite(d))
d[(m > 0)] = np.nan
plt.plot(np.nanmedian(d, axis=0), label=obs)
plt.grid()
plt.ylabel('e-/s')
plt.xlabel('col')
plt.xlim(0, 1014)
plt.ylim((- 0.02), 0.02)
plt.legend()
plt.tight_layout()
oname = '{}_diag.png'.format(self.obs_ids[0][0:6])
plt.savefig(oname)
return '{}_diag.png'.format(self.obs_ids[0][0:6])<|docstring|>Function to output diagnostic plots for each of the processed observations, plotting the median residuals in the
final background subtracted FLT files (after applying the detection mask).
Attributes
----------
obs_ids List containing the IDs of the FLT files to process
Output
------
Name of the plot file<|endoftext|> |
ad98b02219c43f5aae65d3fb9c9aaaa8113d6c51dcda553781f35c0146ee0235 | def restore_FF(self):
'\n Function to undo the flattening of an FLT file by de-applying the flat-field that was used and instead\n re-applying the default pipeline flat-field which only corrects for the quandrant gain values.\n '
f1 = os.path.join(os.environ['tref'], self.FF_file.split('$')[(- 1)])
f2 = os.path.join(os.environ['iref'], self.org_FF_file.split('$')[(- 1)])
with fits.open(f1) as fin:
FF = fin[1].data[(5:(1014 + 5), 5:(1014 + 5))]
with fits.open(f2) as fin:
org_FF = fin[1].data[(5:(1014 + 5), 5:(1014 + 5))]
for flt_name in self.flt_names:
with fits.open(flt_name, mode='update') as fin:
fin['SCI'].data = ((fin['SCI'].data * FF) / org_FF)
fin['ERR'].data = ((fin['ERR'].data * FF) / org_FF)
fin['SCI'].header['PFLTFILE'] = self.org_FF_file | Function to undo the flattening of an FLT file by de-applying the flat-field that was used and instead
re-applying the default pipeline flat-field which only corrects for the quadrant gain values. | WFC3_Back_Sub/back_sub.py | restore_FF | npirzkal/WFC3_Back_sub | 0 | python | def restore_FF(self):
'\n Function to undo the flattening of an FLT file by de-applying the flat-field that was used and instead\n re-applying the default pipeline flat-field which only corrects for the quandrant gain values.\n '
f1 = os.path.join(os.environ['tref'], self.FF_file.split('$')[(- 1)])
f2 = os.path.join(os.environ['iref'], self.org_FF_file.split('$')[(- 1)])
with fits.open(f1) as fin:
FF = fin[1].data[(5:(1014 + 5), 5:(1014 + 5))]
with fits.open(f2) as fin:
org_FF = fin[1].data[(5:(1014 + 5), 5:(1014 + 5))]
for flt_name in self.flt_names:
with fits.open(flt_name, mode='update') as fin:
fin['SCI'].data = ((fin['SCI'].data * FF) / org_FF)
fin['ERR'].data = ((fin['ERR'].data * FF) / org_FF)
fin['SCI'].header['PFLTFILE'] = self.org_FF_file | def restore_FF(self):
'\n Function to undo the flattening of an FLT file by de-applying the flat-field that was used and instead\n re-applying the default pipeline flat-field which only corrects for the quandrant gain values.\n '
f1 = os.path.join(os.environ['tref'], self.FF_file.split('$')[(- 1)])
f2 = os.path.join(os.environ['iref'], self.org_FF_file.split('$')[(- 1)])
with fits.open(f1) as fin:
FF = fin[1].data[(5:(1014 + 5), 5:(1014 + 5))]
with fits.open(f2) as fin:
org_FF = fin[1].data[(5:(1014 + 5), 5:(1014 + 5))]
for flt_name in self.flt_names:
with fits.open(flt_name, mode='update') as fin:
fin['SCI'].data = ((fin['SCI'].data * FF) / org_FF)
fin['ERR'].data = ((fin['ERR'].data * FF) / org_FF)
fin['SCI'].header['PFLTFILE'] = self.org_FF_file<|docstring|>Function to undo the flattening of an FLT file by de-applying the flat-field that was used and instead
re-applying the default pipeline flat-field which only corrects for the quadrant gain values.<|endoftext|>
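Swapping flat-fields as restore_FF does reduces to multiplying the science (and error) arrays by the ratio of the two flats; a compact numpy sketch with toy 2x2 arrays.

import numpy as np

sci = np.array([[1.0, 2.0], [3.0, 4.0]])           # data already divided by used_flat
used_flat = np.array([[0.9, 1.0], [1.1, 1.0]])     # flat applied during calibration
default_flat = np.array([[1.0, 1.0], [1.0, 1.0]])  # flat we want to leave applied
restored = sci * used_flat / default_flat           # undo one flat, re-apply the other
print(restored)                                      # [[0.9 2. ] [3.3 4. ]]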
cda778aad89e082cc67bf06c0d88d15280cfbc5e71a599ae82c6176722d50e71 | def jump(self, nums):
'\n :type nums: List[int]\n :rtype: int\n '
if (len(nums) == 1):
return 0
lenNums = len(nums)
(start, end, cnt) = (0, nums[0], 1)
while (end < (lenNums - 1)):
newEnd = end
for i in range((start + 1), (end + 1)):
newEnd = max((i + nums[i]), newEnd)
start = end
end = newEnd
cnt += 1
return cnt | :type nums: List[int]
:rtype: int | Q45JumpGameII.py | jump | ChenliangLi205/LeetCode | 0 | python | def jump(self, nums):
'\n :type nums: List[int]\n :rtype: int\n '
if (len(nums) == 1):
return 0
lenNums = len(nums)
(start, end, cnt) = (0, nums[0], 1)
while (end < (lenNums - 1)):
newEnd = end
for i in range((start + 1), (end + 1)):
newEnd = max((i + nums[i]), newEnd)
start = end
end = newEnd
cnt += 1
return cnt | def jump(self, nums):
'\n :type nums: List[int]\n :rtype: int\n '
if (len(nums) == 1):
return 0
lenNums = len(nums)
(start, end, cnt) = (0, nums[0], 1)
while (end < (lenNums - 1)):
newEnd = end
for i in range((start + 1), (end + 1)):
newEnd = max((i + nums[i]), newEnd)
start = end
end = newEnd
cnt += 1
return cnt<|docstring|>:type nums: List[int]
:rtype: int<|endoftext|> |
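The jump solution above is the usual greedy, layer-by-layer scan: keep the window of indices reachable with the current number of jumps, extend it to the farthest index reachable from that window, and count one jump per extension. A self-contained worked example, written as a plain function rather than a class method.

def min_jumps(nums):
    if len(nums) == 1:
        return 0
    start, end, count = 0, nums[0], 1
    while end < len(nums) - 1:
        farthest = end
        for i in range(start + 1, end + 1):
            farthest = max(farthest, i + nums[i])
        start, end = end, farthest
        count += 1
    return count

print(min_jumps([2, 3, 1, 1, 4]))   # 2  (jump 0 -> 1 -> 4)
print(min_jumps([1, 1, 1, 1]))      # 3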
b92d0312c1d91ab61f8f0463168c6f3a88ace94c5431e9688f98f68fff756f7e | def write_info(dirs, run_parallel, parallel, config):
'Write cluster or local filesystem resources, spinning up cluster if not present.\n '
if (parallel['type'] in ['ipython']):
out_file = _get_cache_file(dirs, parallel)
if (not utils.file_exists(out_file)):
sys_config = copy.deepcopy(config)
sys_config['algorithm']['resource_check'] = False
minfos = run_parallel('machine_info', [[sys_config]])
with open(out_file, 'w') as out_handle:
yaml.dump(minfos, out_handle, default_flow_style=False, allow_unicode=False) | Write cluster or local filesystem resources, spinning up cluster if not present. | bcbio/provenance/system.py | write_info | brentp/bcbio-nextgen | 1 | python | def write_info(dirs, run_parallel, parallel, config):
'\n '
if (parallel['type'] in ['ipython']):
out_file = _get_cache_file(dirs, parallel)
if (not utils.file_exists(out_file)):
sys_config = copy.deepcopy(config)
sys_config['algorithm']['resource_check'] = False
minfos = run_parallel('machine_info', [[sys_config]])
with open(out_file, 'w') as out_handle:
yaml.dump(minfos, out_handle, default_flow_style=False, allow_unicode=False) | def write_info(dirs, run_parallel, parallel, config):
'\n '
if (parallel['type'] in ['ipython']):
out_file = _get_cache_file(dirs, parallel)
if (not utils.file_exists(out_file)):
sys_config = copy.deepcopy(config)
sys_config['algorithm']['resource_check'] = False
minfos = run_parallel('machine_info', [[sys_config]])
with open(out_file, 'w') as out_handle:
yaml.dump(minfos, out_handle, default_flow_style=False, allow_unicode=False)<|docstring|>Write cluster or local filesystem resources, spinning up cluster if not present.<|endoftext|> |
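The caching pattern in write_info amounts to probing the workers once and dumping the result to a YAML file beside the run; a minimal standalone sketch with a hypothetical cache path and a stubbed probe in place of the real cluster call.

import os
import yaml

def probe_machine():
    # stand-in for the machine_info() probe that would run on the cluster
    return [{'cores': 8, 'memory': 31, 'name': 'worker-1'}]

cache_file = 'system_info.yaml'                 # hypothetical cache location
if not os.path.exists(cache_file):
    with open(cache_file, 'w') as out_handle:
        yaml.dump(probe_machine(), out_handle, default_flow_style=False, allow_unicode=False)
with open(cache_file) as in_handle:
    cached = yaml.safe_load(in_handle)
print(cached)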
ac5667ed6e3c413b6e0937208f7eadd37c35c6f55f345e9b837244968ee0b2cd | def get_info(dirs, parallel):
'Retrieve cluster or local filesystem resources from pre-retrieved information.\n '
if (parallel['type'] in ['ipython']):
cache_file = _get_cache_file(dirs, parallel)
if utils.file_exists(cache_file):
with open(cache_file) as in_handle:
minfo = yaml.load(in_handle)
return _combine_machine_info(minfo)
else:
return {}
else:
return _combine_machine_info(machine_info()) | Retrieve cluster or local filesystem resources from pre-retrieved information. | bcbio/provenance/system.py | get_info | brentp/bcbio-nextgen | 1 | python | def get_info(dirs, parallel):
'\n '
if (parallel['type'] in ['ipython']):
cache_file = _get_cache_file(dirs, parallel)
if utils.file_exists(cache_file):
with open(cache_file) as in_handle:
minfo = yaml.load(in_handle)
return _combine_machine_info(minfo)
else:
return {}
else:
return _combine_machine_info(machine_info()) | def get_info(dirs, parallel):
'\n '
if (parallel['type'] in ['ipython']):
cache_file = _get_cache_file(dirs, parallel)
if utils.file_exists(cache_file):
with open(cache_file) as in_handle:
minfo = yaml.load(in_handle)
return _combine_machine_info(minfo)
else:
return {}
else:
return _combine_machine_info(machine_info())<|docstring|>Retrieve cluster or local filesystem resources from pre-retrieved information.<|endoftext|> |
2b91ef68a6947758742d8451acc6862cfa1ba6d33a4961d33c679e57d0840e95 | def machine_info():
'Retrieve core and memory information for the current machine.\n '
BYTES_IN_GIG = 1073741824
free_bytes = psutil.virtual_memory().available
return [{'memory': int((free_bytes / BYTES_IN_GIG)), 'cores': multiprocessing.cpu_count(), 'name': socket.gethostname()}] | Retrieve core and memory information for the current machine. | bcbio/provenance/system.py | machine_info | brentp/bcbio-nextgen | 1 | python | def machine_info():
'\n '
BYTES_IN_GIG = 1073741824
free_bytes = psutil.virtual_memory().available
return [{'memory': int((free_bytes / BYTES_IN_GIG)), 'cores': multiprocessing.cpu_count(), 'name': socket.gethostname()}] | def machine_info():
'\n '
BYTES_IN_GIG = 1073741824
free_bytes = psutil.virtual_memory().available
return [{'memory': int((free_bytes / BYTES_IN_GIG)), 'cores': multiprocessing.cpu_count(), 'name': socket.gethostname()}]<|docstring|>Retrieve core and memory information for the current machine.<|endoftext|> |
ffbb67fd2dd49ded43b1d02035991b08eb0d8e272a696b97e48ac4df1eaae54f | def forward(self, x, record_len, pairwise_t_matrix):
'\n Fusion forwarding.\n \n Parameters\n ----------\n x : torch.Tensor\n input data, (B, C, H, W)\n \n record_len : list\n shape: (B)\n \n pairwise_t_matrix : torch.Tensor\n The transformation matrix from each cav to ego, \n shape: (B, L, L, 4, 4) \n \n Returns\n -------\n Fused feature.\n '
(_, C, H, W) = x.shape
(B, L) = pairwise_t_matrix.shape[:2]
split_x = self.regroup(x, record_len)
pairwise_t_matrix = get_discretized_transformation_matrix(pairwise_t_matrix.reshape((- 1), L, 4, 4), self.discrete_ratio, self.downsample_rate).reshape(B, L, L, 2, 3)
roi_mask = get_rotated_roi(((B * L), L, 1, H, W), pairwise_t_matrix.reshape(((B * L) * L), 2, 3))
roi_mask = roi_mask.reshape(B, L, L, 1, H, W)
batch_node_features = split_x
for l in range(self.num_iteration):
batch_updated_node_features = []
for b in range(B):
N = record_len[b]
t_matrix = pairwise_t_matrix[b][(:N, :N, :, :)]
updated_node_features = []
for i in range(N):
mask = roi_mask[(b, :N, i, ...)]
neighbor_feature = warp_affine(batch_node_features[b], t_matrix[(:, i, :, :)], (H, W))
ego_agent_feature = batch_node_features[b][i].unsqueeze(0).repeat(N, 1, 1, 1)
neighbor_feature = torch.cat([neighbor_feature, ego_agent_feature], dim=1)
message = (self.msg_cnn(neighbor_feature) * mask)
if (self.agg_operator == 'avg'):
agg_feature = torch.mean(message, dim=0)
elif (self.agg_operator == 'max'):
agg_feature = torch.max(message, dim=0)[0]
else:
raise ValueError('agg_operator has wrong value')
cat_feature = torch.cat([batch_node_features[b][(i, ...)], agg_feature], dim=0)
if self.gru_flag:
gru_out = self.conv_gru(cat_feature.unsqueeze(0).unsqueeze(0))[0][0].squeeze(0).squeeze(0)
else:
gru_out = (batch_node_features[b][(i, ...)] + agg_feature)
updated_node_features.append(gru_out.unsqueeze(0))
batch_updated_node_features.append(torch.cat(updated_node_features, dim=0))
batch_node_features = batch_updated_node_features
out = torch.cat([itm[(0, ...)].unsqueeze(0) for itm in batch_node_features], dim=0)
out = self.mlp(out.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return out | Fusion forwarding.
Parameters
----------
x : torch.Tensor
input data, (B, C, H, W)
record_len : list
shape: (B)
pairwise_t_matrix : torch.Tensor
The transformation matrix from each cav to ego,
shape: (B, L, L, 4, 4)
Returns
-------
Fused feature. | opencood/models/fuse_modules/v2v_fuse.py | forward | YuanYunshuang/OpenCOOD | 0 | python | def forward(self, x, record_len, pairwise_t_matrix):
'\n Fusion forwarding.\n \n Parameters\n ----------\n x : torch.Tensor\n input data, (B, C, H, W)\n \n record_len : list\n shape: (B)\n \n pairwise_t_matrix : torch.Tensor\n The transformation matrix from each cav to ego, \n shape: (B, L, L, 4, 4) \n \n Returns\n -------\n Fused feature.\n '
(_, C, H, W) = x.shape
(B, L) = pairwise_t_matrix.shape[:2]
split_x = self.regroup(x, record_len)
pairwise_t_matrix = get_discretized_transformation_matrix(pairwise_t_matrix.reshape((- 1), L, 4, 4), self.discrete_ratio, self.downsample_rate).reshape(B, L, L, 2, 3)
roi_mask = get_rotated_roi(((B * L), L, 1, H, W), pairwise_t_matrix.reshape(((B * L) * L), 2, 3))
roi_mask = roi_mask.reshape(B, L, L, 1, H, W)
batch_node_features = split_x
for l in range(self.num_iteration):
batch_updated_node_features = []
for b in range(B):
N = record_len[b]
t_matrix = pairwise_t_matrix[b][(:N, :N, :, :)]
updated_node_features = []
for i in range(N):
mask = roi_mask[(b, :N, i, ...)]
neighbor_feature = warp_affine(batch_node_features[b], t_matrix[(:, i, :, :)], (H, W))
ego_agent_feature = batch_node_features[b][i].unsqueeze(0).repeat(N, 1, 1, 1)
neighbor_feature = torch.cat([neighbor_feature, ego_agent_feature], dim=1)
message = (self.msg_cnn(neighbor_feature) * mask)
if (self.agg_operator == 'avg'):
agg_feature = torch.mean(message, dim=0)
elif (self.agg_operator == 'max'):
agg_feature = torch.max(message, dim=0)[0]
else:
raise ValueError('agg_operator has wrong value')
cat_feature = torch.cat([batch_node_features[b][(i, ...)], agg_feature], dim=0)
if self.gru_flag:
gru_out = self.conv_gru(cat_feature.unsqueeze(0).unsqueeze(0))[0][0].squeeze(0).squeeze(0)
else:
gru_out = (batch_node_features[b][(i, ...)] + agg_feature)
updated_node_features.append(gru_out.unsqueeze(0))
batch_updated_node_features.append(torch.cat(updated_node_features, dim=0))
batch_node_features = batch_updated_node_features
out = torch.cat([itm[(0, ...)].unsqueeze(0) for itm in batch_node_features], dim=0)
out = self.mlp(out.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return out | def forward(self, x, record_len, pairwise_t_matrix):
'\n Fusion forwarding.\n \n Parameters\n ----------\n x : torch.Tensor\n input data, (B, C, H, W)\n \n record_len : list\n shape: (B)\n \n pairwise_t_matrix : torch.Tensor\n The transformation matrix from each cav to ego, \n shape: (B, L, L, 4, 4) \n \n Returns\n -------\n Fused feature.\n '
(_, C, H, W) = x.shape
(B, L) = pairwise_t_matrix.shape[:2]
split_x = self.regroup(x, record_len)
pairwise_t_matrix = get_discretized_transformation_matrix(pairwise_t_matrix.reshape((- 1), L, 4, 4), self.discrete_ratio, self.downsample_rate).reshape(B, L, L, 2, 3)
roi_mask = get_rotated_roi(((B * L), L, 1, H, W), pairwise_t_matrix.reshape(((B * L) * L), 2, 3))
roi_mask = roi_mask.reshape(B, L, L, 1, H, W)
batch_node_features = split_x
for l in range(self.num_iteration):
batch_updated_node_features = []
for b in range(B):
N = record_len[b]
t_matrix = pairwise_t_matrix[b][(:N, :N, :, :)]
updated_node_features = []
for i in range(N):
mask = roi_mask[(b, :N, i, ...)]
neighbor_feature = warp_affine(batch_node_features[b], t_matrix[(:, i, :, :)], (H, W))
ego_agent_feature = batch_node_features[b][i].unsqueeze(0).repeat(N, 1, 1, 1)
neighbor_feature = torch.cat([neighbor_feature, ego_agent_feature], dim=1)
message = (self.msg_cnn(neighbor_feature) * mask)
if (self.agg_operator == 'avg'):
agg_feature = torch.mean(message, dim=0)
elif (self.agg_operator == 'max'):
agg_feature = torch.max(message, dim=0)[0]
else:
raise ValueError('agg_operator has wrong value')
cat_feature = torch.cat([batch_node_features[b][(i, ...)], agg_feature], dim=0)
if self.gru_flag:
gru_out = self.conv_gru(cat_feature.unsqueeze(0).unsqueeze(0))[0][0].squeeze(0).squeeze(0)
else:
gru_out = (batch_node_features[b][(i, ...)] + agg_feature)
updated_node_features.append(gru_out.unsqueeze(0))
batch_updated_node_features.append(torch.cat(updated_node_features, dim=0))
batch_node_features = batch_updated_node_features
out = torch.cat([itm[(0, ...)].unsqueeze(0) for itm in batch_node_features], dim=0)
out = self.mlp(out.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
return out<|docstring|>Fusion forwarding.
Parameters
----------
x : torch.Tensor
input data, (B, C, H, W)
record_len : list
shape: (B)
pairwise_t_matrix : torch.Tensor
The transformation matrix from each cav to ego,
shape: (B, L, L, 4, 4)
Returns
-------
Fused feature.<|endoftext|> |
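A rough smoke-test sketch for the fusion forward pass above, building dummy inputs with the documented shapes; the sizes are arbitrary examples and fusion_module stands for an instance of the class defining forward(), whose constructor is not shown in this record.

import torch

B, L, C, H, W = 2, 5, 64, 100, 252                      # example sizes only
record_len = torch.tensor([3, 2])                        # actual number of agents per sample
x = torch.rand(int(record_len.sum()), C, H, W)           # stacked per-agent feature maps
pairwise_t_matrix = torch.eye(4).repeat(B, L, L, 1, 1)   # identity cav-to-ego transforms
# fused = fusion_module(x, record_len, pairwise_t_matrix)  # would return the fused feature map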
c21820bb870be16376f03687c049b1b04a12171075f1300a8097b46b0e47b049 | def __init__(self, transforms):
'Constructor.\n\n Args:\n transforms: an iterable of `Transform` objects.\n '
super().__init__()
self._transforms = nn.ModuleList(transforms) | Constructor.
Args:
transforms: an iterable of `Transform` objects. | manifold_flow/transforms/base.py | __init__ | Butters-cloud/Denoising-Normalizing-Flow | 199 | python | def __init__(self, transforms):
'Constructor.\n\n Args:\n transforms: an iterable of `Transform` objects.\n '
super().__init__()
self._transforms = nn.ModuleList(transforms) | def __init__(self, transforms):
'Constructor.\n\n Args:\n transforms: an iterable of `Transform` objects.\n '
super().__init__()
self._transforms = nn.ModuleList(transforms)<|docstring|>Constructor.
Args:
transforms: an iterable of `Transform` objects.<|endoftext|> |
8918ebea7b0984d00d67bf28b001d8c6c976f1f9102715ddb73dbeeebf277848 | def __init__(self, num_transforms, split_dim=1):
'Constructor.\n\n Args:\n num_transforms: int, total number of transforms to be added.\n split_dim: dimension along which to split.\n '
if (not various.is_positive_int(split_dim)):
raise TypeError('Split dimension must be a positive integer.')
super().__init__()
self._transforms = nn.ModuleList()
self._output_shapes = []
self._num_transforms = num_transforms
self._split_dim = split_dim | Constructor.
Args:
num_transforms: int, total number of transforms to be added.
split_dim: dimension along which to split. | manifold_flow/transforms/base.py | __init__ | Butters-cloud/Denoising-Normalizing-Flow | 199 | python | def __init__(self, num_transforms, split_dim=1):
'Constructor.\n\n Args:\n num_transforms: int, total number of transforms to be added.\n split_dim: dimension along which to split.\n '
if (not various.is_positive_int(split_dim)):
raise TypeError('Split dimension must be a positive integer.')
super().__init__()
self._transforms = nn.ModuleList()
self._output_shapes = []
self._num_transforms = num_transforms
self._split_dim = split_dim | def __init__(self, num_transforms, split_dim=1):
'Constructor.\n\n Args:\n num_transforms: int, total number of transforms to be added.\n split_dim: dimension along which to split.\n '
if (not various.is_positive_int(split_dim)):
raise TypeError('Split dimension must be a positive integer.')
super().__init__()
self._transforms = nn.ModuleList()
self._output_shapes = []
self._num_transforms = num_transforms
self._split_dim = split_dim<|docstring|>Constructor.
Args:
num_transforms: int, total number of transforms to be added.
split_dim: dimension along which to split.<|endoftext|> |
33519c96f5a7a666fe86e7cfb347bdfc092f1575b16b83a00e423b4ad7aac4f5 | def add_transform(self, transform, transform_output_shape):
"Add a transform. Must be called exactly `num_transforms` times.\n\n Parameters:\n transform: the `Transform` object to be added.\n transform_output_shape: tuple, shape of transform's outputs, excl. the first batch\n dimension.\n\n Returns:\n Input shape for the next transform, or None if adding the last transform.\n "
assert (len(self._transforms) <= self._num_transforms)
if (len(self._transforms) == self._num_transforms):
raise RuntimeError('Adding more than {} transforms is not allowed.'.format(self._num_transforms))
if ((self._split_dim - 1) >= len(transform_output_shape)):
raise ValueError('No split_dim in output shape')
if (transform_output_shape[(self._split_dim - 1)] < 2):
raise ValueError('Size of dimension {} must be at least 2.'.format(self._split_dim))
self._transforms.append(transform)
if (len(self._transforms) != self._num_transforms):
output_shape = list(transform_output_shape)
output_shape[(self._split_dim - 1)] = ((output_shape[(self._split_dim - 1)] + 1) // 2)
output_shape = tuple(output_shape)
hidden_shape = list(transform_output_shape)
hidden_shape[(self._split_dim - 1)] = (hidden_shape[(self._split_dim - 1)] // 2)
hidden_shape = tuple(hidden_shape)
else:
output_shape = transform_output_shape
hidden_shape = None
self._output_shapes.append(output_shape)
return hidden_shape | Add a transform. Must be called exactly `num_transforms` times.
Parameters:
transform: the `Transform` object to be added.
transform_output_shape: tuple, shape of transform's outputs, excl. the first batch
dimension.
Returns:
Input shape for the next transform, or None if adding the last transform. | manifold_flow/transforms/base.py | add_transform | Butters-cloud/Denoising-Normalizing-Flow | 199 | python | def add_transform(self, transform, transform_output_shape):
"Add a transform. Must be called exactly `num_transforms` times.\n\n Parameters:\n transform: the `Transform` object to be added.\n transform_output_shape: tuple, shape of transform's outputs, excl. the first batch\n dimension.\n\n Returns:\n Input shape for the next transform, or None if adding the last transform.\n "
assert (len(self._transforms) <= self._num_transforms)
if (len(self._transforms) == self._num_transforms):
raise RuntimeError('Adding more than {} transforms is not allowed.'.format(self._num_transforms))
if ((self._split_dim - 1) >= len(transform_output_shape)):
raise ValueError('No split_dim in output shape')
if (transform_output_shape[(self._split_dim - 1)] < 2):
raise ValueError('Size of dimension {} must be at least 2.'.format(self._split_dim))
self._transforms.append(transform)
if (len(self._transforms) != self._num_transforms):
output_shape = list(transform_output_shape)
output_shape[(self._split_dim - 1)] = ((output_shape[(self._split_dim - 1)] + 1) // 2)
output_shape = tuple(output_shape)
hidden_shape = list(transform_output_shape)
hidden_shape[(self._split_dim - 1)] = (hidden_shape[(self._split_dim - 1)] // 2)
hidden_shape = tuple(hidden_shape)
else:
output_shape = transform_output_shape
hidden_shape = None
self._output_shapes.append(output_shape)
return hidden_shape | def add_transform(self, transform, transform_output_shape):
"Add a transform. Must be called exactly `num_transforms` times.\n\n Parameters:\n transform: the `Transform` object to be added.\n transform_output_shape: tuple, shape of transform's outputs, excl. the first batch\n dimension.\n\n Returns:\n Input shape for the next transform, or None if adding the last transform.\n "
assert (len(self._transforms) <= self._num_transforms)
if (len(self._transforms) == self._num_transforms):
raise RuntimeError('Adding more than {} transforms is not allowed.'.format(self._num_transforms))
if ((self._split_dim - 1) >= len(transform_output_shape)):
raise ValueError('No split_dim in output shape')
if (transform_output_shape[(self._split_dim - 1)] < 2):
raise ValueError('Size of dimension {} must be at least 2.'.format(self._split_dim))
self._transforms.append(transform)
if (len(self._transforms) != self._num_transforms):
output_shape = list(transform_output_shape)
output_shape[(self._split_dim - 1)] = ((output_shape[(self._split_dim - 1)] + 1) // 2)
output_shape = tuple(output_shape)
hidden_shape = list(transform_output_shape)
hidden_shape[(self._split_dim - 1)] = (hidden_shape[(self._split_dim - 1)] // 2)
hidden_shape = tuple(hidden_shape)
else:
output_shape = transform_output_shape
hidden_shape = None
self._output_shapes.append(output_shape)
return hidden_shape<|docstring|>Add a transform. Must be called exactly `num_transforms` times.
Parameters:
transform: the `Transform` object to be added.
transform_output_shape: tuple, shape of transform's outputs, excl. the first batch
dimension.
Returns:
Input shape for the next transform, or None if adding the last transform.<|endoftext|> |
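A sketch of the calling pattern the docstring above describes: add_transform is invoked exactly num_transforms times and each returned shape feeds the next level. make_level_transform is a placeholder factory for whatever Transform is used per level and is not defined here.

current_shape = (8, 32, 32)          # (channels, height, width) entering the flow
num_levels = 3
mct = MultiscaleCompositeTransform(num_transforms=num_levels, split_dim=1)
for level in range(num_levels):
    step = make_level_transform(current_shape)      # placeholder factory
    next_shape = mct.add_transform(step, current_shape)
    if next_shape is not None:                      # None is returned after the final level
        current_shape = next_shape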
6eb8223c65cce177e07a7b324c19559bb776b11a009752e09a59717cf21d0f11 | def __init__(self, transform):
'Constructor.\n\n Args:\n transform: An object of type `Transform`.\n '
super().__init__()
self._transform = transform | Constructor.
Args:
transform: An object of type `Transform`. | manifold_flow/transforms/base.py | __init__ | Butters-cloud/Denoising-Normalizing-Flow | 199 | python | def __init__(self, transform):
'Constructor.\n\n Args:\n transform: An object of type `Transform`.\n '
super().__init__()
self._transform = transform | def __init__(self, transform):
'Constructor.\n\n Args:\n transform: An object of type `Transform`.\n '
super().__init__()
self._transform = transform<|docstring|>Constructor.
Args:
transform: An object of type `Transform`.<|endoftext|> |
1894fa656e93ef02930d944addc7ac81b7450b4f72d16e9519d31013a8a881e9 | def quick(argv):
'\n    quick [+-rR[N]] [n]\n    Repeat the stage currently on screen the given number of times, or until sanity runs out\n    +r/-r whether to restore sanity automatically, at most N times\n    +R/-R whether to use Originite Prime to restore sanity (requires +r to be enabled as well)\n    '
ops = _parse_opt(argv)
if (len(argv) == 2):
count = int(argv[1])
else:
count = 114514
helper = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with helper._shellng_context:
helper.module_battle_slim(c_id=None, set_count=count)
return 0 | quick [+-rR[N]] [n]
Repeat the stage currently on screen the given number of times, or until sanity runs out
+r/-r whether to restore sanity automatically, at most N times
+R/-R whether to use Originite Prime to restore sanity (requires +r to be enabled as well) | Arknights/shell_next.py | quick | ligeek/ArknightsAutoHelper | 3 | python | def quick(argv):
'\n    quick [+-rR[N]] [n]\n    Repeat the stage currently on screen the given number of times, or until sanity runs out\n    +r/-r whether to restore sanity automatically, at most N times\n    +R/-R whether to use Originite Prime to restore sanity (requires +r to be enabled as well)\n    '
ops = _parse_opt(argv)
if (len(argv) == 2):
count = int(argv[1])
else:
count = 114514
helper = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with helper._shellng_context:
helper.module_battle_slim(c_id=None, set_count=count)
return 0 | def quick(argv):
'\n    quick [+-rR[N]] [n]\n    Repeat the stage currently on screen the given number of times, or until sanity runs out\n    +r/-r whether to restore sanity automatically, at most N times\n    +R/-R whether to use Originite Prime to restore sanity (requires +r to be enabled as well)\n    '
ops = _parse_opt(argv)
if (len(argv) == 2):
count = int(argv[1])
else:
count = 114514
helper = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with helper._shellng_context:
helper.module_battle_slim(c_id=None, set_count=count)
return 0<|docstring|>quick [+-rR[N]] [n]
Repeat the stage currently on screen the given number of times, or until sanity runs out
+r/-r whether to restore sanity automatically, at most N times
+R/-R whether to use Originite Prime to restore sanity (requires +r to be enabled as well)<|endoftext|>
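Illustrative invocations of quick above, written as direct calls with an argv list. Only the plain count form is visible in the code; how _parse_opt consumes the +r/+R toggles is not shown in this record, so the commented form is just an assumption mirroring the usage text.

quick(['quick'])        # keep repeating the on-screen stage until sanity runs out
quick(['quick', '5'])   # repeat the on-screen stage five times
# quick(['quick', '+r3', '5'])   # assumed form: also allow up to three sanity refills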
001ef811f49195e530f50f8f43be2b95a84deaa81de7c5227be57d181c1cecd8 | def auto(argv):
'\n    auto [+-rR[N]] stage1 count1 [stage2 count2] ...\n    Challenge the specified stages the given number of times, in order, until sanity runs out\n    '
ops = _parse_opt(argv)
arglist = argv[1:]
if ((len(arglist) % 2) != 0):
print('usage: auto [+-rR] stage1 count1 [stage2 count2] ...')
return 1
it = iter(arglist)
tasks = [(stage.upper(), int(counts)) for (stage, counts) in zip(it, it)]
helper = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with helper._shellng_context:
helper.main_handler(clear_tasks=False, task_list=tasks, auto_close=False)
return 0 | auto [+-rR[N]] stage1 count1 [stage2 count2] ...
Challenge the specified stages the given number of times, in order, until sanity runs out | Arknights/shell_next.py | auto | ligeek/ArknightsAutoHelper | 3 | python | def auto(argv):
'\n    auto [+-rR[N]] stage1 count1 [stage2 count2] ...\n    Challenge the specified stages the given number of times, in order, until sanity runs out\n    '
ops = _parse_opt(argv)
arglist = argv[1:]
if ((len(arglist) % 2) != 0):
print('usage: auto [+-rR] stage1 count1 [stage2 count2] ...')
return 1
it = iter(arglist)
tasks = [(stage.upper(), int(counts)) for (stage, counts) in zip(it, it)]
helper = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with helper._shellng_context:
helper.main_handler(clear_tasks=False, task_list=tasks, auto_close=False)
return 0 | def auto(argv):
'\n    auto [+-rR[N]] stage1 count1 [stage2 count2] ...\n    Challenge the specified stages the given number of times, in order, until sanity runs out\n    '
ops = _parse_opt(argv)
arglist = argv[1:]
if ((len(arglist) % 2) != 0):
print('usage: auto [+-rR] stage1 count1 [stage2 count2] ...')
return 1
it = iter(arglist)
tasks = [(stage.upper(), int(counts)) for (stage, counts) in zip(it, it)]
helper = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with helper._shellng_context:
helper.main_handler(clear_tasks=False, task_list=tasks, auto_close=False)
return 0<|docstring|>auto [+-rR[N]] stage1 count1 [stage2 count2] ...
Challenge the specified stages the given number of times, in order, until sanity runs out<|endoftext|>
5dc61ac78f1b9fc0fa3df69af830d7ae38d2a41ae154ec437ce5839f47ef432b | def collect(argv):
'\n    collect\n    Collect daily mission rewards\n    '
helper = _create_helper()
with helper._shellng_context:
helper.clear_daily_task()
return 0 | collect
Collect daily mission rewards | Arknights/shell_next.py | collect | ligeek/ArknightsAutoHelper | 3 | python | def collect(argv):
'\n    collect\n    Collect daily mission rewards\n    '
helper = _create_helper()
with helper._shellng_context:
helper.clear_daily_task()
return 0 | def collect(argv):
'\n    collect\n    Collect daily mission rewards\n    '
helper = _create_helper()
with helper._shellng_context:
helper.clear_daily_task()
return 0<|docstring|>collect
Collect daily mission rewards<|endoftext|>
051d5ec73c3849ec62dd40e934ba1f67a71c061da79883891c77baeada49928c | def recruit(argv):
'\n    recruit [tags ...]\n    Public recruitment recognition/calculation; if no tags are given, they are recognized from a screenshot\n    '
from . import recruit_calc
if (2 <= len(argv) <= 6):
tags = argv[1:]
result = recruit_calc.calculate(tags)
elif (len(argv) == 1):
helper = _create_helper(use_status_line=False)
with helper._shellng_context:
result = helper.recruit()
else:
print('要素过多')
return 1
colors = ['\x1b[36m', '\x1b[90m', '\x1b[37m', '\x1b[32m', '\x1b[93m', '\x1b[91m']
reset = '\x1b[39m'
for (tags, operators, rank) in result:
taglist = ','.join(tags)
if (rank >= 1):
taglist = (('\x1b[96m' + taglist) + '\x1b[39m')
print(('%s: %s' % (taglist, ' '.join((((colors[op[1]] + op[0]) + reset) for op in operators))))) | recruit [tags ...]
Public recruitment recognition/calculation; if no tags are given, they are recognized from a screenshot | Arknights/shell_next.py | recruit | ligeek/ArknightsAutoHelper | 3 | python | def recruit(argv):
'\n    recruit [tags ...]\n    Public recruitment recognition/calculation; if no tags are given, they are recognized from a screenshot\n    '
from . import recruit_calc
if (2 <= len(argv) <= 6):
tags = argv[1:]
result = recruit_calc.calculate(tags)
elif (len(argv) == 1):
helper = _create_helper(use_status_line=False)
with helper._shellng_context:
result = helper.recruit()
else:
print('要素过多')
return 1
colors = ['\x1b[36m', '\x1b[90m', '\x1b[37m', '\x1b[32m', '\x1b[93m', '\x1b[91m']
reset = '\x1b[39m'
for (tags, operators, rank) in result:
taglist = ','.join(tags)
if (rank >= 1):
taglist = (('\x1b[96m' + taglist) + '\x1b[39m')
print(('%s: %s' % (taglist, ' '.join((((colors[op[1]] + op[0]) + reset) for op in operators))))) | def recruit(argv):
'\n    recruit [tags ...]\n    Public recruitment recognition/calculation; if no tags are given, they are recognized from a screenshot\n    '
from . import recruit_calc
if (2 <= len(argv) <= 6):
tags = argv[1:]
result = recruit_calc.calculate(tags)
elif (len(argv) == 1):
helper = _create_helper(use_status_line=False)
with helper._shellng_context:
result = helper.recruit()
else:
print('要素过多')
return 1
colors = ['\x1b[36m', '\x1b[90m', '\x1b[37m', '\x1b[32m', '\x1b[93m', '\x1b[91m']
reset = '\x1b[39m'
for (tags, operators, rank) in result:
taglist = ','.join(tags)
if (rank >= 1):
taglist = (('\x1b[96m' + taglist) + '\x1b[39m')
print(('%s: %s' % (taglist, ' '.join((((colors[op[1]] + op[0]) + reset) for op in operators)))))<|docstring|>recruit [tags ...]
Public recruitment recognition/calculation; if no tags are given, they are recognized from a screenshot<|endoftext|>
239627b2ad7e484f9f3148a2ca1fd0ce7d4ae47d74ed0b48297a5e283be49096 | def interactive(argv):
'\n    interactive\n    Enter interactive mode to cut down on keystrokes (\n    '
import shlex
import traceback
helpcmds(interactive_cmds)
errorlevel = None
try:
import readline
except ImportError:
pass
while True:
try:
cmdline = input('akhelper> ')
argv = shlex.split(cmdline)
if ((len(argv) == 0) or (argv[0] == '?') or (argv[0] == 'help')):
print(' '.join((x.__name__ for x in interactive_cmds)))
continue
elif (argv[0] == 'exit'):
break
cmd = match_cmd(argv[0], interactive_cmds)
if (cmd is not None):
with _alarm_context_factory():
errorlevel = cmd(argv)
except EOFError:
print('')
break
except (Exception, KeyboardInterrupt) as e:
errorlevel = e
traceback.print_exc()
continue
return errorlevel | interactive
Enter interactive mode to cut down on keystrokes ( | Arknights/shell_next.py | interactive | ligeek/ArknightsAutoHelper | 3 | python | def interactive(argv):
'\n    interactive\n    Enter interactive mode to cut down on keystrokes (\n    '
import shlex
import traceback
helpcmds(interactive_cmds)
errorlevel = None
try:
import readline
except ImportError:
pass
while True:
try:
cmdline = input('akhelper> ')
argv = shlex.split(cmdline)
if ((len(argv) == 0) or (argv[0] == '?') or (argv[0] == 'help')):
print(' '.join((x.__name__ for x in interactive_cmds)))
continue
elif (argv[0] == 'exit'):
break
cmd = match_cmd(argv[0], interactive_cmds)
if (cmd is not None):
with _alarm_context_factory():
errorlevel = cmd(argv)
except EOFError:
print()
break
except (Exception, KeyboardInterrupt) as e:
errorlevel = e
traceback.print_exc()
continue
return errorlevel | def interactive(argv):
'\n    interactive\n    Enter interactive mode to cut down on keystrokes (\n    '
import shlex
import traceback
helpcmds(interactive_cmds)
errorlevel = None
try:
import readline
except ImportError:
pass
while True:
try:
cmdline = input('akhelper> ')
argv = shlex.split(cmdline)
if ((len(argv) == 0) or (argv[0] == '?') or (argv[0] == 'help')):
print(' '.join((x.__name__ for x in interactive_cmds)))
continue
elif (argv[0] == 'exit'):
break
cmd = match_cmd(argv[0], interactive_cmds)
if (cmd is not None):
with _alarm_context_factory():
errorlevel = cmd(argv)
except EOFError:
print()
break
except (Exception, KeyboardInterrupt) as e:
errorlevel = e
traceback.print_exc()
continue
return errorlevel<|docstring|>interactive
Enter interactive mode to cut down on keystrokes (<|endoftext|>
9896080599d59aa3deca0720d17c5248b45c61aabdd31b4a161b365b59bdcf99 | def help(argv):
'\n    help\n    Print this message\n    '
print(('usage: %s command [command args]' % argv0))
helpcmds(global_cmds) | help
Print this message | Arknights/shell_next.py | help | ligeek/ArknightsAutoHelper | 3 | python | def help(argv):
'\n    help\n    Print this message\n    '
print(('usage: %s command [command args]' % argv0))
helpcmds(global_cmds) | def help(argv):
'\n    help\n    Print this message\n    '
print(('usage: %s command [command args]' % argv0))
helpcmds(global_cmds)<|docstring|>help
Print this message<|endoftext|>
a20ca8435094898ccfe1def03f83e931417b164f384488ffee77c714ae7a96b8 | def etree(self):
'\n Returns a tree root object.\n '
if (not hasattr(self, '_etree_root')):
self._etree_root = ET.fromstring(self.data())
return self._etree_root | Returns a tree root object. | dexy/datas/et.py | etree | dsoto/dexy | 136 | python | def etree(self):
'\n \n '
if (not hasattr(self, '_etree_root')):
self._etree_root = ET.fromstring(self.data())
return self._etree_root | def etree(self):
'\n \n '
if (not hasattr(self, '_etree_root')):
self._etree_root = ET.fromstring(self.data())
return self._etree_root<|docstring|>Returns a tree root object.<|endoftext|> |
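The property above is a lazy parse-and-cache idiom. The standalone sketch below shows the same pattern outside dexy, with the data() source stubbed out as an assumption.

import xml.etree.ElementTree as ET

class XmlDoc:
    def __init__(self, text):
        self._text = text

    def data(self):
        return self._text

    def etree(self):
        # Parse once, then reuse the cached root on later calls.
        if not hasattr(self, '_etree_root'):
            self._etree_root = ET.fromstring(self.data())
        return self._etree_root

doc = XmlDoc('<root><item id="1"/></root>')
assert doc.etree() is doc.etree()   # same cached root object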
7775b2f1bdf66821016bc14de7b9239a79701fe9f92a6e231a4aac64d35fb87a | def btn_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
'add inline btn\n\n Returns 2 part tuple containing list of nodes to insert into the\n document and a list of system messages. Both are allowed to be\n empty.\n\n :param name: The role name used in the document.\n :param rawtext: The entire markup snippet, with role.\n :param text: The text marked with the role. (it should use the following format: `<fontawesome icon> text`) color is optional\n :param lineno: The line number where rawtext appears in the input.\n :param inliner: The inliner instance that called us.\n :param options: Directive options for customization.\n :param content: The directive content for customization.\n '
node = nodes.reference()
if (text.find('<') != (- 1)):
start = text.find('<')
end = text.find('>')
icon = text[(start + 1):end]
text = text[(end + 1):]
else:
icon = ''
margin = ('mr-1' if text else '')
html = f'<span class="guilabel"><i class="{icon} {margin}"></i>{text.strip()}</span>'
node = nodes.raw('', html, format='html')
return ([node], []) | add inline btn
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role. (it should use the following format: `<fontawesome icon> text`) color is optional
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization. | docs/source/_extentions/btn.py | btn_role | t-elisee/sepal-doc | 2 | python | def btn_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
'add inline btn\n\n Returns 2 part tuple containing list of nodes to insert into the\n document and a list of system messages. Both are allowed to be\n empty.\n\n :param name: The role name used in the document.\n :param rawtext: The entire markup snippet, with role.\n :param text: The text marked with the role. (it should use the following format: `<fontawesome icon> text`) color is optional\n :param lineno: The line number where rawtext appears in the input.\n :param inliner: The inliner instance that called us.\n :param options: Directive options for customization.\n :param content: The directive content for customization.\n '
node = nodes.reference()
if (text.find('<') != (- 1)):
start = text.find('<')
end = text.find('>')
icon = text[(start + 1):end]
text = text[(end + 1):]
else:
icon = ''
margin = ('mr-1' if text else '')
html = f'<span class="guilabel"><i class="{icon} {margin}"></i>{text.strip()}</span>'
node = nodes.raw('', html, format='html')
return ([node], []) | def btn_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
'add inline btn\n\n Returns 2 part tuple containing list of nodes to insert into the\n document and a list of system messages. Both are allowed to be\n empty.\n\n :param name: The role name used in the document.\n :param rawtext: The entire markup snippet, with role.\n :param text: The text marked with the role. (it should use the following format: `<fontawesome icon> text`) color is optional\n :param lineno: The line number where rawtext appears in the input.\n :param inliner: The inliner instance that called us.\n :param options: Directive options for customization.\n :param content: The directive content for customization.\n '
node = nodes.reference()
if (text.find('<') != (- 1)):
start = text.find('<')
end = text.find('>')
icon = text[(start + 1):end]
text = text[(end + 1):]
else:
icon = ''
margin = ('mr-1' if text else '')
html = f'<span class="guilabel"><i class="{icon} {margin}"></i>{text.strip()}</span>'
node = nodes.raw('', html, format='html')
return ([node], [])<|docstring|>add inline btn
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role. (it should use the following format: `<fontawesome icon> text`) color is optional
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.<|endoftext|> |
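A hedged way to exercise btn_role above outside Sphinx: the body only uses the text argument, so the inliner can be None and docutils is the only requirement; the fontawesome class is just an example.

nodes_list, messages = btn_role(
    name='btn',
    rawtext=':btn:`<fa fa-download> Download`',
    text='<fa fa-download> Download',
    lineno=0,
    inliner=None,   # unused by the role body above
)
print(nodes_list[0].astext())   # the generated <span class="guilabel"> HTML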
b67b136590b31e6e827ab2a04b58658855f0316af9afeece45120656f505d8e3 | def setup(app):
'Install the plugin.\n\n :param app: Sphinx application context.\n '
app.add_role('btn', btn_role)
return | Install the plugin.
:param app: Sphinx application context. | docs/source/_extentions/btn.py | setup | t-elisee/sepal-doc | 2 | python | def setup(app):
'Install the plugin.\n\n :param app: Sphinx application context.\n '
app.add_role('btn', btn_role)
return | def setup(app):
'Install the plugin.\n\n :param app: Sphinx application context.\n '
app.add_role('btn', btn_role)
return<|docstring|>Install the plugin.
:param app: Sphinx application context.<|endoftext|> |
89a801c6c5d2ecff6d5ff7b82ac765b7ed73386ccaf3c8f21f151aebc30ccfaf | @serial_task(settings.SYNC_TASK_TIMEOUT, base=PontoonTask, lock_key='project={0}', on_error=sync_project_error)
def sync_project(self, project_pk, sync_log_pk, locale=None, no_pull=False, no_commit=False, force=False):
'Fetch the project with the given PK and perform sync on it.'
db_project = get_or_fail(Project, pk=project_pk, message='Could not sync project with pk={0}, not found.'.format(project_pk))
sync_log = get_or_fail(SyncLog, pk=sync_log_pk, message='Could not sync project {0}, log with pk={1} not found.'.format(db_project.slug, sync_log_pk))
log.info('Syncing project {0}.'.format(db_project.slug))
now = timezone.now()
project_sync_log = ProjectSyncLog.objects.create(sync_log=sync_log, project=db_project, start_time=now)
if locale:
sync_translations.delay(project_pk, project_sync_log.pk, now, locale=locale, no_pull=no_pull, no_commit=no_commit, full_scan=force)
return
resource_changes = sync_resources(db_project, now, force, no_pull)
if (not resource_changes):
project_sync_log.skip()
return
sync_translations.delay(project_pk, project_sync_log.pk, now, resource_changes['project_changes'], resource_changes['obsolete_vcs_resources'], resource_changes['new_paths'], no_pull=no_pull, no_commit=no_commit, full_scan=force) | Fetch the project with the given PK and perform sync on it. | pontoon/sync/tasks.py | sync_project | julen/pontoon | 0 | python | @serial_task(settings.SYNC_TASK_TIMEOUT, base=PontoonTask, lock_key='project={0}', on_error=sync_project_error)
def sync_project(self, project_pk, sync_log_pk, locale=None, no_pull=False, no_commit=False, force=False):
db_project = get_or_fail(Project, pk=project_pk, message='Could not sync project with pk={0}, not found.'.format(project_pk))
sync_log = get_or_fail(SyncLog, pk=sync_log_pk, message='Could not sync project {0}, log with pk={1} not found.'.format(db_project.slug, sync_log_pk))
log.info('Syncing project {0}.'.format(db_project.slug))
now = timezone.now()
project_sync_log = ProjectSyncLog.objects.create(sync_log=sync_log, project=db_project, start_time=now)
if locale:
sync_translations.delay(project_pk, project_sync_log.pk, now, locale=locale, no_pull=no_pull, no_commit=no_commit, full_scan=force)
return
resource_changes = sync_resources(db_project, now, force, no_pull)
if (not resource_changes):
project_sync_log.skip()
return
sync_translations.delay(project_pk, project_sync_log.pk, now, resource_changes['project_changes'], resource_changes['obsolete_vcs_resources'], resource_changes['new_paths'], no_pull=no_pull, no_commit=no_commit, full_scan=force) | @serial_task(settings.SYNC_TASK_TIMEOUT, base=PontoonTask, lock_key='project={0}', on_error=sync_project_error)
def sync_project(self, project_pk, sync_log_pk, locale=None, no_pull=False, no_commit=False, force=False):
db_project = get_or_fail(Project, pk=project_pk, message='Could not sync project with pk={0}, not found.'.format(project_pk))
sync_log = get_or_fail(SyncLog, pk=sync_log_pk, message='Could not sync project {0}, log with pk={1} not found.'.format(db_project.slug, sync_log_pk))
log.info('Syncing project {0}.'.format(db_project.slug))
now = timezone.now()
project_sync_log = ProjectSyncLog.objects.create(sync_log=sync_log, project=db_project, start_time=now)
if locale:
sync_translations.delay(project_pk, project_sync_log.pk, now, locale=locale, no_pull=no_pull, no_commit=no_commit, full_scan=force)
return
resource_changes = sync_resources(db_project, now, force, no_pull)
if (not resource_changes):
project_sync_log.skip()
return
sync_translations.delay(project_pk, project_sync_log.pk, now, resource_changes['project_changes'], resource_changes['obsolete_vcs_resources'], resource_changes['new_paths'], no_pull=no_pull, no_commit=no_commit, full_scan=force)<|docstring|>Fetch the project with the given PK and perform sync on it.<|endoftext|> |
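A hedged sketch of queueing the Celery task above: the bound self is supplied by the framework, the primary keys are illustrative, and the expected type of the locale argument (code, pk, or model object) is not shown in this record.

# Full sync of one project against an existing SyncLog row.
sync_project.delay(42, 7)
# Narrower run: skip pulling from VCS and skip committing back.
sync_project.delay(42, 7, no_pull=True, no_commit=True)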
4d212e9d39a88bcf5bb90a9eb20e94ae66bb00b7d8cdda65ce0287d4f694b188 | def filter_all(self, cells, filter_properties):
'Override filter_all() which operates on the full list\n of cells...\n '
scheduler_hints = filter_properties.get('scheduler_hints')
if (not scheduler_hints):
return cells
cell_routes = scheduler_hints.get('different_cell')
if (not cell_routes):
return cells
if isinstance(cell_routes, six.string_types):
cell_routes = [cell_routes]
if (not self.authorized(filter_properties['context'])):
return cells
routing_path = filter_properties['routing_path']
filtered_cells = []
for cell in cells:
if (not self._cell_state_matches(cell, routing_path, cell_routes)):
filtered_cells.append(cell)
return filtered_cells | Override filter_all() which operates on the full list
of cells... | nova/cells/filters/different_cell.py | filter_all | jovial/nova | 5 | python | def filter_all(self, cells, filter_properties):
'Override filter_all() which operates on the full list\n of cells...\n '
scheduler_hints = filter_properties.get('scheduler_hints')
if (not scheduler_hints):
return cells
cell_routes = scheduler_hints.get('different_cell')
if (not cell_routes):
return cells
if isinstance(cell_routes, six.string_types):
cell_routes = [cell_routes]
if (not self.authorized(filter_properties['context'])):
return cells
routing_path = filter_properties['routing_path']
filtered_cells = []
for cell in cells:
if (not self._cell_state_matches(cell, routing_path, cell_routes)):
filtered_cells.append(cell)
return filtered_cells | def filter_all(self, cells, filter_properties):
'Override filter_all() which operates on the full list\n of cells...\n '
scheduler_hints = filter_properties.get('scheduler_hints')
if (not scheduler_hints):
return cells
cell_routes = scheduler_hints.get('different_cell')
if (not cell_routes):
return cells
if isinstance(cell_routes, six.string_types):
cell_routes = [cell_routes]
if (not self.authorized(filter_properties['context'])):
return cells
routing_path = filter_properties['routing_path']
filtered_cells = []
for cell in cells:
if (not self._cell_state_matches(cell, routing_path, cell_routes)):
filtered_cells.append(cell)
return filtered_cells<|docstring|>Override filter_all() which operates on the full list
of cells...<|endoftext|> |
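A simplified, standalone rendition of the hint handling in filter_all above (not the Nova class itself): the different_cell hint may be a single route string or a list, and a cell is dropped when its full route matches one of them; the route format assumed here is '<routing_path>!<cell name>'.

def drop_different_cells(cell_names, scheduler_hints, routing_path):
    routes = scheduler_hints.get('different_cell')
    if not routes:
        return list(cell_names)
    if isinstance(routes, str):
        routes = [routes]
    return [n for n in cell_names if (routing_path + '!' + n) not in routes]

print(drop_different_cells(['child1', 'child2'],
                           {'different_cell': 'region!child1'}, 'region'))
# -> ['child2']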
ce250a4b7ccfa2888cf39aa0f7d67423f0c9bbeca6b93834bbe1671ce9aca736 | def __init__(self, state_shape: tuple, action_shape: tuple, name: str, side: str='up', config: Config=None, caching: bool=False, cache: RedisCache=None, channel: RedisChannel=None):
'\n abstract class for agent which define the general interface for Agents\n :param name:\n :param side:\n '
assert (side in ['up', 'down']), 'Side has to be up or down'
self.name = name
self.side = side
self.state_shape = state_shape
self.action_shape = action_shape
self.number_turns = 0
self.td_loss_history = []
self.moving_average_loss = []
self.reward_history = []
self.moving_average_rewards = []
self._episode_reward = 0
if (config is None):
config = Config()
if caching:
logging.debug('Caching is considered! When you don´t deliver cache and stream by yourself, the agent will get a redis stream and cache by default')
if (cache is None):
self.redis_cache = RedisCache(host=config['REDIS_HOST'], port=config['REDIS_PORT'], db=int(config['REDIS_DB']))
else:
self.redis_cache = cache
if (channel is None):
self.redis_channel = RedisChannel(host=config['REDIS_HOST'], port=config['REDIS_PORT'], db=int(config['REDIS_DB']))
else:
self.redis_channel = channel
else:
self.redis_cache = None
self.redis_channel = None
logging.info('No caching is considered!') | abstract class for agent which define the general interface for Agents
:param name:
:param side: | checkers/agents/Agent.py | __init__ | FelixKleineBoesing/CheckersAI | 1 | python | def __init__(self, state_shape: tuple, action_shape: tuple, name: str, side: str='up', config: Config=None, caching: bool=False, cache: RedisCache=None, channel: RedisChannel=None):
'\n abstract class for agent which define the general interface for Agents\n :param name:\n :param side:\n '
assert (side in ['up', 'down']), 'Side has to be up or down'
self.name = name
self.side = side
self.state_shape = state_shape
self.action_shape = action_shape
self.number_turns = 0
self.td_loss_history = []
self.moving_average_loss = []
self.reward_history = []
self.moving_average_rewards = []
self._episode_reward = 0
if (config is None):
config = Config()
if caching:
logging.debug('Caching is considered! When you don´t deliver cache and stream by yourself, the agent will get a redis stream and cache by default')
if (cache is None):
self.redis_cache = RedisCache(host=config['REDIS_HOST'], port=config['REDIS_PORT'], db=int(config['REDIS_DB']))
else:
self.redis_cache = cache
if (channel is None):
self.redis_channel = RedisChannel(host=config['REDIS_HOST'], port=config['REDIS_PORT'], db=int(config['REDIS_DB']))
else:
self.redis_channel = channel
else:
self.redis_cache = None
self.redis_channel = None
logging.info('No caching is considered!') | def __init__(self, state_shape: tuple, action_shape: tuple, name: str, side: str='up', config: Config=None, caching: bool=False, cache: RedisCache=None, channel: RedisChannel=None):
'\n abstract class for agent which define the general interface for Agents\n :param name:\n :param side:\n '
assert (side in ['up', 'down']), 'Side has to be up or down'
self.name = name
self.side = side
self.state_shape = state_shape
self.action_shape = action_shape
self.number_turns = 0
self.td_loss_history = []
self.moving_average_loss = []
self.reward_history = []
self.moving_average_rewards = []
self._episode_reward = 0
if (config is None):
config = Config()
if caching:
logging.debug('Caching is considered! When you don´t deliver cache and stream by yourself, the agent will get a redis stream and cache by default')
if (cache is None):
self.redis_cache = RedisCache(host=config['REDIS_HOST'], port=config['REDIS_PORT'], db=int(config['REDIS_DB']))
else:
self.redis_cache = cache
if (channel is None):
self.redis_channel = RedisChannel(host=config['REDIS_HOST'], port=config['REDIS_PORT'], db=int(config['REDIS_DB']))
else:
self.redis_channel = channel
else:
self.redis_cache = None
self.redis_channel = None
logging.info('No caching is considered!')<|docstring|>abstract class for agent which define the general interface for Agents
:param name:
:param side:<|endoftext|> |
7090879537a74d3279b125885b82e9eb36dd8771703c14b0d3c0076fb992c4e7 | def play_turn(self, state_space: np.ndarray, action_space: ActionSpace):
'\n get all possible actions and decide which action to take\n :param state_space: np array describing the board\n :param action_space: dictionary containing all possible moves\n :return:\n '
decision = self.decision(state_space, action_space)
assert isinstance(decision, np.ndarray), 'decision return must be a numpy array'
assert (len(decision) == 4), 'decision return must be a np array with length 4'
self.number_turns += 1
return decision | get all possible actions and decide which action to take
:param state_space: np array describing the board
:param action_space: dictionary containing all possible moves
:return: | checkers/agents/Agent.py | play_turn | FelixKleineBoesing/CheckersAI | 1 | python | def play_turn(self, state_space: np.ndarray, action_space: ActionSpace):
'\n get all possible actions and decide which action to take\n :param state_space: np array describing the board\n :param action_space: dictionary containing all possible moves\n :return:\n '
decision = self.decision(state_space, action_space)
assert isinstance(decision, np.ndarray), 'decision return must be a numpy array'
assert (len(decision) == 4), 'decision return must be a np array with length 4'
self.number_turns += 1
return decision | def play_turn(self, state_space: np.ndarray, action_space: ActionSpace):
'\n get all possible actions and decide which action to take\n :param state_space: np array describing the board\n :param action_space: dictionary containing all possible moves\n :return:\n '
decision = self.decision(state_space, action_space)
assert isinstance(decision, np.ndarray), 'decision return must be a numpy array'
assert (len(decision) == 4), 'decision return must be a np array with length 4'
self.number_turns += 1
return decision<|docstring|>get all possible actions and decide which action to take
:param state_space: np array describing the board
:param action_space: dictionary containing all possible moves
:return:<|endoftext|> |
ca2bb0aa73105a8707400ac6d1786c1804e17dc0cf6674c4d6892af8e1a7dccf | def get_feedback(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, finished: bool):
'\n through this function the agent gets information about the last turn\n :param state:\n :param action:\n :param reward:\n :param next_state:\n :param finished:\n :return: No return\n '
if finished:
self.reward_history.append((self._episode_reward + reward))
self.moving_average_rewards.append(np.mean([self.reward_history[max([0, (len(self.reward_history) - 100)]):]]))
self._episode_reward = 0
else:
self._episode_reward += reward
self._get_feedback_inner(state, action, reward, next_state, finished) | through this function the agent gets information about the last turn
:param state:
:param action:
:param reward:
:param next_state:
:param finished:
:return: No return | checkers/agents/Agent.py | get_feedback | FelixKleineBoesing/CheckersAI | 1 | python | def get_feedback(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, finished: bool):
'\n through this function the agent gets information about the last turn\n :param state:\n :param action:\n :param reward:\n :param next_state:\n :param finished:\n :return: No return\n '
if finished:
self.reward_history.append((self._episode_reward + reward))
self.moving_average_rewards.append(np.mean([self.reward_history[max([0, (len(self.reward_history) - 100)]):]]))
self._episode_reward = 0
else:
self._episode_reward += reward
self._get_feedback_inner(state, action, reward, next_state, finished) | def get_feedback(self, state: np.ndarray, action: int, reward: float, next_state: np.ndarray, finished: bool):
'\n through this function the agent gets information about the last turn\n :param state:\n :param action:\n :param reward:\n :param next_state:\n :param finished:\n :return: No return\n '
if finished:
self.reward_history.append((self._episode_reward + reward))
self.moving_average_rewards.append(np.mean([self.reward_history[max([0, (len(self.reward_history) - 100)]):]]))
self._episode_reward = 0
else:
self._episode_reward += reward
self._get_feedback_inner(state, action, reward, next_state, finished)<|docstring|>through this function the agent gets information about the last turn
:param state:
:param action:
:param reward:
:param next_state:
:param finished:
:return: No return<|endoftext|> |
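A minimal concrete agent sketched against the interface above: play_turn expects decision() to return a length-4 numpy array, and get_feedback forwards to _get_feedback_inner, which is assumed to be the subclass hook (its declaration is not shown in this record); what the four numbers encode is also an assumption.

import numpy as np

class RandomishAgent(Agent):
    def decision(self, state_space, action_space):
        # Stub: a real agent would score the moves in action_space; the four
        # values are assumed to be (from_row, from_col, to_row, to_col).
        return np.array([2, 1, 3, 2])

    def _get_feedback_inner(self, state, action, reward, next_state, finished):
        # A learning agent would store the transition or update its model here.
        pass

# Construction needs the package's Config (or an explicit config object):
# agent = RandomishAgent(state_shape=(8, 8), action_shape=(8, 8, 8, 8),
#                        name='randomish', side='up', caching=False)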