def add_information_about_person(self, session_info):
    """If there is already information from this source in the cache,
    this function will overwrite it."""
    session_info = dict(session_info)
    name_id = session_info["name_id"]
    issuer = session_info.pop("issuer")
    self.cache.set(name_id, issuer, session_info,
                   session_info["not_on_or_after"])
    return name_id

"def",
"add_information_about_person",
"(",
"self",
",",
"session_info",
")",
":",
"session_info",
"=",
"dict",
"(",
"session_info",
")",
"name_id",
"=",
"session_info",
"[",
"\"name_id\"",
"]",
"issuer",
"=",
"session_info",
".",
"pop",
"(",
"\"issuer\"",
")",
"self",
".",
"cache",
".",
"set",
"(",
"name_id",
",",
"issuer",
",",
"session_info",
",",
"session_info",
"[",
"\"not_on_or_after\"",
"]",
")",
"return",
"name_id"
]
| 43.7 | 10.4 |
def dummyvar(cis, return_sparse=False):
    '''
    This is an efficient implementation of MATLAB's "dummyvar" command
    using sparse matrices.

    input: partitions, NxM array-like containing M partitions of N nodes
        into <=N distinct communities
    output: dummyvar, an NxR matrix containing R column variables (indicator
        variables) with N entries, where R is the total number of communities
        summed across each of the M partitions, i.e.
        r = sum(len(unique(partitions[:, i])) for i in range(m))
    '''
    import scipy.sparse as sp

    # num_rows is not affected by partition indexes
    n = np.size(cis, axis=0)
    m = np.size(cis, axis=1)
    # total number of communities summed across all partitions
    r = sum(len(np.unique(cis[:, i])) for i in range(m))
    nnz = np.prod(cis.shape)

    ix = np.argsort(cis, axis=0)
    # s_cis=np.sort(cis,axis=0)
    # FIXME use the sorted indices to sort by row efficiently
    s_cis = cis[ix][:, range(m), range(m)]

    mask = np.hstack((((True,),) * m, (s_cis[:-1, :] != s_cis[1:, :]).T))
    indptr, = np.where(mask.flat)
    indptr = np.append(indptr, nnz)

    dv = sp.csc_matrix((np.repeat((1,), nnz), ix.T.flat, indptr), shape=(n, r))
    # honor the return_sparse flag, which was otherwise unused
    return dv if return_sparse else dv.toarray()

"def",
"dummyvar",
"(",
"cis",
",",
"return_sparse",
"=",
"False",
")",
":",
"# num_rows is not affected by partition indexes",
"n",
"=",
"np",
".",
"size",
"(",
"cis",
",",
"axis",
"=",
"0",
")",
"m",
"=",
"np",
".",
"size",
"(",
"cis",
",",
"axis",
"=",
"1",
")",
"r",
"=",
"np",
".",
"sum",
"(",
"(",
"np",
".",
"max",
"(",
"len",
"(",
"np",
".",
"unique",
"(",
"cis",
"[",
":",
",",
"i",
"]",
")",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"m",
")",
")",
"nnz",
"=",
"np",
".",
"prod",
"(",
"cis",
".",
"shape",
")",
"ix",
"=",
"np",
".",
"argsort",
"(",
"cis",
",",
"axis",
"=",
"0",
")",
"# s_cis=np.sort(cis,axis=0)",
"# FIXME use the sorted indices to sort by row efficiently",
"s_cis",
"=",
"cis",
"[",
"ix",
"]",
"[",
":",
",",
"range",
"(",
"m",
")",
",",
"range",
"(",
"m",
")",
"]",
"mask",
"=",
"np",
".",
"hstack",
"(",
"(",
"(",
"(",
"True",
",",
")",
",",
")",
"*",
"m",
",",
"(",
"s_cis",
"[",
":",
"-",
"1",
",",
":",
"]",
"!=",
"s_cis",
"[",
"1",
":",
",",
":",
"]",
")",
".",
"T",
")",
")",
"indptr",
",",
"=",
"np",
".",
"where",
"(",
"mask",
".",
"flat",
")",
"indptr",
"=",
"np",
".",
"append",
"(",
"indptr",
",",
"nnz",
")",
"import",
"scipy",
".",
"sparse",
"as",
"sp",
"dv",
"=",
"sp",
".",
"csc_matrix",
"(",
"(",
"np",
".",
"repeat",
"(",
"(",
"1",
",",
")",
",",
"nnz",
")",
",",
"ix",
".",
"T",
".",
"flat",
",",
"indptr",
")",
",",
"shape",
"=",
"(",
"n",
",",
"r",
")",
")",
"return",
"dv",
".",
"toarray",
"(",
")"
]
| 35.757576 | 22.727273 |
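As an illustrative check of the intended behaviour (the array below is made up, not from the source):

import numpy as np
# Two partitions of 3 nodes: the first uses communities {1, 2}, the
# second uses {1, 2, 3}, so R = 2 + 3 = 5 indicator columns.
cis = np.array([[1, 1],
                [1, 2],
                [2, 3]])
ind = dummyvar(cis)
# ind.shape == (3, 5); each row carries exactly one 1 per partition block.
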
def time(hour, minute=0, second=0, microsecond=0):  # type: (int, int, int, int) -> Time
    """
    Create a new Time instance.
    """
    return Time(hour, minute, second, microsecond)

"def",
"time",
"(",
"hour",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
":",
"# type: (int, int, int, int) -> Time",
"return",
"Time",
"(",
"hour",
",",
"minute",
",",
"second",
",",
"microsecond",
")"
]
| 36.6 | 13.4 |
def get_screen_pointers(self):
    """
    Returns the xcb_screen_t for every screen;
    useful for other bindings.
    """
    root_iter = lib.xcb_setup_roots_iterator(self._setup)
    screens = [root_iter.data]
    for i in range(self._setup.roots_len - 1):
        lib.xcb_screen_next(ffi.addressof(root_iter))
        screens.append(root_iter.data)
    return screens

"def",
"get_screen_pointers",
"(",
"self",
")",
":",
"root_iter",
"=",
"lib",
".",
"xcb_setup_roots_iterator",
"(",
"self",
".",
"_setup",
")",
"screens",
"=",
"[",
"root_iter",
".",
"data",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_setup",
".",
"roots_len",
"-",
"1",
")",
":",
"lib",
".",
"xcb_screen_next",
"(",
"ffi",
".",
"addressof",
"(",
"(",
"root_iter",
")",
")",
")",
"screens",
".",
"append",
"(",
"root_iter",
".",
"data",
")",
"return",
"screens"
]
| 33.5 | 11.833333 |
def write(self, address, size, value):
    """Write arbitrary size content to memory.
    """
    for i in range(0, size):
        self.__write_byte(address + i, (value >> (i * 8)) & 0xff)

"def",
"write",
"(",
"self",
",",
"address",
",",
"size",
",",
"value",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"size",
")",
":",
"self",
".",
"__write_byte",
"(",
"address",
"+",
"i",
",",
"(",
"value",
">>",
"(",
"i",
"*",
"8",
")",
")",
"&",
"0xff",
")"
]
| 40 | 7.8 |
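The shift-and-mask loop stores the least-significant byte at the lowest address, i.e. little-endian order; a standalone illustration of the decomposition:

value, size = 0x12345678, 4
data = [(value >> (i * 8)) & 0xff for i in range(size)]
# data == [0x78, 0x56, 0x34, 0x12] -- least-significant byte first,
# so byte i lands at address + i.
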
def save(self):
    """Convert to JSON.

    Returns
    -------
    `dict`
        JSON data.
    """
    data = super().save()
    data['end_chars'] = self.end_chars
    data['default_end'] = self.default_end
    return data

"def",
"save",
"(",
"self",
")",
":",
"data",
"=",
"super",
"(",
")",
".",
"save",
"(",
")",
"data",
"[",
"'end_chars'",
"]",
"=",
"self",
".",
"end_chars",
"data",
"[",
"'default_end'",
"]",
"=",
"self",
".",
"default_end",
"return",
"data"
]
| 21.25 | 16.583333 |
def BHS(self, params):
    """
    BHS label

    Branch to the instruction at label if the C flag is set
    """
    label = self.get_one_parameter(self.ONE_PARAMETER, params)
    self.check_arguments(label_exists=(label,))

    # BHS label
    def BHS_func():
        if self.is_C_set():
            self.register['PC'] = self.labels[label]

    return BHS_func

"def",
"BHS",
"(",
"self",
",",
"params",
")",
":",
"label",
"=",
"self",
".",
"get_one_parameter",
"(",
"self",
".",
"ONE_PARAMETER",
",",
"params",
")",
"self",
".",
"check_arguments",
"(",
"label_exists",
"=",
"(",
"label",
",",
")",
")",
"# BHS label",
"def",
"BHS_func",
"(",
")",
":",
"if",
"self",
".",
"is_C_set",
"(",
")",
":",
"self",
".",
"register",
"[",
"'PC'",
"]",
"=",
"self",
".",
"labels",
"[",
"label",
"]",
"return",
"BHS_func"
]
| 24.5625 | 21.3125 |
def init_config(self):
    """Patch input.nml as a new or restart run."""
    input_fpath = os.path.join(self.work_path, 'input.nml')
    input_nml = f90nml.read(input_fpath)
    if self.expt.counter == 0 or self.expt.repeat_run:
        input_type = 'n'
    else:
        input_type = 'r'
    input_nml['MOM_input_nml']['input_filename'] = input_type
    f90nml.write(input_nml, input_fpath, force=True)

"def",
"init_config",
"(",
"self",
")",
":",
"input_fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"work_path",
",",
"'input.nml'",
")",
"input_nml",
"=",
"f90nml",
".",
"read",
"(",
"input_fpath",
")",
"if",
"self",
".",
"expt",
".",
"counter",
"==",
"0",
"or",
"self",
".",
"expt",
".",
"repeat_run",
":",
"input_type",
"=",
"'n'",
"else",
":",
"input_type",
"=",
"'r'",
"input_nml",
"[",
"'MOM_input_nml'",
"]",
"[",
"'input_filename'",
"]",
"=",
"input_type",
"f90nml",
".",
"write",
"(",
"input_nml",
",",
"input_fpath",
",",
"force",
"=",
"True",
")"
]
| 30.785714 | 22.5 |
def findChangelist(self, description=None):
    """Gets or creates a Changelist object with a description

    :param description: The description to set or lookup
    :type description: str
    :returns: :class:`.Changelist`
    """
    if description is None:
        change = Default(self)
    else:
        if isinstance(description, six.integer_types):
            change = Changelist(description, self)
        else:
            pending = self.run(['changes', '-l', '-s', 'pending', '-c', str(self._client), '-u', self._user])
            for cl in pending:
                if cl['desc'].strip() == description.strip():
                    LOGGER.debug('Changelist found: {}'.format(cl['change']))
                    change = Changelist(int(cl['change']), self)
                    break
            else:
                # for/else: runs only when no pending changelist matched
                LOGGER.debug('No changelist found, creating one')
                change = Changelist.create(description, self)
            change.client = self._client
            change.save()
    return change

"def",
"findChangelist",
"(",
"self",
",",
"description",
"=",
"None",
")",
":",
"if",
"description",
"is",
"None",
":",
"change",
"=",
"Default",
"(",
"self",
")",
"else",
":",
"if",
"isinstance",
"(",
"description",
",",
"six",
".",
"integer_types",
")",
":",
"change",
"=",
"Changelist",
"(",
"description",
",",
"self",
")",
"else",
":",
"pending",
"=",
"self",
".",
"run",
"(",
"[",
"'changes'",
",",
"'-l'",
",",
"'-s'",
",",
"'pending'",
",",
"'-c'",
",",
"str",
"(",
"self",
".",
"_client",
")",
",",
"'-u'",
",",
"self",
".",
"_user",
"]",
")",
"for",
"cl",
"in",
"pending",
":",
"if",
"cl",
"[",
"'desc'",
"]",
".",
"strip",
"(",
")",
"==",
"description",
".",
"strip",
"(",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'Changelist found: {}'",
".",
"format",
"(",
"cl",
"[",
"'change'",
"]",
")",
")",
"change",
"=",
"Changelist",
"(",
"int",
"(",
"cl",
"[",
"'change'",
"]",
")",
",",
"self",
")",
"break",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"'No changelist found, creating one'",
")",
"change",
"=",
"Changelist",
".",
"create",
"(",
"description",
",",
"self",
")",
"change",
".",
"client",
"=",
"self",
".",
"_client",
"change",
".",
"save",
"(",
")",
"return",
"change"
]
| 42.346154 | 19.346154 |
def run(self):
    """Run command."""
    command = ['npm', 'install']
    self.announce(
        'Running command: %s' % str(command),
        level=INFO)
    subprocess.check_call(command)

"def",
"run",
"(",
"self",
")",
":",
"command",
"=",
"[",
"'npm'",
",",
"'install'",
"]",
"self",
".",
"announce",
"(",
"'Running command: %s'",
"%",
"str",
"(",
"command",
")",
",",
"level",
"=",
"INFO",
")",
"subprocess",
".",
"check_call",
"(",
"command",
")"
]
| 29.714286 | 10.857143 |
def symmetric_elliot_function(signal, derivative=False):
    """ A fast approximation of tanh """
    s = 1.0  # steepness
    abs_signal = (1 + np.abs(signal * s))
    if derivative:
        return s / abs_signal**2
    else:
        # Return the activation signal
        return (signal * s) / abs_signal

"def",
"symmetric_elliot_function",
"(",
"signal",
",",
"derivative",
"=",
"False",
")",
":",
"s",
"=",
"1.0",
"# steepness",
"abs_signal",
"=",
"(",
"1",
"+",
"np",
".",
"abs",
"(",
"signal",
"*",
"s",
")",
")",
"if",
"derivative",
":",
"return",
"s",
"/",
"abs_signal",
"**",
"2",
"else",
":",
"# Return the activation signal",
"return",
"(",
"signal",
"*",
"s",
")",
"/",
"abs_signal"
]
| 30.3 | 13.5 |
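A quick numerical comparison against np.tanh shows the shared qualitative shape (the sample points below are only for illustration):

import numpy as np
x = np.linspace(-4, 4, 5)
approx = symmetric_elliot_function(x)
# Like tanh, x / (1 + |x|) is bounded in (-1, 1) and passes through the
# origin, but it avoids the exponentials that make np.tanh slower.
print(np.round(approx, 3))      # [-0.8   -0.667  0.     0.667  0.8  ]
print(np.round(np.tanh(x), 3))  # [-0.999 -0.964  0.     0.964  0.999]
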
def RemoveUser(self, user):
    """Remove a Linux user account.

    Args:
        user: string, the Linux user account to remove.
    """
    self.logger.info('Removing user %s.', user)
    if self.remove:
        command = self.userdel_cmd.format(user=user)
        try:
            subprocess.check_call(command.split(' '))
        except subprocess.CalledProcessError as e:
            self.logger.warning('Could not remove user %s. %s.', user, str(e))
        else:
            self.logger.info('Removed user account %s.', user)
    self._RemoveAuthorizedKeys(user)
    self._UpdateSudoer(user, sudoer=False)

"def",
"RemoveUser",
"(",
"self",
",",
"user",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Removing user %s.'",
",",
"user",
")",
"if",
"self",
".",
"remove",
":",
"command",
"=",
"self",
".",
"userdel_cmd",
".",
"format",
"(",
"user",
"=",
"user",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"command",
".",
"split",
"(",
"' '",
")",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'Could not remove user %s. %s.'",
",",
"user",
",",
"str",
"(",
"e",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Removed user account %s.'",
",",
"user",
")",
"self",
".",
"_RemoveAuthorizedKeys",
"(",
"user",
")",
"self",
".",
"_UpdateSudoer",
"(",
"user",
",",
"sudoer",
"=",
"False",
")"
]
| 33.823529 | 15.823529 |
def index_fields(self, index=Index(), **options):
    """ Indexes the `Pointer` field and the :attr:`data` object referenced
    by the `Pointer` field starting with the given *index* and returns the
    :class:`Index` after the `Pointer` field.

    :param Index index: :class:`Index` for the `Pointer` field.
    :keyword bool nested: if ``True`` all :class:`Pointer` fields in the
        :attr:`data` object referenced by the `Pointer` field indexes their
        referenced :attr:`~Pointer.data` object fields as well
        (chained method call).
    """
    index = self.index_field(index)
    # Container
    if is_container(self._data):
        self._data.index_fields(Index(0, 0,
                                      self.address, self.base_address,
                                      False),
                                **options)
    # Pointer
    elif is_pointer(self._data) and get_nested(options):
        self._data.index_fields(Index(0, 0,
                                      self.address, self.base_address,
                                      False),
                                **options)
    # Field
    elif is_field(self._data):
        self._data.index_field(Index(0, 0,
                                     self.address, self.base_address,
                                     False))
    return index

"def",
"index_fields",
"(",
"self",
",",
"index",
"=",
"Index",
"(",
")",
",",
"*",
"*",
"options",
")",
":",
"index",
"=",
"self",
".",
"index_field",
"(",
"index",
")",
"# Container",
"if",
"is_container",
"(",
"self",
".",
"_data",
")",
":",
"self",
".",
"_data",
".",
"index_fields",
"(",
"Index",
"(",
"0",
",",
"0",
",",
"self",
".",
"address",
",",
"self",
".",
"base_address",
",",
"False",
")",
",",
"*",
"*",
"options",
")",
"# Pointer",
"elif",
"is_pointer",
"(",
"self",
".",
"_data",
")",
"and",
"get_nested",
"(",
"options",
")",
":",
"self",
".",
"_data",
".",
"index_fields",
"(",
"Index",
"(",
"0",
",",
"0",
",",
"self",
".",
"address",
",",
"self",
".",
"base_address",
",",
"False",
")",
",",
"*",
"*",
"options",
")",
"# Field",
"elif",
"is_field",
"(",
"self",
".",
"_data",
")",
":",
"self",
".",
"_data",
".",
"index_field",
"(",
"Index",
"(",
"0",
",",
"0",
",",
"self",
".",
"address",
",",
"self",
".",
"base_address",
",",
"False",
")",
")",
"return",
"index"
]
| 46 | 17.709677 |
def min_med(images, weight_images, readnoise_list, exptime_list,
            background_values, weight_masks=None, combine_grow=1,
            combine_nsigma1=4, combine_nsigma2=3, fillval=False):
    """ Create a median array, rejecting the highest pixel and
    computing the lowest valid pixel after mask application.

    .. note::
        In this version of the minmed algorithm we assume that the units of
        all input data are electrons.

    Parameters
    ----------
    images : list of numpy.ndarray
        List of input data to be combined.

    weight_images : list of numpy.ndarray
        List of input data weight images to be combined.

    readnoise_list : list
        List of readnoise values to use for the input images.

    exptime_list : list
        List of exposure times to use for the input images.

    background_values : list
        List of image background values to use for the input images.

    weight_masks : list of numpy.ndarray, None
        List of input data weight masks to use for pixel rejection.
        (Default: `None`)

    combine_grow : int
        Radius (pixels) for neighbor rejection. (Default: 1)

    combine_nsigma1 : float
        Significance for accepting minimum instead of median. (Default: 4)

    combine_nsigma2 : float
        Significance for accepting minimum instead of median. (Default: 3)

    fillval : bool
        Turn on use of imedian/imean. (Default: `False`)

    Returns
    -------
    combined_array : numpy.ndarray
        Combined array.
    """
    # In this case we want to calculate two things:
    # 1) the median array, rejecting the highest pixel (thus running
    #    imcombine with nlow=0, nhigh=1, nkeep=1, using the masks)
    # 2) the lowest valid pixel after applying the masks (thus running
    #    imcombine with nlow=0, nhigh=3, nkeep=1, using the masks)
    #
    # We also calculate the sum of the weight files (to produce the total
    # effective exposure time for each pixel).
    #
    # The total effective background in the final image is calculated as
    # follows:
    #   - convert background for each input image to counts/s
    #     (divide by exptime)
    #   - multiply this value by the weight image, to obtain the effective
    #     background counts (in DN) for each pixel, for each image
    #   - Add these images together, to obtain the total effective background
    #     for the combined image.
    #
    # Once we've made these two files, then calculate the SNR based on the
    # median-pixel image, and compare with the minimum.
    nimages = len(images)
    combtype_median = 'imedian' if fillval else 'median'
    images = np.asarray(images)
    weight_images = np.asarray(weight_images)

    if weight_masks == [] or weight_masks is None:
        weight_masks = None
        mask_sum = np.zeros(images.shape[1:], dtype=np.int16)
        all_bad_idx = np.array([], dtype=int)
        all_bad_idy = np.array([], dtype=int)
    else:
        weight_masks = np.asarray(weight_masks, dtype=bool)
        mask_sum = np.sum(weight_masks, axis=0, dtype=np.int16)
        all_bad_idx, all_bad_idy = np.where(mask_sum == nimages)

    # Create a different median image based upon the number of images in the
    # input list.
    if nimages == 2:
        median_file = num_combine(
            images,
            masks=weight_masks,
            combination_type='imean' if fillval else 'mean',
            nlow=0, nhigh=0, lower=None, upper=None
        )
    else:
        # The value of NHIGH=1 will cause problems when there is only 1 valid
        # unmasked input image for that pixel due to a difference in behavior
        # between 'num_combine' and 'iraf.imcombine'.
        # This value may need to be adjusted on the fly based on the number of
        # inputs and the number of masked values/pixel.
        #
        median_file = num_combine(
            images,
            masks=weight_masks,
            combination_type=combtype_median,
            nlow=0, nhigh=1, lower=None, upper=None
        )

        # The following section of code will address the problem caused by
        # having a value of nhigh = 1. This will behave in a way similar to
        # the way the IRAF task IMCOMBINE behaves. In order to accomplish
        # this, the following procedure will be followed:
        # 1) The input masks will be summed.
        # 2) The science data will be summed.
        # 3) In the locations of the summed mask where the sum is 1 less than
        #    the total number of images, the value of that location in the
        #    summed science image will be used to replace the existing value
        #    in the existing median_file.
        #
        # This procedure is being used to prevent too much data from being
        # thrown out of the image. Take for example the case of 3 input images.
        # In two of the images the pixel locations have been masked out.
        # Now, if nhigh is applied there will be no value to use for that
        # position. However, if this new procedure is used that value in
        # the resulting images will be the value that was rejected by the
        # nhigh rejection step.

        # We need to make certain that "bad" pixels in the sci data are set to
        # 0. That way, when the sci images are summed, the value of the sum
        # will only come from the "good" pixels.
        if weight_masks is None:
            sci_sum = np.sum(images, axis=0)
            if nimages == 1:
                median_file = sci_sum
        else:
            sci_sum = np.sum(images * np.logical_not(weight_masks), axis=0)
            # Use the summed sci image values in locations where the mask_sum
            # indicates that there is only 1 good pixel to use. The value will
            # be used in the median_file image
            idx = np.where(mask_sum == (nimages - 1))
            median_file[idx] = sci_sum[idx]

    # Create the minimum image from the stack of input images.
    if weight_masks is not None:
        # make a copy of images to avoid side-effect of modifying input
        # argument:
        images = images.copy()
        images[weight_masks] = np.nan
        images[:, all_bad_idx, all_bad_idy] = 0
        minimum_file = np.nanmin(images, axis=0)
    else:
        minimum_file = np.amin(images, axis=0)

    # Scale the weight images by the background values and add them to the bk
    # Create an image of the total effective background (in DN) per pixel:
    # (which is the sum of all the background-scaled weight files)
    s = np.asarray([bv / et for bv, et in
                    zip(background_values, exptime_list)])
    bkgd_file = np.sum(weight_images * s[:, None, None], axis=0)

    # Scale the weight mask images by the square of the readnoise values.
    # Create an image of the total readnoise**2 per pixel
    # (which is the sum of all the input readnoise values).
    if weight_masks is None:
        rdn2 = sum(r**2 for r in readnoise_list)
        readnoise_file = rdn2 * np.ones_like(images[0])
    else:
        readnoise_file = np.sum(
            np.logical_not(weight_masks) *
            (np.asarray(readnoise_list)**2)[:, None, None],
            axis=0
        )

    # Create an image of the total effective exposure time per pixel:
    # (which is simply the sum of all the drizzle output weight files)
    weight_file = np.sum(weight_images, axis=0)

    # Scale up both the median and minimum arrays by the total effective
    # exposure time per pixel.
    minimum_file_weighted = minimum_file * weight_file
    median_file_weighted = median_file * weight_file
    del weight_file

    # Calculate the 1-sigma r.m.s.:
    #   variance = median_electrons + bkgd_electrons + readnoise**2
    #   rms = sqrt(variance)
    # This image has units of electrons.
    #
    # make this the abs value so that negative numbers don't throw an
    # exception?
    rms_file2 = np.fmax(
        median_file_weighted + bkgd_file + readnoise_file,
        np.zeros_like(median_file_weighted)
    )
    rms_file = np.sqrt(rms_file2)
    del bkgd_file, readnoise_file

    # For the median array, calculate the n-sigma lower threshold to the array
    # and incorporate that into the pixel values.
    median_rms_file = median_file_weighted - rms_file * combine_nsigma1

    if combine_grow != 0:
        # Do a more sophisticated rejection: For all cases where the minimum
        # pixel will be accepted instead of the median, set a lower threshold
        # for that pixel and the ones around it (ie become less conservative
        # in rejecting the median). This is because in cases of
        # triple-incidence cosmic rays, quite often the low-lying outliers
        # of the CRs can influence the median for the initial relatively high
        # value of sigma, so a lower threshold must be used to make sure that
        # the minimum is selected.
        #
        # This is done as follows:
        # 1) make an image which is zero everywhere except where the minimum
        #    will be accepted
        # 2) box-car smooth this image, to make these regions grow.
        # 3) In the file "median_rms_file_electrons", replace these pixels
        #    by median - combine_nsigma2 * rms
        #
        # Then use this image in the final replacement, in the same way as for
        # the case where this option is not selected.
        minimum_flag_file = np.less(minimum_file_weighted,
                                    median_rms_file).astype(np.float64)

        # The box size value must be an integer. This is not a problem since
        # __combine_grow should always be an integer type. The combine_grow
        # column in the MDRIZTAB should also be an integer type.
        boxsize = int(2 * combine_grow + 1)
        boxshape = (boxsize, boxsize)
        minimum_grow_file = np.zeros_like(images[0])

        # If the boxcar convolution has failed it is potentially for
        # two reasons:
        #   1) The kernel size for the boxcar is bigger than the actual image.
        #   2) The grow parameter was specified with a value < 0. This would
        #      result in an illegal boxshape kernel. The dimensions of the
        #      kernel box *MUST* be integer and greater than zero.
        #
        # If the boxcar convolution has failed, try to give a meaningful
        # explanation as to why based upon the conditionals described above.
        if boxsize <= 0:
            errormsg1 = "############################################################\n"
            errormsg1 += "# The boxcar convolution in minmed has failed. The 'grow' #\n"
            errormsg1 += "# parameter must be greater than or equal to zero. You #\n"
            errormsg1 += "# specified an input value for the 'grow' parameter of: #\n"
            errormsg1 += " combine_grow: " + str(combine_grow) + '\n'
            errormsg1 += "############################################################\n"
            raise ValueError(errormsg1)
        if boxsize > images.shape[1]:
            errormsg2 = "############################################################\n"
            errormsg2 += "# The boxcar convolution in minmed has failed. The 'grow' #\n"
            errormsg2 += "# parameter specified has resulted in a boxcar kernel that #\n"
            errormsg2 += "# has dimensions larger than the actual image. You #\n"
            errormsg2 += "# specified an input value for the 'grow' parameter of: #\n"
            errormsg2 += " combine_grow: " + str(combine_grow) + '\n'
            errormsg2 += "############################################################\n"
            print(images.shape[1:])
            raise ValueError(errormsg2)

        # Attempt the boxcar convolution using the boxshape based upon the user
        # input value of "grow"
        ker = np.ones((boxsize, boxsize)) / float(boxsize**2)
        minimum_grow_file = signal.convolve2d(minimum_flag_file, ker,
                                              boundary='fill', mode='same')

        median_rms_file = np.where(
            np.equal(minimum_grow_file, 0),
            median_file_weighted - rms_file * combine_nsigma1,
            median_file_weighted - rms_file * combine_nsigma2
        )
        del rms_file, minimum_grow_file

    # Finally decide whether to use the minimum or the median (in counts/s),
    # based on whether the median is more than 3 sigma above the minimum.
    combined_array = np.where(
        np.less(minimum_file_weighted, median_rms_file),
        minimum_file,
        median_file
    )
    # Set fill regions to a pixel value of 0.
    combined_array[all_bad_idx, all_bad_idy] = 0

    return combined_array

"def",
"min_med",
"(",
"images",
",",
"weight_images",
",",
"readnoise_list",
",",
"exptime_list",
",",
"background_values",
",",
"weight_masks",
"=",
"None",
",",
"combine_grow",
"=",
"1",
",",
"combine_nsigma1",
"=",
"4",
",",
"combine_nsigma2",
"=",
"3",
",",
"fillval",
"=",
"False",
")",
":",
"# In this case we want to calculate two things:",
"# 1) the median array, rejecting the highest pixel (thus running",
"# imcombine with nlow=0, nhigh=1, nkeep=1, using the masks)",
"# 2) the lowest valid pixel after applying the masks (thus running",
"# imcombine with nlow=0, nhigh=3, nkeep=1, using the masks)",
"#",
"# We also calculate the sum of the weight files (to produce the total",
"# effective exposure time for each pixel).",
"#",
"# The total effective background in the final image is calculated as",
"# follows:",
"# - convert background for each input image to counts/s",
"# (divide by exptime)",
"# - multiply this value by the weight image, to obtain the effective",
"# background counts (in DN) for each pixel, for each image",
"# - Add these images together, to obtain the total effective background",
"# for the combined image.",
"#",
"# Once we've made these two files, then calculate the SNR based on the",
"# median-pixel image, and compare with the minimum.",
"nimages",
"=",
"len",
"(",
"images",
")",
"combtype_median",
"=",
"'imedian'",
"if",
"fillval",
"else",
"'median'",
"images",
"=",
"np",
".",
"asarray",
"(",
"images",
")",
"weight_images",
"=",
"np",
".",
"asarray",
"(",
"weight_images",
")",
"if",
"weight_masks",
"==",
"[",
"]",
"or",
"weight_masks",
"is",
"None",
":",
"weight_masks",
"=",
"None",
"mask_sum",
"=",
"np",
".",
"zeros",
"(",
"images",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"np",
".",
"int16",
")",
"all_bad_idx",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"all_bad_idy",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"else",
":",
"weight_masks",
"=",
"np",
".",
"asarray",
"(",
"weight_masks",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"mask_sum",
"=",
"np",
".",
"sum",
"(",
"weight_masks",
",",
"axis",
"=",
"0",
",",
"dtype",
"=",
"np",
".",
"int16",
")",
"all_bad_idx",
",",
"all_bad_idy",
"=",
"np",
".",
"where",
"(",
"mask_sum",
"==",
"nimages",
")",
"# Create a different median image based upon the number of images in the",
"# input list.",
"if",
"nimages",
"==",
"2",
":",
"median_file",
"=",
"num_combine",
"(",
"images",
",",
"masks",
"=",
"weight_masks",
",",
"combination_type",
"=",
"'imean'",
"if",
"fillval",
"else",
"'mean'",
",",
"nlow",
"=",
"0",
",",
"nhigh",
"=",
"0",
",",
"lower",
"=",
"None",
",",
"upper",
"=",
"None",
")",
"else",
":",
"# The value of NHIGH=1 will cause problems when there is only 1 valid",
"# unmasked input image for that pixel due to a difference in behavior",
"# between 'num_combine' and 'iraf.imcombine'.",
"# This value may need to be adjusted on the fly based on the number of",
"# inputs and the number of masked values/pixel.",
"#",
"median_file",
"=",
"num_combine",
"(",
"images",
",",
"masks",
"=",
"weight_masks",
",",
"combination_type",
"=",
"combtype_median",
",",
"nlow",
"=",
"0",
",",
"nhigh",
"=",
"1",
",",
"lower",
"=",
"None",
",",
"upper",
"=",
"None",
")",
"# The following section of code will address the problem caused by",
"# having a value of nhigh = 1. This will behave in a way similar to",
"# the way the IRAF task IMCOMBINE behaves. In order to accomplish",
"# this, the following procedure will be followed:",
"# 1) The input masks will be summed.",
"# 2) The science data will be summed.",
"# 3) In the locations of the summed mask where the sum is 1 less than",
"# the total number of images, the value of that location in the",
"# summed science image will be used to replace the existing value",
"# in the existing median_file.",
"#",
"# This procedure is being used to prevent too much data from being",
"# thrown out of the image. Take for example the case of 3 input images.",
"# In two of the images the pixel locations have been masked out.",
"# Now, if nhigh is applied there will be no value to use for that",
"# position. However, if this new procedure is used that value in",
"# the resulting images will be the value that was rejected by the",
"# nhigh rejection step.",
"# We need to make certain that \"bad\" pixels in the sci data are set to",
"# 0. That way, when the sci images are summed, the value of the sum",
"# will only come from the \"good\" pixels.",
"if",
"weight_masks",
"is",
"None",
":",
"sci_sum",
"=",
"np",
".",
"sum",
"(",
"images",
",",
"axis",
"=",
"0",
")",
"if",
"nimages",
"==",
"1",
":",
"median_file",
"=",
"sci_sum",
"else",
":",
"sci_sum",
"=",
"np",
".",
"sum",
"(",
"images",
"*",
"np",
".",
"logical_not",
"(",
"weight_masks",
")",
",",
"axis",
"=",
"0",
")",
"# Use the summed sci image values in locations where the mask_sum",
"# indicates that there is only 1 good pixel to use. The value will",
"# be used in the median_file image",
"idx",
"=",
"np",
".",
"where",
"(",
"mask_sum",
"==",
"(",
"nimages",
"-",
"1",
")",
")",
"median_file",
"[",
"idx",
"]",
"=",
"sci_sum",
"[",
"idx",
"]",
"# Create the minimum image from the stack of input images.",
"if",
"weight_masks",
"is",
"not",
"None",
":",
"# make a copy of images to avoid side-effect of modifying input",
"# argument:",
"images",
"=",
"images",
".",
"copy",
"(",
")",
"images",
"[",
"weight_masks",
"]",
"=",
"np",
".",
"nan",
"images",
"[",
":",
",",
"all_bad_idx",
",",
"all_bad_idy",
"]",
"=",
"0",
"minimum_file",
"=",
"np",
".",
"nanmin",
"(",
"images",
",",
"axis",
"=",
"0",
")",
"else",
":",
"minimum_file",
"=",
"np",
".",
"amin",
"(",
"images",
",",
"axis",
"=",
"0",
")",
"# Scale the weight images by the background values and add them to the bk",
"# Create an image of the total effective background (in DN) per pixel:",
"# (which is the sum of all the background-scaled weight files)",
"s",
"=",
"np",
".",
"asarray",
"(",
"[",
"bv",
"/",
"et",
"for",
"bv",
",",
"et",
"in",
"zip",
"(",
"background_values",
",",
"exptime_list",
")",
"]",
")",
"bkgd_file",
"=",
"np",
".",
"sum",
"(",
"weight_images",
"*",
"s",
"[",
":",
",",
"None",
",",
"None",
"]",
",",
"axis",
"=",
"0",
")",
"# Scale the weight mask images by the square of the readnoise values.",
"# Create an image of the total readnoise**2 per pixel",
"# (which is the sum of all the input readnoise values).",
"if",
"weight_masks",
"is",
"None",
":",
"rdn2",
"=",
"sum",
"(",
"(",
"r",
"**",
"2",
"for",
"r",
"in",
"readnoise_list",
")",
")",
"readnoise_file",
"=",
"rdn2",
"*",
"np",
".",
"ones_like",
"(",
"images",
"[",
"0",
"]",
")",
"else",
":",
"readnoise_file",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"logical_not",
"(",
"weight_masks",
")",
"*",
"(",
"np",
".",
"asarray",
"(",
"readnoise_list",
")",
"**",
"2",
")",
"[",
":",
",",
"None",
",",
"None",
"]",
",",
"axis",
"=",
"0",
")",
"# Create an image of the total effective exposure time per pixel:",
"# (which is simply the sum of all the drizzle output weight files)",
"weight_file",
"=",
"np",
".",
"sum",
"(",
"weight_images",
",",
"axis",
"=",
"0",
")",
"# Scale up both the median and minimum arrays by the total effective",
"# exposure time per pixel.",
"minimum_file_weighted",
"=",
"minimum_file",
"*",
"weight_file",
"median_file_weighted",
"=",
"median_file",
"*",
"weight_file",
"del",
"weight_file",
"# Calculate the 1-sigma r.m.s.:",
"# variance = median_electrons + bkgd_electrons + readnoise**2",
"# rms = sqrt(variance)",
"# This image has units of electrons.",
"#",
"# make this the abs value so that negative numbers dont throw an exception?",
"rms_file2",
"=",
"np",
".",
"fmax",
"(",
"median_file_weighted",
"+",
"bkgd_file",
"+",
"readnoise_file",
",",
"np",
".",
"zeros_like",
"(",
"median_file_weighted",
")",
")",
"rms_file",
"=",
"np",
".",
"sqrt",
"(",
"rms_file2",
")",
"del",
"bkgd_file",
",",
"readnoise_file",
"# For the median array, calculate the n-sigma lower threshold to the array",
"# and incorporate that into the pixel values.",
"median_rms_file",
"=",
"median_file_weighted",
"-",
"rms_file",
"*",
"combine_nsigma1",
"if",
"combine_grow",
"!=",
"0",
":",
"# Do a more sophisticated rejection: For all cases where the minimum",
"# pixel will be accepted instead of the median, set a lower threshold",
"# for that pixel and the ones around it (ie become less conservative",
"# in rejecting the median). This is because in cases of",
"# triple-incidence cosmic rays, quite often the low-lying outliers",
"# of the CRs can influence the median for the initial relatively high",
"# value of sigma, so a lower threshold must be used to mnake sure that",
"# the minimum is selected.",
"#",
"# This is done as follows:",
"# 1) make an image which is zero everywhere except where the minimum",
"# will be accepted",
"# 2) box-car smooth this image, to make these regions grow.",
"# 3) In the file \"median_rms_file_electrons\", replace these pixels",
"# by median - combine_nsigma2 * rms",
"#",
"# Then use this image in the final replacement, in the same way as for",
"# the case where this option is not selected.",
"minimum_flag_file",
"=",
"np",
".",
"less",
"(",
"minimum_file_weighted",
",",
"median_rms_file",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"# The box size value must be an integer. This is not a problem since",
"# __combine_grow should always be an integer type. The combine_grow",
"# column in the MDRIZTAB should also be an integer type.",
"boxsize",
"=",
"int",
"(",
"2",
"*",
"combine_grow",
"+",
"1",
")",
"boxshape",
"=",
"(",
"boxsize",
",",
"boxsize",
")",
"minimum_grow_file",
"=",
"np",
".",
"zeros_like",
"(",
"images",
"[",
"0",
"]",
")",
"# If the boxcar convolution has failed it is potentially for",
"# two reasons:",
"# 1) The kernel size for the boxcar is bigger than the actual image.",
"# 2) The grow parameter was specified with a value < 0. This would",
"# result in an illegal boxshape kernel. The dimensions of the",
"# kernel box *MUST* be integer and greater than zero.",
"#",
"# If the boxcar convolution has failed, try to give a meaningfull",
"# explanation as to why based upon the conditionals described above.",
"if",
"boxsize",
"<=",
"0",
":",
"errormsg1",
"=",
"\"############################################################\\n\"",
"errormsg1",
"+=",
"\"# The boxcar convolution in minmed has failed. The 'grow' #\\n\"",
"errormsg1",
"+=",
"\"# parameter must be greater than or equal to zero. You #\\n\"",
"errormsg1",
"+=",
"\"# specified an input value for the 'grow' parameter of: #\\n\"",
"errormsg1",
"+=",
"\" combine_grow: \"",
"+",
"str",
"(",
"combine_grow",
")",
"+",
"'\\n'",
"errormsg1",
"+=",
"\"############################################################\\n\"",
"raise",
"ValueError",
"(",
"errormsg1",
")",
"if",
"boxsize",
">",
"images",
".",
"shape",
"[",
"1",
"]",
":",
"errormsg2",
"=",
"\"############################################################\\n\"",
"errormsg2",
"+=",
"\"# The boxcar convolution in minmed has failed. The 'grow' #\\n\"",
"errormsg2",
"+=",
"\"# parameter specified has resulted in a boxcar kernel that #\\n\"",
"errormsg2",
"+=",
"\"# has dimensions larger than the actual image. You #\\n\"",
"errormsg2",
"+=",
"\"# specified an input value for the 'grow' parameter of: #\\n\"",
"errormsg2",
"+=",
"\" combine_grow: \"",
"+",
"str",
"(",
"combine_grow",
")",
"+",
"'\\n'",
"errormsg2",
"+=",
"\"############################################################\\n\"",
"print",
"(",
"images",
".",
"shape",
"[",
"1",
":",
"]",
")",
"raise",
"ValueError",
"(",
"errormsg2",
")",
"# Attempt the boxcar convolution using the boxshape based upon the user",
"# input value of \"grow\"",
"ker",
"=",
"np",
".",
"ones",
"(",
"(",
"boxsize",
",",
"boxsize",
")",
")",
"/",
"float",
"(",
"boxsize",
"**",
"2",
")",
"minimum_grow_file",
"=",
"signal",
".",
"convolve2d",
"(",
"minimum_flag_file",
",",
"ker",
",",
"boundary",
"=",
"'fill'",
",",
"mode",
"=",
"'same'",
")",
"median_rms_file",
"=",
"np",
".",
"where",
"(",
"np",
".",
"equal",
"(",
"minimum_grow_file",
",",
"0",
")",
",",
"median_file_weighted",
"-",
"rms_file",
"*",
"combine_nsigma1",
",",
"median_file_weighted",
"-",
"rms_file",
"*",
"combine_nsigma2",
")",
"del",
"rms_file",
",",
"minimum_grow_file",
"# Finally decide whether to use the minimim or the median (in counts/s),",
"# based on whether the median is more than 3 sigma above the minimum.",
"combined_array",
"=",
"np",
".",
"where",
"(",
"np",
".",
"less",
"(",
"minimum_file_weighted",
",",
"median_rms_file",
")",
",",
"minimum_file",
",",
"median_file",
")",
"# Set fill regions to a pixel value of 0.",
"combined_array",
"[",
"all_bad_idx",
",",
"all_bad_idy",
"]",
"=",
"0",
"return",
"combined_array"
]
| 43.8 | 24.407018 |
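The heart of min_med is the per-pixel choice between the median and minimum stacks; as a standalone sketch with made-up numbers (not drizzlepac data):

import numpy as np
median = np.array([10., 50., 12.])   # median-combined pixels (electrons)
minimum = np.array([9., 11., 11.5])  # minimum-combined pixels
rms = np.sqrt(median)                # toy noise model
nsigma = 4
# Accept the minimum only where it lies more than nsigma*rms below the median.
combined = np.where(minimum < median - nsigma * rms, minimum, median)
# combined == [10., 11., 12.]: only the middle pixel (a cosmic-ray-like
# outlier in the median stack) falls back to the minimum.
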
def update_line(self, t, x, y, **kw):
    """overwrite data for trace t """
    self.panel.update_line(t, x, y, **kw)

"def",
"update_line",
"(",
"self",
",",
"t",
",",
"x",
",",
"y",
",",
"*",
"*",
"kw",
")",
":",
"self",
".",
"panel",
".",
"update_line",
"(",
"t",
",",
"x",
",",
"y",
",",
"*",
"*",
"kw",
")"
]
| 41 | 2.666667 |
def _object_with_attr(self, name):
    """
    Returns the first object that has the attribute `name`

    :param name: the attribute to filter by
    :type name: `str`
    :raises AttributeError: when no object has the named attribute
    """
    for obj in self._objects:
        if hasattr(obj, name):
            return obj
    raise AttributeError("No object has attribute {!r}".format(name))

"def",
"_object_with_attr",
"(",
"self",
",",
"name",
")",
":",
"for",
"obj",
"in",
"self",
".",
"_objects",
":",
"if",
"hasattr",
"(",
"obj",
",",
"name",
")",
":",
"return",
"obj",
"raise",
"AttributeError",
"(",
"\"No object has attribute {!r}\"",
".",
"format",
"(",
"name",
")",
")"
]
| 32.769231 | 16.923077 |
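A minimal sketch of how such a delegating lookup behaves, using a hypothetical Composite holder (not a class from the source):

class Composite:
    """Stand-in for whatever class owns _object_with_attr."""
    def __init__(self, *objects):
        self._objects = objects

    def _object_with_attr(self, name):
        for obj in self._objects:
            if hasattr(obj, name):
                return obj
        raise AttributeError("No object has attribute {!r}".format(name))

c = Composite("text", [1, 2, 3])
assert c._object_with_attr("append") == [1, 2, 3]  # lists have .append
assert c._object_with_attr("upper") == "text"      # strings have .upper
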
def get_time_axis(start_time, end_time, time_step, time_axis=None):
    """
    Create a list of datetimes based on a start time, end time and
    time step. If such a list is already passed in, then this is not
    necessary.

    Often either the start_time, end_time, time_step is passed into an
    app or the time_axis is passed in directly. This function returns a
    time_axis in both situations.
    """
    # Do this import here to avoid a circular dependency
    from ..lib import units

    if time_axis is not None:
        actual_dates_axis = []
        for t in time_axis:
            # If the user has entered the time_axis with commas, remove them.
            t = t.replace(',', '').strip()
            if t == '':
                continue
            actual_dates_axis.append(get_datetime(t))
        return actual_dates_axis
    else:
        if start_time is None:
            raise HydraPluginError("A start time must be specified")
        if end_time is None:
            raise HydraPluginError("An end time must be specified")
        if time_step is None:
            raise HydraPluginError("A time-step must be specified")

        start_date = get_datetime(start_time)
        end_date = get_datetime(end_time)
        delta_t, value, output_units = parse_time_step(time_step, units_ref=units)

        time_axis = [start_date]
        value = int(value)
        while start_date < end_date:
            # Months and years are a special case, so treat them differently
            if output_units.lower() == "mon":
                start_date = start_date + relativedelta(months=value)
            elif output_units.lower() == "yr":
                start_date = start_date + relativedelta(years=value)
            else:
                start_date += timedelta(seconds=delta_t)
            time_axis.append(start_date)
        return time_axis

"def",
"get_time_axis",
"(",
"start_time",
",",
"end_time",
",",
"time_step",
",",
"time_axis",
"=",
"None",
")",
":",
"#Do this import here to avoid a circular dependency",
"from",
".",
".",
"lib",
"import",
"units",
"if",
"time_axis",
"is",
"not",
"None",
":",
"actual_dates_axis",
"=",
"[",
"]",
"for",
"t",
"in",
"time_axis",
":",
"#If the user has entered the time_axis with commas, remove them.",
"t",
"=",
"t",
".",
"replace",
"(",
"','",
",",
"''",
")",
".",
"strip",
"(",
")",
"if",
"t",
"==",
"''",
":",
"continue",
"actual_dates_axis",
".",
"append",
"(",
"get_datetime",
"(",
"t",
")",
")",
"return",
"actual_dates_axis",
"else",
":",
"if",
"start_time",
"is",
"None",
":",
"raise",
"HydraPluginError",
"(",
"\"A start time must be specified\"",
")",
"if",
"end_time",
"is",
"None",
":",
"raise",
"HydraPluginError",
"(",
"\"And end time must be specified\"",
")",
"if",
"time_step",
"is",
"None",
":",
"raise",
"HydraPluginError",
"(",
"\"A time-step must be specified\"",
")",
"start_date",
"=",
"get_datetime",
"(",
"start_time",
")",
"end_date",
"=",
"get_datetime",
"(",
"end_time",
")",
"delta_t",
",",
"value",
",",
"output_units",
"=",
"parse_time_step",
"(",
"time_step",
",",
"units_ref",
"=",
"units",
")",
"time_axis",
"=",
"[",
"start_date",
"]",
"value",
"=",
"int",
"(",
"value",
")",
"while",
"start_date",
"<",
"end_date",
":",
"#Months and years are a special case, so treat them differently",
"if",
"(",
"output_units",
".",
"lower",
"(",
")",
"==",
"\"mon\"",
")",
":",
"start_date",
"=",
"start_date",
"+",
"relativedelta",
"(",
"months",
"=",
"value",
")",
"elif",
"(",
"output_units",
".",
"lower",
"(",
")",
"==",
"\"yr\"",
")",
":",
"start_date",
"=",
"start_date",
"+",
"relativedelta",
"(",
"years",
"=",
"value",
")",
"else",
":",
"start_date",
"+=",
"timedelta",
"(",
"seconds",
"=",
"delta_t",
")",
"time_axis",
".",
"append",
"(",
"start_date",
")",
"return",
"time_axis"
]
| 37.77551 | 20.428571 |
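The month/year special-casing matters because calendar steps are not a fixed number of seconds; a standalone illustration using dateutil (the dates below are arbitrary):

from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta

start = datetime(2020, 1, 31)
print(start + relativedelta(months=1))        # 2020-02-29: calendar-aware step
print(start + timedelta(seconds=30 * 86400))  # 2020-03-01: fixed 30-day step
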
def AvgPool(a, k, strides, padding, data_format):
    """
    Average pooling op.
    """
    if data_format.decode("ascii") == "NCHW":
        # note: the original had a stray trailing comma here, which would
        # have bound a 1-tuple instead of the rolled array
        a = np.rollaxis(a, 1, -1)
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    pool = np.average(patches, axis=tuple(range(-len(k), 0)))
    if data_format.decode("ascii") == "NCHW":
        pool = np.rollaxis(pool, -1, 1)
    return pool,

"def",
"AvgPool",
"(",
"a",
",",
"k",
",",
"strides",
",",
"padding",
",",
"data_format",
")",
":",
"if",
"data_format",
".",
"decode",
"(",
"\"ascii\"",
")",
"==",
"\"NCHW\"",
":",
"a",
"=",
"np",
".",
"rollaxis",
"(",
"a",
",",
"1",
",",
"-",
"1",
")",
",",
"patches",
"=",
"_pool_patches",
"(",
"a",
",",
"k",
",",
"strides",
",",
"padding",
".",
"decode",
"(",
"\"ascii\"",
")",
")",
"pool",
"=",
"np",
".",
"average",
"(",
"patches",
",",
"axis",
"=",
"tuple",
"(",
"range",
"(",
"-",
"len",
"(",
"k",
")",
",",
"0",
")",
")",
")",
"if",
"data_format",
".",
"decode",
"(",
"\"ascii\"",
")",
"==",
"\"NCHW\"",
":",
"pool",
"=",
"np",
".",
"rollaxis",
"(",
"pool",
",",
"-",
"1",
",",
"1",
")",
"return",
"pool",
","
]
| 28.071429 | 16.785714 |
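Average pooling itself can be sketched with plain reshapes, independent of the internal _pool_patches helper (illustrative only, non-overlapping 2x2 windows):

import numpy as np
# 1x4x4x1 NHWC input, 2x2 window, stride 2.
a = np.arange(16, dtype=float).reshape(1, 4, 4, 1)
# Split H and W into (block, within-block) pairs, then average each block.
patches = a.reshape(1, 2, 2, 2, 2, 1).transpose(0, 1, 3, 2, 4, 5)
pool = patches.mean(axis=(3, 4))
# pool[0, :, :, 0] == [[ 2.5,  4.5], [10.5, 12.5]]
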
def ticket_metric_show(self, ticket_metric_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/ticket_metrics#show-ticket-metrics"
    api_path = "/api/v2/ticket_metrics/{ticket_metric_id}.json"
    api_path = api_path.format(ticket_metric_id=ticket_metric_id)
    return self.call(api_path, **kwargs)

"def",
"ticket_metric_show",
"(",
"self",
",",
"ticket_metric_id",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/ticket_metrics/{ticket_metric_id}.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"ticket_metric_id",
"=",
"ticket_metric_id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"*",
"*",
"kwargs",
")"
]
| 66 | 26 |
def resetStats(self):
    """ Reset the learning and inference stats. This will usually be called by
    user code at the start of each inference run (for a particular data set).
    """
    self._stats = dict()
    self._internalStats = dict()

    self._internalStats['nInfersSinceReset'] = 0
    self._internalStats['nPredictions'] = 0

    # New prediction score
    self._internalStats['curPredictionScore2'] = 0
    self._internalStats['predictionScoreTotal2'] = 0
    self._internalStats['curFalseNegativeScore'] = 0
    self._internalStats['falseNegativeScoreTotal'] = 0
    self._internalStats['curFalsePositiveScore'] = 0
    self._internalStats['falsePositiveScoreTotal'] = 0

    self._internalStats['pctExtraTotal'] = 0
    self._internalStats['pctMissingTotal'] = 0
    self._internalStats['curMissing'] = 0
    self._internalStats['curExtra'] = 0
    self._internalStats['totalMissing'] = 0
    self._internalStats['totalExtra'] = 0

    # Sequence signature statistics. Note that we don't reset the sequence
    # signature list itself.
    self._internalStats['prevSequenceSignature'] = None
    if self.collectSequenceStats:
        self._internalStats['confHistogram'] = \
            numpy.zeros((self.numberOfCols, self.cellsPerColumn),
                        dtype="float32")

"def",
"resetStats",
"(",
"self",
")",
":",
"self",
".",
"_stats",
"=",
"dict",
"(",
")",
"self",
".",
"_internalStats",
"=",
"dict",
"(",
")",
"self",
".",
"_internalStats",
"[",
"'nInfersSinceReset'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'nPredictions'",
"]",
"=",
"0",
"#New prediction score",
"self",
".",
"_internalStats",
"[",
"'curPredictionScore2'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'predictionScoreTotal2'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'curFalseNegativeScore'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'falseNegativeScoreTotal'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'curFalsePositiveScore'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'falsePositiveScoreTotal'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'pctExtraTotal'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'pctMissingTotal'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'curMissing'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'curExtra'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'totalMissing'",
"]",
"=",
"0",
"self",
".",
"_internalStats",
"[",
"'totalExtra'",
"]",
"=",
"0",
"# Sequence signature statistics. Note that we don't reset the sequence",
"# signature list itself.",
"self",
".",
"_internalStats",
"[",
"'prevSequenceSignature'",
"]",
"=",
"None",
"if",
"self",
".",
"collectSequenceStats",
":",
"self",
".",
"_internalStats",
"[",
"'confHistogram'",
"]",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"self",
".",
"numberOfCols",
",",
"self",
".",
"cellsPerColumn",
")",
",",
"dtype",
"=",
"\"float32\"",
")"
]
| 40.290323 | 15.483871 |
def fetch_changes(repo_path, up_commit='master'):
    """
    Fetch latest changes from stage and touch .timestamp
    if any python sources have been modified.
    """
    last_up_commit = None
    prevcwd = os.getcwd()
    try:
        gitexe = 'git'
        os.chdir(repo_path)
        old_sources_timestamp = sources_latest_timestamp('.')
        shell_command([gitexe, 'pull'])
        # decode() so the revisions are text, not bytes, under Python 3
        last_up_commit = subprocess.check_output(
            ['git', 'rev-parse', 'HEAD']).decode()
        shell_command([gitexe, 'checkout', up_commit])
        up_commit = subprocess.check_output(
            ['git', 'rev-parse', 'HEAD']).decode()
        new_sources_timestamp = sources_latest_timestamp('.')
        if old_sources_timestamp < new_sources_timestamp:
            with open('.timestamp', 'w') as up_commit_file:
                up_commit_file.write(up_commit)
    finally:
        os.chdir(prevcwd)
    return last_up_commit, up_commit

"def",
"fetch_changes",
"(",
"repo_path",
",",
"up_commit",
"=",
"'master'",
")",
":",
"last_up_commit",
"=",
"None",
"prevcwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"try",
":",
"gitexe",
"=",
"'git'",
"os",
".",
"chdir",
"(",
"repo_path",
")",
"old_sources_timestamp",
"=",
"sources_latest_timestamp",
"(",
"'.'",
")",
"shell_command",
"(",
"[",
"gitexe",
",",
"'pull'",
"]",
")",
"last_up_commit",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'git'",
",",
"'rev-parse'",
",",
"'HEAD'",
"]",
")",
"shell_command",
"(",
"[",
"gitexe",
",",
"'checkout'",
",",
"up_commit",
"]",
")",
"up_commit",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"'git'",
",",
"'rev-parse'",
",",
"'HEAD'",
"]",
")",
"new_sources_timestamp",
"=",
"sources_latest_timestamp",
"(",
"'.'",
")",
"if",
"old_sources_timestamp",
"<",
"new_sources_timestamp",
":",
"with",
"open",
"(",
"'.timestamp'",
",",
"'w'",
")",
"as",
"up_commit_file",
":",
"up_commit_file",
".",
"write",
"(",
"up_commit",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"prevcwd",
")",
"return",
"last_up_commit",
",",
"up_commit"
]
| 39.681818 | 15.5 |
def kill(self, sig):
    """Send the given signal to the child application.

    In keeping with UNIX tradition it has a misleading name. It does not
    necessarily kill the child unless you send the right signal. See the
    :mod:`signal` module for constants representing signal numbers.
    """
    # Same as os.kill, but the pid is given for you.
    if self.isalive():
        os.kill(self.pid, sig)

"def",
"kill",
"(",
"self",
",",
"sig",
")",
":",
"# Same as os.kill, but the pid is given for you.",
"if",
"self",
".",
"isalive",
"(",
")",
":",
"os",
".",
"kill",
"(",
"self",
".",
"pid",
",",
"sig",
")"
]
| 38.909091 | 21.727273 |
def _set_filter(self, v, load=False):
    """
    Setter method for filter, mapped from YANG variable /interface_vlan/interface/vlan/ip/arp/inspection/filter (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_filter is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_filter() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=filter.filter, is_container='container', presence=False, yang_name="filter", rest_name="filter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Arp Inspection Filter'}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """filter must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=filter.filter, is_container='container', presence=False, yang_name="filter", rest_name="filter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Arp Inspection Filter'}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)""",
        })

    self.__filter = t
    if hasattr(self, '_set'):
        self._set()

"def",
"_set_filter",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"filter",
".",
"filter",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"filter\"",
",",
"rest_name",
"=",
"\"filter\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Configure Arp Inspection Filter'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-dai'",
",",
"defining_module",
"=",
"'brocade-dai'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"filter must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=filter.filter, is_container='container', presence=False, yang_name=\"filter\", rest_name=\"filter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Arp Inspection Filter'}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__filter",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| 71.454545 | 33.772727 |
def play_audio(filename: str):
    """
    Args:
        filename: Audio filename
    """
    import platform
    from subprocess import Popen
    player = 'play' if platform.system() == 'Darwin' else 'aplay'
    Popen([player, '-q', filename])

"def",
"play_audio",
"(",
"filename",
":",
"str",
")",
":",
"import",
"platform",
"from",
"subprocess",
"import",
"Popen",
"player",
"=",
"'play'",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Darwin'",
"else",
"'aplay'",
"Popen",
"(",
"[",
"player",
",",
"'-q'",
",",
"filename",
"]",
")"
]
| 23.6 | 14.8 |
def play_move_msg(self, move_msg):
    """Another play move function for move message.

    Parameters
    ----------
    move_msg : string
        a valid message should be in:
        "[move type]: [X], [Y]"
    """
    move_type, move_x, move_y = self.parse_move(move_msg)
    self.play_move(move_type, move_x, move_y)

"def",
"play_move_msg",
"(",
"self",
",",
"move_msg",
")",
":",
"move_type",
",",
"move_x",
",",
"move_y",
"=",
"self",
".",
"parse_move",
"(",
"move_msg",
")",
"self",
".",
"play_move",
"(",
"move_type",
",",
"move_x",
",",
"move_y",
")"
]
| 31.545455 | 12.818182 |
def split_source(source_code):
    '''Split source code into lines
    '''
    eol_chars = get_eol_chars(source_code)
    if eol_chars:
        return source_code.split(eol_chars)
    else:
        return [source_code]

"def",
"split_source",
"(",
"source_code",
")",
":",
"eol_chars",
"=",
"get_eol_chars",
"(",
"source_code",
")",
"if",
"eol_chars",
":",
"return",
"source_code",
".",
"split",
"(",
"eol_chars",
")",
"else",
":",
"return",
"[",
"source_code",
"]"
]
| 27.25 | 14.5 |
def setShowGridRows(self, state):
    """
    Sets whether or not the grid rows should be rendered when drawing the \
    grid.

    :param state | <bool>
    """
    delegate = self.itemDelegate()
    if isinstance(delegate, XTreeWidgetDelegate):
        delegate.setShowGridRows(state)

"def",
"setShowGridRows",
"(",
"self",
",",
"state",
")",
":",
"delegate",
"=",
"self",
".",
"itemDelegate",
"(",
")",
"if",
"(",
"isinstance",
"(",
"delegate",
",",
"XTreeWidgetDelegate",
")",
")",
":",
"delegate",
".",
"setShowGridRows",
"(",
"state",
")"
]
| 33.8 | 12.8 |
def parse_load_fk(cls, data: Dict[str, List[Dict[str, object]]]) -> Dict[str, List[Dict[str, object]]]:
    """
    :param data: {
        <column>: role,
        <column2>: role,
        <column>: {
            'role': role,
            'loadfk': { ... },
        },
    }
    :return: {
        <column>: {
            'role': role,
        },
        ...
        <column3>: {
            'role': role,
            'loadfk': { ... },
        },
    }
    """
    default_value_dict = {'role': None, 'as': None, 'table': None, 'loadfk': None}

    def value_normalize_dict(value):
        def check(k, v):
            if k == 'role': return isinstance(v, str)
            if k == 'as': return isinstance(v, str)
            if k == 'table': return isinstance(v, str)
            if k == 'loadfk': return isinstance(v, dict)

        valid = {k: v for k, v in value.items() if check(k, v)}
        if not valid: return default_value_dict.copy()
        if 'loadfk' in valid and valid['loadfk']:
            valid['loadfk'] = cls.parse_load_fk(valid['loadfk'])
        for k, v in default_value_dict.items():
            valid.setdefault(k, v)
        return valid

    def value_normalize(value, no_list=True):
        if value is None:
            return default_value_dict.copy()
        elif not no_list and isinstance(value, List):
            # <column>: [value1, value2, ...]
            return list(map(value_normalize, value))
        elif isinstance(value, str):
            # <column>: role
            val = default_value_dict.copy()
            val['role'] = value
            return val
        elif isinstance(value, Dict):
            # {'role': <str>, 'as': <str>, ...}
            return value_normalize_dict(value)
        else:
            raise InvalidParams('Invalid syntax for "loadfk": %s' % value)

    # Check every item
    new_data = {}
    if not isinstance(data, dict):
        raise InvalidParams('Invalid syntax for "loadfk": %s' % data)
    for k, v in data.items():
        nv = value_normalize(v, False)
        new_data[k] = nv if isinstance(nv, List) else [nv]
    return new_data

"def",
"parse_load_fk",
"(",
"cls",
",",
"data",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"Dict",
"[",
"str",
",",
"object",
"]",
"]",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"List",
"[",
"Dict",
"[",
"str",
",",
"object",
"]",
"]",
"]",
":",
"default_value_dict",
"=",
"{",
"'role'",
":",
"None",
",",
"'as'",
":",
"None",
",",
"'table'",
":",
"None",
",",
"'loadfk'",
":",
"None",
"}",
"def",
"value_normalize_dict",
"(",
"value",
")",
":",
"def",
"check",
"(",
"k",
",",
"v",
")",
":",
"if",
"k",
"==",
"'role'",
":",
"return",
"isinstance",
"(",
"v",
",",
"str",
")",
"if",
"k",
"==",
"'as'",
":",
"return",
"isinstance",
"(",
"v",
",",
"str",
")",
"if",
"k",
"==",
"'table'",
":",
"return",
"isinstance",
"(",
"v",
",",
"str",
")",
"if",
"k",
"==",
"'loadfk'",
":",
"return",
"isinstance",
"(",
"v",
",",
"dict",
")",
"valid",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
"if",
"check",
"(",
"k",
",",
"v",
")",
"}",
"if",
"not",
"valid",
":",
"return",
"default_value_dict",
".",
"copy",
"(",
")",
"if",
"'loadfk'",
"in",
"valid",
"and",
"valid",
"[",
"'loadfk'",
"]",
":",
"valid",
"[",
"'loadfk'",
"]",
"=",
"cls",
".",
"parse_load_fk",
"(",
"valid",
"[",
"'loadfk'",
"]",
")",
"for",
"k",
",",
"v",
"in",
"default_value_dict",
".",
"items",
"(",
")",
":",
"valid",
".",
"setdefault",
"(",
"k",
",",
"v",
")",
"return",
"valid",
"def",
"value_normalize",
"(",
"value",
",",
"no_list",
"=",
"True",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"default_value_dict",
".",
"copy",
"(",
")",
"elif",
"not",
"no_list",
"and",
"isinstance",
"(",
"value",
",",
"List",
")",
":",
"# <column>: [value1, value2, ...]",
"return",
"list",
"(",
"map",
"(",
"value_normalize",
",",
"value",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"# <column>: role",
"val",
"=",
"default_value_dict",
".",
"copy",
"(",
")",
"val",
"[",
"'role'",
"]",
"=",
"value",
"return",
"val",
"elif",
"isinstance",
"(",
"value",
",",
"Dict",
")",
":",
"# {'role': <str>, 'as': <str>, ...}",
"return",
"value_normalize_dict",
"(",
"value",
")",
"else",
":",
"raise",
"InvalidParams",
"(",
"'Invalid syntax for \"loadfk\": %s'",
"%",
"value",
")",
"# 对全部项进行检查",
"new_data",
"=",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"raise",
"InvalidParams",
"(",
"'Invalid syntax for \"loadfk\": %s'",
"%",
"data",
")",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"nv",
"=",
"value_normalize",
"(",
"v",
",",
"False",
")",
"new_data",
"[",
"k",
"]",
"=",
"nv",
"if",
"isinstance",
"(",
"nv",
",",
"List",
")",
"else",
"[",
"nv",
"]",
"return",
"new_data"
]
| 36.580645 | 17.290323 |
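The normalization above is easiest to see on a concrete value. A minimal sketch of the expected mapping, assuming the classmethod is attached to some view class (called `View` here purely for illustration):

```python
# Bare string values become a single-entry list with the string as 'role':
normalized = View.parse_load_fk({'user': 'admin'})
assert normalized == {
    'user': [{'role': 'admin', 'as': None, 'table': None, 'loadfk': None}]
}

# Nested 'loadfk' dicts are normalized recursively with the same defaults:
nested = View.parse_load_fk({'topic': {'loadfk': {'user': None}}})
assert nested['topic'][0]['loadfk'] == {
    'user': [{'role': None, 'as': None, 'table': None, 'loadfk': None}]
}
```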
def add_packages(self, packages):
"""
Adds an automatic resolution of urls into tasks.
:param packages: The url will determine package/module and the class.
:return: self
"""
# type: (List[str])->TaskNamespace
assert isinstance(packages, list), "Packages must be list of strings."
self._task_packages += packages
return self | [
"def",
"add_packages",
"(",
"self",
",",
"packages",
")",
":",
"# type: (List[str])->TaskNamespace",
"assert",
"isinstance",
"(",
"packages",
",",
"list",
")",
",",
"\"Packages must be list of strings.\"",
"self",
".",
"_task_packages",
"+=",
"packages",
"return",
"self"
]
| 38.7 | 14.1 |
def eqstr(a, b):
"""
Determine whether two strings are equivalent.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/eqstr_c.html
:param a: Arbitrary character string.
:type a: str
:param b: Arbitrary character string.
:type b: str
:return: True if A and B are equivalent.
:rtype: bool
"""
return bool(libspice.eqstr_c(stypes.stringToCharP(a), stypes.stringToCharP(b))) | [
"def",
"eqstr",
"(",
"a",
",",
"b",
")",
":",
"return",
"bool",
"(",
"libspice",
".",
"eqstr_c",
"(",
"stypes",
".",
"stringToCharP",
"(",
"a",
")",
",",
"stypes",
".",
"stringToCharP",
"(",
"b",
")",
")",
")"
]
| 29.142857 | 19 |
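A short usage sketch for the wrapper above, assuming SpiceyPy is installed; per the NAIF documentation, equivalence ignores case and white space:

```python
import spiceypy

# "A short string    " and "ashortstring" differ only in case/blanks,
# so eqstr reports them as equivalent:
assert spiceypy.eqstr("A short string    ", "ashortstring")
assert not spiceypy.eqstr("stringA", "stringB")
```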
def Yashar(x, rhol, rhog, mul, mug, m, D, g=g):
r'''Calculates void fraction in two-phase flow according to the model of
[1]_ also given in [2]_ and [3]_.
.. math::
\alpha = \left[1 + \frac{1}{Ft} + X_{tt}\right]^{-0.321}
.. math::
Ft = \left[\frac{G_{tp}^2 x^3}{(1-x)\rho_g^2gD}\right]^{0.5}
Parameters
----------
x : float
Quality at the specific tube interval []
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
mug : float
Viscosity of gas [Pa*s]
m : float
Mass flow rate of both phases, [kg/s]
D : float
Diameter of the channel, [m]
g : float, optional
Acceleration due to gravity, [m/s^2]
Returns
-------
alpha : float
Void fraction (area of gas / total area of channel), [-]
Notes
-----
[1]_ has been reviewed; both [2]_ and [3]_ give it correctly.
Examples
--------
>>> Yashar(.4, 800, 2.5, 1E-3, 1E-5, m=1, D=0.3)
0.7934893185789146
References
----------
.. [1] Yashar, D. A., M. J. Wilson, H. R. Kopke, D. M. Graham, J. C. Chato,
and T. A. Newell. "An Investigation of Refrigerant Void Fraction in
Horizontal, Microfin Tubes." HVAC&R Research 7, no. 1 (January 1, 2001):
67-82. doi:10.1080/10789669.2001.10391430.
.. [2] Xu, Yu, and Xiande Fang. "Correlations of Void Fraction for Two-
Phase Refrigerant Flow in Pipes." Applied Thermal Engineering 64, no.
1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032.
.. [3] Dalkilic, A. S., S. Laohalertdecha, and S. Wongwises. "Effect of
Void Fraction Models on the Two-Phase Friction Factor of R134a during
Condensation in Vertical Downward Flow in a Smooth Tube." International
Communications in Heat and Mass Transfer 35, no. 8 (October 2008):
921-27. doi:10.1016/j.icheatmasstransfer.2008.04.001.
'''
G = m/(pi/4*D**2)
Ft = (G**2*x**3/((1-x)*rhog**2*g*D))**0.5
Xtt = Lockhart_Martinelli_Xtt(x, rhol, rhog, mul, mug)
return (1 + 1./Ft + Xtt)**-0.321 | [
"def",
"Yashar",
"(",
"x",
",",
"rhol",
",",
"rhog",
",",
"mul",
",",
"mug",
",",
"m",
",",
"D",
",",
"g",
"=",
"g",
")",
":",
"G",
"=",
"m",
"/",
"(",
"pi",
"/",
"4",
"*",
"D",
"**",
"2",
")",
"Ft",
"=",
"(",
"G",
"**",
"2",
"*",
"x",
"**",
"3",
"/",
"(",
"(",
"1",
"-",
"x",
")",
"*",
"rhog",
"**",
"2",
"*",
"g",
"*",
"D",
")",
")",
"**",
"0.5",
"Xtt",
"=",
"Lockhart_Martinelli_Xtt",
"(",
"x",
",",
"rhol",
",",
"rhog",
",",
"mul",
",",
"mug",
")",
"return",
"(",
"1",
"+",
"1.",
"/",
"Ft",
"+",
"Xtt",
")",
"**",
"-",
"0.321"
]
| 35.048387 | 24.080645 |
def index(self, value):
"""
Return index of *value* in self.
Raises ValueError if *value* is not found.
"""
# pylint: disable=arguments-differ
for idx, val in enumerate(self):
if value == val:
return idx
raise ValueError('{0!r} is not in dict'.format(value)) | [
"def",
"index",
"(",
"self",
",",
"value",
")",
":",
"# pylint: disable=arguments-differ",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"value",
"==",
"val",
":",
"return",
"idx",
"raise",
"ValueError",
"(",
"'{0!r} is not in dict'",
".",
"format",
"(",
"value",
")",
")"
]
| 30.272727 | 10.636364 |
def generate_uuid():
"""Generate a UUID."""
r_uuid = base64.urlsafe_b64encode(uuid.uuid4().bytes)
return r_uuid.decode().replace('=', '') | [
"def",
"generate_uuid",
"(",
")",
":",
"r_uuid",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"uuid",
".",
"uuid4",
"(",
")",
".",
"bytes",
")",
"return",
"r_uuid",
".",
"decode",
"(",
")",
".",
"replace",
"(",
"'='",
",",
"''",
")"
]
| 36.5 | 10 |
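A quick property of the helper above: url-safe base64 of the 16 UUID bytes is 24 characters ending in `==`, so after stripping the padding the token is always 22 URL-safe characters:

```python
token = generate_uuid()
assert len(token) == 22     # 24 base64 characters minus the '==' padding
assert '=' not in token     # padding removed, safe to embed in URLs
```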
def sysidpath(ignore_options=False):
""" get a unique identifier for the machine running this function """
# in the event we have to make our own
    # this should not be passed in as a parameter
# since we need these definitions to be more or less static
failover = Path('/tmp/machine-id')
if not ignore_options:
options = (
Path('/etc/machine-id'),
failover, # always read to see if we somehow managed to persist this
)
for option in options:
if (option.exists() and
os.access(option, os.R_OK) and
option.stat().st_size > 0):
return option
uuid = uuid4()
with open(failover, 'wt') as f:
f.write(uuid.hex)
return failover | [
"def",
"sysidpath",
"(",
"ignore_options",
"=",
"False",
")",
":",
"# in the event we have to make our own",
"# this should not be passed in a as a parameter",
"# since we need these definitions to be more or less static",
"failover",
"=",
"Path",
"(",
"'/tmp/machine-id'",
")",
"if",
"not",
"ignore_options",
":",
"options",
"=",
"(",
"Path",
"(",
"'/etc/machine-id'",
")",
",",
"failover",
",",
"# always read to see if we somehow managed to persist this",
")",
"for",
"option",
"in",
"options",
":",
"if",
"(",
"option",
".",
"exists",
"(",
")",
"and",
"os",
".",
"access",
"(",
"option",
",",
"os",
".",
"R_OK",
")",
"and",
"option",
".",
"stat",
"(",
")",
".",
"st_size",
">",
"0",
")",
":",
"return",
"option",
"uuid",
"=",
"uuid4",
"(",
")",
"with",
"open",
"(",
"failover",
",",
"'wt'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"uuid",
".",
"hex",
")",
"return",
"failover"
]
| 32.956522 | 15.956522 |
def __decorate_axis(axis, ax_type):
'''Configure axis tickers, locators, and labels'''
if ax_type == 'tonnetz':
axis.set_major_formatter(TonnetzFormatter())
axis.set_major_locator(FixedLocator(0.5 + np.arange(6)))
axis.set_label_text('Tonnetz')
elif ax_type == 'chroma':
axis.set_major_formatter(ChromaFormatter())
axis.set_major_locator(FixedLocator(0.5 +
np.add.outer(12 * np.arange(10),
[0, 2, 4, 5, 7, 9, 11]).ravel()))
axis.set_label_text('Pitch class')
elif ax_type == 'tempo':
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_label_text('BPM')
elif ax_type == 'time':
axis.set_major_formatter(TimeFormatter(unit=None, lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time')
elif ax_type == 's':
axis.set_major_formatter(TimeFormatter(unit='s', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (s)')
elif ax_type == 'ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (ms)')
elif ax_type == 'lag':
axis.set_major_formatter(TimeFormatter(unit=None, lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag')
elif ax_type == 'lag_s':
axis.set_major_formatter(TimeFormatter(unit='s', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (s)')
elif ax_type == 'lag_ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (ms)')
elif ax_type == 'cqt_note':
axis.set_major_formatter(NoteFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(NoteFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Note')
elif ax_type in ['cqt_hz']:
axis.set_major_formatter(LogHzFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(LogHzFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Hz')
elif ax_type in ['mel', 'log']:
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(SymmetricalLogLocator(axis.get_transform()))
axis.set_label_text('Hz')
elif ax_type in ['linear', 'hz']:
axis.set_major_formatter(ScalarFormatter())
axis.set_label_text('Hz')
elif ax_type in ['frames']:
axis.set_label_text('Frames')
elif ax_type in ['off', 'none', None]:
axis.set_label_text('')
axis.set_ticks([]) | [
"def",
"__decorate_axis",
"(",
"axis",
",",
"ax_type",
")",
":",
"if",
"ax_type",
"==",
"'tonnetz'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TonnetzFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"FixedLocator",
"(",
"0.5",
"+",
"np",
".",
"arange",
"(",
"6",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Tonnetz'",
")",
"elif",
"ax_type",
"==",
"'chroma'",
":",
"axis",
".",
"set_major_formatter",
"(",
"ChromaFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"FixedLocator",
"(",
"0.5",
"+",
"np",
".",
"add",
".",
"outer",
"(",
"12",
"*",
"np",
".",
"arange",
"(",
"10",
")",
",",
"[",
"0",
",",
"2",
",",
"4",
",",
"5",
",",
"7",
",",
"9",
",",
"11",
"]",
")",
".",
"ravel",
"(",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Pitch class'",
")",
"elif",
"ax_type",
"==",
"'tempo'",
":",
"axis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'BPM'",
")",
"elif",
"ax_type",
"==",
"'time'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"None",
",",
"lag",
"=",
"False",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Time'",
")",
"elif",
"ax_type",
"==",
"'s'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'s'",
",",
"lag",
"=",
"False",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Time (s)'",
")",
"elif",
"ax_type",
"==",
"'ms'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'ms'",
",",
"lag",
"=",
"False",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Time (ms)'",
")",
"elif",
"ax_type",
"==",
"'lag'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"None",
",",
"lag",
"=",
"True",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Lag'",
")",
"elif",
"ax_type",
"==",
"'lag_s'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'s'",
",",
"lag",
"=",
"True",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Lag (s)'",
")",
"elif",
"ax_type",
"==",
"'lag_ms'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'ms'",
",",
"lag",
"=",
"True",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Lag (ms)'",
")",
"elif",
"ax_type",
"==",
"'cqt_note'",
":",
"axis",
".",
"set_major_formatter",
"(",
"NoteFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
")",
")",
"axis",
".",
"set_minor_formatter",
"(",
"NoteFormatter",
"(",
"major",
"=",
"False",
")",
")",
"axis",
".",
"set_minor_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
",",
"subs",
"=",
"2.0",
"**",
"(",
"np",
".",
"arange",
"(",
"1",
",",
"12",
")",
"/",
"12.0",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Note'",
")",
"elif",
"ax_type",
"in",
"[",
"'cqt_hz'",
"]",
":",
"axis",
".",
"set_major_formatter",
"(",
"LogHzFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
")",
")",
"axis",
".",
"set_minor_formatter",
"(",
"LogHzFormatter",
"(",
"major",
"=",
"False",
")",
")",
"axis",
".",
"set_minor_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
",",
"subs",
"=",
"2.0",
"**",
"(",
"np",
".",
"arange",
"(",
"1",
",",
"12",
")",
"/",
"12.0",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Hz'",
")",
"elif",
"ax_type",
"in",
"[",
"'mel'",
",",
"'log'",
"]",
":",
"axis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"SymmetricalLogLocator",
"(",
"axis",
".",
"get_transform",
"(",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Hz'",
")",
"elif",
"ax_type",
"in",
"[",
"'linear'",
",",
"'hz'",
"]",
":",
"axis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Hz'",
")",
"elif",
"ax_type",
"in",
"[",
"'frames'",
"]",
":",
"axis",
".",
"set_label_text",
"(",
"'Frames'",
")",
"elif",
"ax_type",
"in",
"[",
"'off'",
",",
"'none'",
",",
"None",
"]",
":",
"axis",
".",
"set_label_text",
"(",
"''",
")",
"axis",
".",
"set_ticks",
"(",
"[",
"]",
")"
]
| 40.183908 | 19.287356 |
def pypi_render(source):
"""
Copied (and slightly adapted) from pypi.description_tools
"""
ALLOWED_SCHEMES = '''file ftp gopher hdl http https imap mailto mms news
nntp prospero rsync rtsp rtspu sftp shttp sip sips snews svn svn+ssh
telnet wais irc'''.split()
settings_overrides = {
"raw_enabled": 0, # no raw HTML code
"file_insertion_enabled": 0, # no file/URL access
"halt_level": 2, # at warnings or errors, raise an exception
"report_level": 5, # never report problems with the reST code
}
# capture publishing errors, they go to stderr
old_stderr = sys.stderr
sys.stderr = s = StringIO.StringIO()
parts = None
try:
# Convert reStructuredText to HTML using Docutils.
document = publish_doctree(source=source,
settings_overrides=settings_overrides)
for node in document.traverse():
if node.tagname == '#text':
continue
if node.hasattr('refuri'):
uri = node['refuri']
elif node.hasattr('uri'):
uri = node['uri']
else:
continue
o = urlparse.urlparse(uri)
if o.scheme not in ALLOWED_SCHEMES:
raise TransformError('link scheme not allowed')
# now turn the transformed document into HTML
reader = readers.doctree.Reader(parser_name='null')
pub = Publisher(reader, source=io.DocTreeInput(document),
destination_class=io.StringOutput)
pub.set_writer('html')
pub.process_programmatic_settings(None, settings_overrides, None)
pub.set_destination(None, None)
pub.publish()
parts = pub.writer.parts
except:
pass
sys.stderr = old_stderr
# original text if publishing errors occur
if parts is None or len(s.getvalue()) > 0:
return None
else:
return parts['body'] | [
"def",
"pypi_render",
"(",
"source",
")",
":",
"ALLOWED_SCHEMES",
"=",
"'''file ftp gopher hdl http https imap mailto mms news\n nntp prospero rsync rtsp rtspu sftp shttp sip sips snews svn svn+ssh\n telnet wais irc'''",
".",
"split",
"(",
")",
"settings_overrides",
"=",
"{",
"\"raw_enabled\"",
":",
"0",
",",
"# no raw HTML code",
"\"file_insertion_enabled\"",
":",
"0",
",",
"# no file/URL access",
"\"halt_level\"",
":",
"2",
",",
"# at warnings or errors, raise an exception",
"\"report_level\"",
":",
"5",
",",
"# never report problems with the reST code",
"}",
"# capture publishing errors, they go to stderr",
"old_stderr",
"=",
"sys",
".",
"stderr",
"sys",
".",
"stderr",
"=",
"s",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"parts",
"=",
"None",
"try",
":",
"# Convert reStructuredText to HTML using Docutils.",
"document",
"=",
"publish_doctree",
"(",
"source",
"=",
"source",
",",
"settings_overrides",
"=",
"settings_overrides",
")",
"for",
"node",
"in",
"document",
".",
"traverse",
"(",
")",
":",
"if",
"node",
".",
"tagname",
"==",
"'#text'",
":",
"continue",
"if",
"node",
".",
"hasattr",
"(",
"'refuri'",
")",
":",
"uri",
"=",
"node",
"[",
"'refuri'",
"]",
"elif",
"node",
".",
"hasattr",
"(",
"'uri'",
")",
":",
"uri",
"=",
"node",
"[",
"'uri'",
"]",
"else",
":",
"continue",
"o",
"=",
"urlparse",
".",
"urlparse",
"(",
"uri",
")",
"if",
"o",
".",
"scheme",
"not",
"in",
"ALLOWED_SCHEMES",
":",
"raise",
"TransformError",
"(",
"'link scheme not allowed'",
")",
"# now turn the transformed document into HTML",
"reader",
"=",
"readers",
".",
"doctree",
".",
"Reader",
"(",
"parser_name",
"=",
"'null'",
")",
"pub",
"=",
"Publisher",
"(",
"reader",
",",
"source",
"=",
"io",
".",
"DocTreeInput",
"(",
"document",
")",
",",
"destination_class",
"=",
"io",
".",
"StringOutput",
")",
"pub",
".",
"set_writer",
"(",
"'html'",
")",
"pub",
".",
"process_programmatic_settings",
"(",
"None",
",",
"settings_overrides",
",",
"None",
")",
"pub",
".",
"set_destination",
"(",
"None",
",",
"None",
")",
"pub",
".",
"publish",
"(",
")",
"parts",
"=",
"pub",
".",
"writer",
".",
"parts",
"except",
":",
"pass",
"sys",
".",
"stderr",
"=",
"old_stderr",
"# original text if publishing errors occur",
"if",
"parts",
"is",
"None",
"or",
"len",
"(",
"s",
".",
"getvalue",
"(",
")",
")",
">",
"0",
":",
"return",
"None",
"else",
":",
"return",
"parts",
"[",
"'body'",
"]"
]
| 32.896552 | 18.37931 |
def lies_under(self, prefix):
"""Indicates if the `prefix` is a parent of this path.
"""
orig_list = self.norm_case()._components()
pref_list = self.__class__(prefix).norm_case()._components()
return (len(orig_list) >= len(pref_list) and
orig_list[:len(pref_list)] == pref_list) | [
"def",
"lies_under",
"(",
"self",
",",
"prefix",
")",
":",
"orig_list",
"=",
"self",
".",
"norm_case",
"(",
")",
".",
"_components",
"(",
")",
"pref_list",
"=",
"self",
".",
"__class__",
"(",
"prefix",
")",
".",
"norm_case",
"(",
")",
".",
"_components",
"(",
")",
"return",
"(",
"len",
"(",
"orig_list",
")",
">=",
"len",
"(",
"pref_list",
")",
"and",
"orig_list",
"[",
":",
"len",
"(",
"pref_list",
")",
"]",
"==",
"pref_list",
")"
]
| 41 | 14.625 |
def _set_properties(self):
"""Setup title, size and tooltips"""
self.codetext_ctrl.SetToolTipString(_("Enter python code here."))
self.apply_button.SetToolTipString(_("Apply changes to current macro"))
self.splitter.SetBackgroundStyle(wx.BG_STYLE_COLOUR)
self.result_ctrl.SetMinSize((10, 10)) | [
"def",
"_set_properties",
"(",
"self",
")",
":",
"self",
".",
"codetext_ctrl",
".",
"SetToolTipString",
"(",
"_",
"(",
"\"Enter python code here.\"",
")",
")",
"self",
".",
"apply_button",
".",
"SetToolTipString",
"(",
"_",
"(",
"\"Apply changes to current macro\"",
")",
")",
"self",
".",
"splitter",
".",
"SetBackgroundStyle",
"(",
"wx",
".",
"BG_STYLE_COLOUR",
")",
"self",
".",
"result_ctrl",
".",
"SetMinSize",
"(",
"(",
"10",
",",
"10",
")",
")"
]
| 46.714286 | 21.571429 |
def _find_realname(self, post_input):
""" Returns the most appropriate name to identify the user """
# First, try the full name
if "lis_person_name_full" in post_input:
return post_input["lis_person_name_full"]
if "lis_person_name_given" in post_input and "lis_person_name_family" in post_input:
return post_input["lis_person_name_given"] + post_input["lis_person_name_family"]
# Then the email
if "lis_person_contact_email_primary" in post_input:
return post_input["lis_person_contact_email_primary"]
# Then only part of the full name
if "lis_person_name_family" in post_input:
return post_input["lis_person_name_family"]
if "lis_person_name_given" in post_input:
return post_input["lis_person_name_given"]
return post_input["user_id"] | [
"def",
"_find_realname",
"(",
"self",
",",
"post_input",
")",
":",
"# First, try the full name",
"if",
"\"lis_person_name_full\"",
"in",
"post_input",
":",
"return",
"post_input",
"[",
"\"lis_person_name_full\"",
"]",
"if",
"\"lis_person_name_given\"",
"in",
"post_input",
"and",
"\"lis_person_name_family\"",
"in",
"post_input",
":",
"return",
"post_input",
"[",
"\"lis_person_name_given\"",
"]",
"+",
"post_input",
"[",
"\"lis_person_name_family\"",
"]",
"# Then the email",
"if",
"\"lis_person_contact_email_primary\"",
"in",
"post_input",
":",
"return",
"post_input",
"[",
"\"lis_person_contact_email_primary\"",
"]",
"# Then only part of the full name",
"if",
"\"lis_person_name_family\"",
"in",
"post_input",
":",
"return",
"post_input",
"[",
"\"lis_person_name_family\"",
"]",
"if",
"\"lis_person_name_given\"",
"in",
"post_input",
":",
"return",
"post_input",
"[",
"\"lis_person_name_given\"",
"]",
"return",
"post_input",
"[",
"\"user_id\"",
"]"
]
| 43.05 | 20.45 |
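The fallback order above is: full name, given + family, primary e-mail, family name, given name, and finally the raw LTI `user_id`. A hedged sketch of the expected behaviour (`handler` stands in for an instance of the surrounding class):

```python
assert handler._find_realname(
    {"user_id": "42", "lis_person_name_full": "Ada Lovelace"}) == "Ada Lovelace"
# With no name or e-mail fields at all, the user id is the last resort:
assert handler._find_realname({"user_id": "42"}) == "42"
```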
def calculate(self, batch_info):
""" Calculate value of a metric """
value = self._value_function(batch_info['data'], batch_info['target'], batch_info['output'])
self.storage.append(value) | [
"def",
"calculate",
"(",
"self",
",",
"batch_info",
")",
":",
"value",
"=",
"self",
".",
"_value_function",
"(",
"batch_info",
"[",
"'data'",
"]",
",",
"batch_info",
"[",
"'target'",
"]",
",",
"batch_info",
"[",
"'output'",
"]",
")",
"self",
".",
"storage",
".",
"append",
"(",
"value",
")"
]
| 52.25 | 18.5 |
def rcParams(self):
"""
Return rcParams dict for this theme.
Notes
-----
        Subclasses should not need to override this method as long as
        self._rcParams is constructed properly.
        rcParams are used during plotting. Sometimes the same theme can be
        achieved by setting rcParams before plotting or by applying it
        after plotting. The choice of how to implement it is a matter of
        convenience in that case.
        There are certain things that can only be themed after plotting. There
may not be an rcParam to control the theme or the act of plotting
may cause an entity to come into existence before it can be themed.
"""
try:
rcParams = deepcopy(self._rcParams)
except NotImplementedError:
            # deepcopy raises an error for objects that are derived from or
            # composed of matplotlib.transform.TransformNode.
            # Not desirable, but probably requires upstream fix.
            # In particular, XKCD uses matplotlib.patheffects.withStroke
rcParams = copy(self._rcParams)
for th in self.themeables.values():
rcParams.update(th.rcParams)
return rcParams | [
"def",
"rcParams",
"(",
"self",
")",
":",
"try",
":",
"rcParams",
"=",
"deepcopy",
"(",
"self",
".",
"_rcParams",
")",
"except",
"NotImplementedError",
":",
"# deepcopy raises an error for objects that are drived from or",
"# composed of matplotlib.transform.TransformNode.",
"# Not desirable, but probably requires upstream fix.",
"# In particular, XKCD uses matplotlib.patheffects.withStrok",
"rcParams",
"=",
"copy",
"(",
"self",
".",
"_rcParams",
")",
"for",
"th",
"in",
"self",
".",
"themeables",
".",
"values",
"(",
")",
":",
"rcParams",
".",
"update",
"(",
"th",
".",
"rcParams",
")",
"return",
"rcParams"
]
| 37.90625 | 22.96875 |
def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None):
""" Prepare models for training """
if model_state is not None:
self.model.load_state_dict(model_state)
else:
self.model.reset_weights()
self.algo.initialize(
training_info=training_info, model=self.model, environment=self.env_roller.environment, device=self.device
) | [
"def",
"initialize_training",
"(",
"self",
",",
"training_info",
":",
"TrainingInfo",
",",
"model_state",
"=",
"None",
",",
"hidden_state",
"=",
"None",
")",
":",
"if",
"model_state",
"is",
"not",
"None",
":",
"self",
".",
"model",
".",
"load_state_dict",
"(",
"model_state",
")",
"else",
":",
"self",
".",
"model",
".",
"reset_weights",
"(",
")",
"self",
".",
"algo",
".",
"initialize",
"(",
"training_info",
"=",
"training_info",
",",
"model",
"=",
"self",
".",
"model",
",",
"environment",
"=",
"self",
".",
"env_roller",
".",
"environment",
",",
"device",
"=",
"self",
".",
"device",
")"
]
| 43.2 | 26.1 |
def ts(self, n):
"""
:param n: number of charge
:return: when to shoot nth charge, milliseconds
"""
try:
root1, root2 = solve_quadratic(self.slope / 2.0, self.minrps, -n)
except ZeroDivisionError:
root2 = float(n) / self.minrps
return int(root2 * 1000) | [
"def",
"ts",
"(",
"self",
",",
"n",
")",
":",
"try",
":",
"root1",
",",
"root2",
"=",
"solve_quadratic",
"(",
"self",
".",
"slope",
"/",
"2.0",
",",
"self",
".",
"minrps",
",",
"-",
"n",
")",
"except",
"ZeroDivisionError",
":",
"root2",
"=",
"float",
"(",
"n",
")",
"/",
"self",
".",
"minrps",
"return",
"int",
"(",
"root2",
"*",
"1000",
")"
]
| 32.3 | 12.7 |
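The quadratic solved above comes from integrating a linearly growing rate: the cumulative number of charges after `t` seconds is `n(t) = minrps*t + slope*t**2/2`, so `t` is the positive root of `(slope/2)*t**2 + minrps*t - n = 0`. A minimal sketch of what the external `solve_quadratic` helper is assumed to do (it is not shown in this snippet):

```python
import math

def solve_quadratic(a, b, c):
    # Roots of a*x**2 + b*x + c = 0. Raises ZeroDivisionError when a == 0,
    # which ts() catches to fall back to the constant-rate case t = n / minrps.
    d = math.sqrt(b * b - 4.0 * a * c)
    return (-b - d) / (2.0 * a), (-b + d) / (2.0 * a)

# Constant rate (slope == 0): the 5th charge at minrps = 1.0 fires at
# t = 5 s, so ts(5) returns 5000 milliseconds.
```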
def log_raise(log, err_str, err_type=RuntimeError):
"""Log an error message and raise an error.
Arguments
---------
log : `logging.Logger` object
err_str : str
Error message to be logged and raised.
err_type : `Exception` object
Type of error to raise.
"""
log.error(err_str)
# Make sure output is flushed
# (happens automatically to `StreamHandlers`, but not `FileHandlers`)
for handle in log.handlers:
handle.flush()
# Raise given error
raise err_type(err_str) | [
"def",
"log_raise",
"(",
"log",
",",
"err_str",
",",
"err_type",
"=",
"RuntimeError",
")",
":",
"log",
".",
"error",
"(",
"err_str",
")",
"# Make sure output is flushed",
"# (happens automatically to `StreamHandlers`, but not `FileHandlers`)",
"for",
"handle",
"in",
"log",
".",
"handlers",
":",
"handle",
".",
"flush",
"(",
")",
"# Raise given error",
"raise",
"err_type",
"(",
"err_str",
")"
]
| 27.473684 | 16.421053 |
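A typical call site for the helper above, e.g. while validating configuration (the setting name is made up):

```python
import logging

log = logging.getLogger(__name__)
num_workers = 0  # hypothetical configuration value
if num_workers < 1:
    log_raise(log, "num_workers must be >= 1, got {}".format(num_workers),
              ValueError)
```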
def arg_spec(cls, mtd_name):
"""Cross-version argument signature inspection
Parameters
----------
cls : class
mtd_name : str
Name of the method to be inspected
Returns
-------
required_params : list of str
List of required, positional parameters
optional_params : list of str
List of optional parameters, i.e. parameters with a default value
"""
mtd = getattr(cls, mtd_name)
required_params = []
optional_params = []
if hasattr(inspect, 'signature'): # Python 3
params = inspect.signature(mtd).parameters # pylint: disable=no-member
for k in params.keys():
if params[k].default == inspect.Parameter.empty: # pylint: disable=no-member
# Python 3 does not make a difference between unbound methods and functions, so the
# only way to distinguish if the first argument is of a regular method, or a class
# method, is to look for the conventional argument name. Yikes.
if not (params[k].name == 'self' or params[k].name == 'cls'):
required_params.append(k)
else:
optional_params.append(k)
else: # Python 2
params = inspect.getargspec(mtd) # pylint: disable=deprecated-method
num = len(params[0]) if params[0] else 0
n_opt = len(params[3]) if params[3] else 0
n_req = (num - n_opt) if n_opt <= num else 0
for i in range(0, n_req):
required_params.append(params[0][i])
for i in range(n_req, num):
optional_params.append(params[0][i])
if inspect.isroutine(getattr(cls, mtd_name)):
bound_mtd = cls.__dict__[mtd_name]
if not isinstance(bound_mtd, staticmethod):
del required_params[0]
return required_params, optional_params | [
"def",
"arg_spec",
"(",
"cls",
",",
"mtd_name",
")",
":",
"mtd",
"=",
"getattr",
"(",
"cls",
",",
"mtd_name",
")",
"required_params",
"=",
"[",
"]",
"optional_params",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"inspect",
",",
"'signature'",
")",
":",
"# Python 3",
"params",
"=",
"inspect",
".",
"signature",
"(",
"mtd",
")",
".",
"parameters",
"# pylint: disable=no-member",
"for",
"k",
"in",
"params",
".",
"keys",
"(",
")",
":",
"if",
"params",
"[",
"k",
"]",
".",
"default",
"==",
"inspect",
".",
"Parameter",
".",
"empty",
":",
"# pylint: disable=no-member",
"# Python 3 does not make a difference between unbound methods and functions, so the",
"# only way to distinguish if the first argument is of a regular method, or a class",
"# method, is to look for the conventional argument name. Yikes.",
"if",
"not",
"(",
"params",
"[",
"k",
"]",
".",
"name",
"==",
"'self'",
"or",
"params",
"[",
"k",
"]",
".",
"name",
"==",
"'cls'",
")",
":",
"required_params",
".",
"append",
"(",
"k",
")",
"else",
":",
"optional_params",
".",
"append",
"(",
"k",
")",
"else",
":",
"# Python 2",
"params",
"=",
"inspect",
".",
"getargspec",
"(",
"mtd",
")",
"# pylint: disable=deprecated-method",
"num",
"=",
"len",
"(",
"params",
"[",
"0",
"]",
")",
"if",
"params",
"[",
"0",
"]",
"else",
"0",
"n_opt",
"=",
"len",
"(",
"params",
"[",
"3",
"]",
")",
"if",
"params",
"[",
"3",
"]",
"else",
"0",
"n_req",
"=",
"(",
"num",
"-",
"n_opt",
")",
"if",
"n_opt",
"<=",
"num",
"else",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"n_req",
")",
":",
"required_params",
".",
"append",
"(",
"params",
"[",
"0",
"]",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"n_req",
",",
"num",
")",
":",
"optional_params",
".",
"append",
"(",
"params",
"[",
"0",
"]",
"[",
"i",
"]",
")",
"if",
"inspect",
".",
"isroutine",
"(",
"getattr",
"(",
"cls",
",",
"mtd_name",
")",
")",
":",
"bound_mtd",
"=",
"cls",
".",
"__dict__",
"[",
"mtd_name",
"]",
"if",
"not",
"isinstance",
"(",
"bound_mtd",
",",
"staticmethod",
")",
":",
"del",
"required_params",
"[",
"0",
"]",
"return",
"required_params",
",",
"optional_params"
]
| 37.285714 | 20.938776 |
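A self-contained illustration of the check the Python 3 branch above relies on: parameters whose default is `inspect.Parameter.empty` are required, all others are optional:

```python
import inspect

def f(a, b, c=1, d='x'):
    pass

params = inspect.signature(f).parameters
required = [k for k, p in params.items() if p.default is inspect.Parameter.empty]
optional = [k for k, p in params.items() if p.default is not inspect.Parameter.empty]
assert (required, optional) == (['a', 'b'], ['c', 'd'])
```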
def _pypsa_generator_timeseries_aggregated_at_lv_station(network, timesteps):
"""
Aggregates generator time series per generator subtype and LV grid.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : array_like
Timesteps is an array-like object with entries of type
:pandas:`pandas.Timestamp<timestamp>` specifying which time steps
to export to pypsa representation and use in power flow analysis.
Returns
-------
tuple of :pandas:`pandas.DataFrame<dataframe>`
Tuple of size two containing DataFrames that represent
1. 'p_set' of aggregated Generation per subtype at each LV station
2. 'q_set' of aggregated Generation per subtype at each LV station
"""
generation_p = []
generation_q = []
for lv_grid in network.mv_grid.lv_grids:
# Determine aggregated generation at LV stations
generation = {}
for gen in lv_grid.generators:
# for type in gen.type:
# for subtype in gen.subtype:
gen_name = '_'.join([gen.type,
gen.subtype,
'aggregated',
'LV_grid',
str(lv_grid.id)])
generation.setdefault(gen.type, {})
generation[gen.type].setdefault(gen.subtype, {})
generation[gen.type][gen.subtype].setdefault('timeseries_p', [])
generation[gen.type][gen.subtype].setdefault('timeseries_q', [])
generation[gen.type][gen.subtype]['timeseries_p'].append(
gen.pypsa_timeseries('p').rename(gen_name).to_frame().loc[
timesteps])
generation[gen.type][gen.subtype]['timeseries_q'].append(
gen.pypsa_timeseries('q').rename(gen_name).to_frame().loc[
timesteps])
for k_type, v_type in generation.items():
        for k_subtype, v_subtype in v_type.items():
col_name = v_subtype['timeseries_p'][0].columns[0]
generation_p.append(
pd.concat(v_subtype['timeseries_p'],
axis=1).sum(axis=1).rename(col_name).to_frame())
generation_q.append(
pd.concat(v_subtype['timeseries_q'], axis=1).sum(
axis=1).rename(col_name).to_frame())
return generation_p, generation_q | [
"def",
"_pypsa_generator_timeseries_aggregated_at_lv_station",
"(",
"network",
",",
"timesteps",
")",
":",
"generation_p",
"=",
"[",
"]",
"generation_q",
"=",
"[",
"]",
"for",
"lv_grid",
"in",
"network",
".",
"mv_grid",
".",
"lv_grids",
":",
"# Determine aggregated generation at LV stations",
"generation",
"=",
"{",
"}",
"for",
"gen",
"in",
"lv_grid",
".",
"generators",
":",
"# for type in gen.type:",
"# for subtype in gen.subtype:",
"gen_name",
"=",
"'_'",
".",
"join",
"(",
"[",
"gen",
".",
"type",
",",
"gen",
".",
"subtype",
",",
"'aggregated'",
",",
"'LV_grid'",
",",
"str",
"(",
"lv_grid",
".",
"id",
")",
"]",
")",
"generation",
".",
"setdefault",
"(",
"gen",
".",
"type",
",",
"{",
"}",
")",
"generation",
"[",
"gen",
".",
"type",
"]",
".",
"setdefault",
"(",
"gen",
".",
"subtype",
",",
"{",
"}",
")",
"generation",
"[",
"gen",
".",
"type",
"]",
"[",
"gen",
".",
"subtype",
"]",
".",
"setdefault",
"(",
"'timeseries_p'",
",",
"[",
"]",
")",
"generation",
"[",
"gen",
".",
"type",
"]",
"[",
"gen",
".",
"subtype",
"]",
".",
"setdefault",
"(",
"'timeseries_q'",
",",
"[",
"]",
")",
"generation",
"[",
"gen",
".",
"type",
"]",
"[",
"gen",
".",
"subtype",
"]",
"[",
"'timeseries_p'",
"]",
".",
"append",
"(",
"gen",
".",
"pypsa_timeseries",
"(",
"'p'",
")",
".",
"rename",
"(",
"gen_name",
")",
".",
"to_frame",
"(",
")",
".",
"loc",
"[",
"timesteps",
"]",
")",
"generation",
"[",
"gen",
".",
"type",
"]",
"[",
"gen",
".",
"subtype",
"]",
"[",
"'timeseries_q'",
"]",
".",
"append",
"(",
"gen",
".",
"pypsa_timeseries",
"(",
"'q'",
")",
".",
"rename",
"(",
"gen_name",
")",
".",
"to_frame",
"(",
")",
".",
"loc",
"[",
"timesteps",
"]",
")",
"for",
"k_type",
",",
"v_type",
"in",
"generation",
".",
"items",
"(",
")",
":",
"for",
"k_type",
",",
"v_subtype",
"in",
"v_type",
".",
"items",
"(",
")",
":",
"col_name",
"=",
"v_subtype",
"[",
"'timeseries_p'",
"]",
"[",
"0",
"]",
".",
"columns",
"[",
"0",
"]",
"generation_p",
".",
"append",
"(",
"pd",
".",
"concat",
"(",
"v_subtype",
"[",
"'timeseries_p'",
"]",
",",
"axis",
"=",
"1",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"rename",
"(",
"col_name",
")",
".",
"to_frame",
"(",
")",
")",
"generation_q",
".",
"append",
"(",
"pd",
".",
"concat",
"(",
"v_subtype",
"[",
"'timeseries_q'",
"]",
",",
"axis",
"=",
"1",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
".",
"rename",
"(",
"col_name",
")",
".",
"to_frame",
"(",
")",
")",
"return",
"generation_p",
",",
"generation_q"
]
| 40.766667 | 21.666667 |
def move_to_step(self, step):
"""
        Use in cases when you need to move to a given step depending on the input
"""
if step not in self._scenario_steps.keys():
raise UndefinedState("step {} not defined in scenario".format(step))
try:
session_id = session.sessionId
self.session_machines.set_state(session_id, step)
except UninitializedStateMachine as e:
logger.error(e)
return statement(INTERNAL_ERROR_MSG) | [
"def",
"move_to_step",
"(",
"self",
",",
"step",
")",
":",
"if",
"step",
"not",
"in",
"self",
".",
"_scenario_steps",
".",
"keys",
"(",
")",
":",
"raise",
"UndefinedState",
"(",
"\"step {} not defined in scenario\"",
".",
"format",
"(",
"step",
")",
")",
"try",
":",
"session_id",
"=",
"session",
".",
"sessionId",
"self",
".",
"session_machines",
".",
"set_state",
"(",
"session_id",
",",
"step",
")",
"except",
"UninitializedStateMachine",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
")",
"return",
"statement",
"(",
"INTERNAL_ERROR_MSG",
")"
]
| 41.083333 | 14.583333 |
def fix_header_comment(filename, timestamp):
"""Fixes the header-comment of the given file."""
# Fix input file.
name = os.path.basename( filename )
for line in fileinput.input( filename, inplace=1, mode="rU" ):
# If header-comment already contains anything for '$Id$', remove it.
line = re.sub(r'\$Id:[^$]+\$', r'$Id$', line.rstrip())
# Replace '$Id$' by a string containing the file's name (and a timestamp)!
line = re.sub(re.escape(r'$Id$'), r'$Id: ' + name + r' ' + timestamp.isoformat() + r' $', line.rstrip())
print(line) | [
"def",
"fix_header_comment",
"(",
"filename",
",",
"timestamp",
")",
":",
"# Fix input file.",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"for",
"line",
"in",
"fileinput",
".",
"input",
"(",
"filename",
",",
"inplace",
"=",
"1",
",",
"mode",
"=",
"\"rU\"",
")",
":",
"# If header-comment already contains anything for '$Id$', remove it.",
"line",
"=",
"re",
".",
"sub",
"(",
"r'\\$Id:[^$]+\\$'",
",",
"r'$Id$'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"# Replace '$Id$' by a string containing the file's name (and a timestamp)!",
"line",
"=",
"re",
".",
"sub",
"(",
"re",
".",
"escape",
"(",
"r'$Id$'",
")",
",",
"r'$Id: '",
"+",
"name",
"+",
"r' '",
"+",
"timestamp",
".",
"isoformat",
"(",
")",
"+",
"r' $'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"print",
"(",
"line",
")"
]
| 57.4 | 24.3 |
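The effect of the two substitutions above on a single line, shown without the in-place `fileinput` machinery (file name, timestamp, and input line are made up):

```python
import datetime
import re

name = "example.hpp"
timestamp = datetime.datetime(2020, 1, 2, 3, 4, 5)
line = "// $Id: old-name.hpp 1999-01-01T00:00:00 $"

# Collapse any populated '$Id: ... $' back to the bare '$Id$' keyword ...
line = re.sub(r'\$Id:[^$]+\$', r'$Id$', line.rstrip())
# ... then expand '$Id$' with the current file name and timestamp.
line = re.sub(re.escape(r'$Id$'),
              r'$Id: ' + name + r' ' + timestamp.isoformat() + r' $',
              line.rstrip())
assert line == "// $Id: example.hpp 2020-01-02T03:04:05 $"
```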
def ektnam(n, lenout=_default_len_out):
"""
Return the name of a specified, loaded table.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ektnam_c.html
:param n: Index of table.
:type n: int
:param lenout: Maximum table name length.
:type lenout: int
:return: Name of table.
:rtype: str
"""
n = ctypes.c_int(n)
lenout = ctypes.c_int(lenout)
table = stypes.stringToCharP(lenout)
libspice.ektnam_c(n, lenout, table)
return stypes.toPythonString(table) | [
"def",
"ektnam",
"(",
"n",
",",
"lenout",
"=",
"_default_len_out",
")",
":",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"n",
")",
"lenout",
"=",
"ctypes",
".",
"c_int",
"(",
"lenout",
")",
"table",
"=",
"stypes",
".",
"stringToCharP",
"(",
"lenout",
")",
"libspice",
".",
"ektnam_c",
"(",
"n",
",",
"lenout",
",",
"table",
")",
"return",
"stypes",
".",
"toPythonString",
"(",
"table",
")"
]
| 27.888889 | 13.666667 |
def _validate_request(endpoint, file_type='json', data=None, params=None):
"""
Validate request before calling API
:param endpoint: API endpoint
:param file_type: file type requested
:param data: payload
:param params: HTTP parameters
"""
if not isinstance(endpoint, string_types) or endpoint.strip() == '':
raise ClientException("Must submit `endpoint` for DHIS2 API")
if not isinstance(file_type, string_types) or file_type.lower() not in ('json', 'csv', 'xml', 'pdf', 'xlsx'):
raise ClientException("Invalid file_type: {}".format(file_type))
if params:
if not isinstance(params, (dict, list)):
raise ClientException("`params` must be a dict or list of tuples, not {}".format(params.__class__.__name__))
if isinstance(params, list) and not all([isinstance(elem, tuple) for elem in params]):
raise ClientException("`params` list must all be tuples")
if data and not isinstance(data, dict):
raise ClientException("`data` must be a dict, not {}".format(data.__class__.__name__)) | [
"def",
"_validate_request",
"(",
"endpoint",
",",
"file_type",
"=",
"'json'",
",",
"data",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"endpoint",
",",
"string_types",
")",
"or",
"endpoint",
".",
"strip",
"(",
")",
"==",
"''",
":",
"raise",
"ClientException",
"(",
"\"Must submit `endpoint` for DHIS2 API\"",
")",
"if",
"not",
"isinstance",
"(",
"file_type",
",",
"string_types",
")",
"or",
"file_type",
".",
"lower",
"(",
")",
"not",
"in",
"(",
"'json'",
",",
"'csv'",
",",
"'xml'",
",",
"'pdf'",
",",
"'xlsx'",
")",
":",
"raise",
"ClientException",
"(",
"\"Invalid file_type: {}\"",
".",
"format",
"(",
"file_type",
")",
")",
"if",
"params",
":",
"if",
"not",
"isinstance",
"(",
"params",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"raise",
"ClientException",
"(",
"\"`params` must be a dict or list of tuples, not {}\"",
".",
"format",
"(",
"params",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"isinstance",
"(",
"params",
",",
"list",
")",
"and",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"elem",
",",
"tuple",
")",
"for",
"elem",
"in",
"params",
"]",
")",
":",
"raise",
"ClientException",
"(",
"\"`params` list must all be tuples\"",
")",
"if",
"data",
"and",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"raise",
"ClientException",
"(",
"\"`data` must be a dict, not {}\"",
".",
"format",
"(",
"data",
".",
"__class__",
".",
"__name__",
")",
")"
]
| 59.947368 | 27.105263 |
def graph_from_file(filename, bidirectional=False, simplify=True,
retain_all=False, name='unnamed'):
"""
Create a networkx graph from OSM data in an XML file.
Parameters
----------
filename : string
the name of a file containing OSM XML data
bidirectional : bool
if True, create bidirectional edges for one-way streets
simplify : bool
if True, simplify the graph topology
retain_all : bool
if True, return the entire graph even if it is not connected
name : string
the name of the graph
Returns
-------
networkx multidigraph
"""
# transmogrify file of OSM XML data into JSON
response_jsons = [overpass_json_from_file(filename)]
# create graph using this response JSON
G = create_graph(response_jsons, bidirectional=bidirectional,
retain_all=retain_all, name=name)
# simplify the graph topology as the last step.
if simplify:
G = simplify_graph(G)
log('graph_from_file() returning graph with {:,} nodes and {:,} edges'.format(len(list(G.nodes())), len(list(G.edges()))))
return G | [
"def",
"graph_from_file",
"(",
"filename",
",",
"bidirectional",
"=",
"False",
",",
"simplify",
"=",
"True",
",",
"retain_all",
"=",
"False",
",",
"name",
"=",
"'unnamed'",
")",
":",
"# transmogrify file of OSM XML data into JSON",
"response_jsons",
"=",
"[",
"overpass_json_from_file",
"(",
"filename",
")",
"]",
"# create graph using this response JSON",
"G",
"=",
"create_graph",
"(",
"response_jsons",
",",
"bidirectional",
"=",
"bidirectional",
",",
"retain_all",
"=",
"retain_all",
",",
"name",
"=",
"name",
")",
"# simplify the graph topology as the last step.",
"if",
"simplify",
":",
"G",
"=",
"simplify_graph",
"(",
"G",
")",
"log",
"(",
"'graph_from_file() returning graph with {:,} nodes and {:,} edges'",
".",
"format",
"(",
"len",
"(",
"list",
"(",
"G",
".",
"nodes",
"(",
")",
")",
")",
",",
"len",
"(",
"list",
"(",
"G",
".",
"edges",
"(",
")",
")",
")",
")",
")",
"return",
"G"
]
| 32.057143 | 22.342857 |
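Typical usage of the function above, assuming an OSM XML extract saved locally (the file name is made up):

```python
# 'piedmont.osm' is a hypothetical OSM XML extract on disk.
G = graph_from_file('piedmont.osm', simplify=True, retain_all=False,
                    name='piedmont')
print(len(G.nodes()), len(G.edges()))
```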
def validate(self, request):
""" Checks a request for proper authentication details.
Returns a tuple of ``(access_token, error_response_arguments)``, which are
designed to be passed to the :py:meth:`make_error_response` method.
For example, to restrict access to a given endpoint:
.. code-block:: python
def foo_bar_resource(request, *args, **kwargs):
authenticator = AccessTokenAuthenticator(
required_scope_names=('foo', 'bar'))
access_token, error_args = authenticator.validate(request)
if not access_token:
return authenticator.make_error_response(*error_args)
          # ... can now use access_token
:rtype: When the request validates successfully, returns a
a tuple of (:py:class:`djoauth2.models.AccessToken`, ``None``). If the
request fails to validate, returns a tuple of (``None``,
``error_details_tuple``). The ``error_details_tuple`` is a tuple of
arguments to use to call the :py:func:`make_error_response` method.
"""
# Ensure that all of the scopes that are being checked against exist.
# Otherwise, raise a ValueError.
for name in self.required_scope_names:
if not Scope.objects.filter(name=name).exists():
raise ValueError('Scope with name "{}" does not exist.'.format(name))
# From http://tools.ietf.org/html/rfc6750#section-3.1 :
#
# If the request lacks any authentication information (e.g., the
# client was unaware that authentication is necessary or attempted
# using an unsupported authentication method), the resource server
# SHOULD NOT include an error code or other error information.
#
# In the case that the request fails to validate, this flag will
# be returned and should be passed to the 'make_error_response' method
# in order to comply with the specification and restrict error information.
expose_errors = False
try:
# From http://tools.ietf.org/html/rfc6750#section-1 :
#
# This specification defines the use of bearer tokens over HTTP/1.1
# [RFC2616] using Transport Layer Security (TLS) [RFC5246] to access
# protected resources. TLS is mandatory to implement and use with
# this specification; other specifications may extend this
# specification for use with other protocols. While designed for use
# with access tokens
#
# and later, from http://tools.ietf.org/html/rfc6750#section-5.3 :
#
# Always use TLS (https): Clients MUST always use TLS [RFC5246]
# (https) or equivalent transport security when making requests with
# bearer tokens. Failing to do so exposes the token to numerous
# attacks that could give attackers unintended access.
#
if settings.DJOAUTH2_SSL_ONLY and not request.is_secure():
raise InvalidRequest('insecure request: must use TLS')
http_authorization = request.META.get('HTTP_AUTHORIZATION', '')
if not http_authorization:
raise InvalidRequest('missing HTTP_AUTHORIZATION header')
try:
auth_method, auth_value = http_authorization.strip().split(' ', 1)
except ValueError:
raise InvalidRequest('malformed HTTP_AUTHORIZATION header')
if auth_method != 'Bearer':
raise InvalidRequest('authentication method is not "Bearer"')
# Used in the case that the request does not validate. See comment above.
# At this point in the validation, it is certain that the Client
# attempted to authenticate via the 'Bearer' method.
expose_errors = True
try:
access_token = AccessToken.objects.get(value=auth_value)
except AccessToken.DoesNotExist:
raise InvalidToken('access token does not exist')
if access_token.is_expired():
raise InvalidToken('access token is expired')
if not access_token.has_scope(*self.required_scope_names):
raise InsufficientScope('access token has insufficient scope')
return (access_token, None)
except AuthenticationError as validation_error:
return (None, (validation_error, expose_errors)) | [
"def",
"validate",
"(",
"self",
",",
"request",
")",
":",
"# Ensure that all of the scopes that are being checked against exist.",
"# Otherwise, raise a ValueError.",
"for",
"name",
"in",
"self",
".",
"required_scope_names",
":",
"if",
"not",
"Scope",
".",
"objects",
".",
"filter",
"(",
"name",
"=",
"name",
")",
".",
"exists",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'Scope with name \"{}\" does not exist.'",
".",
"format",
"(",
"name",
")",
")",
"# From http://tools.ietf.org/html/rfc6750#section-3.1 :",
"#",
"# If the request lacks any authentication information (e.g., the",
"# client was unaware that authentication is necessary or attempted",
"# using an unsupported authentication method), the resource server",
"# SHOULD NOT include an error code or other error information.",
"#",
"# In the case that the request fails to validate, this flag will",
"# be returned and should be passed to the 'make_error_response' method",
"# in order to comply with the specification and restrict error information.",
"expose_errors",
"=",
"False",
"try",
":",
"# From http://tools.ietf.org/html/rfc6750#section-1 :",
"#",
"# This specification defines the use of bearer tokens over HTTP/1.1",
"# [RFC2616] using Transport Layer Security (TLS) [RFC5246] to access",
"# protected resources. TLS is mandatory to implement and use with",
"# this specification; other specifications may extend this",
"# specification for use with other protocols. While designed for use",
"# with access tokens",
"#",
"# and later, from http://tools.ietf.org/html/rfc6750#section-5.3 :",
"#",
"# Always use TLS (https): Clients MUST always use TLS [RFC5246]",
"# (https) or equivalent transport security when making requests with",
"# bearer tokens. Failing to do so exposes the token to numerous",
"# attacks that could give attackers unintended access.",
"#",
"if",
"settings",
".",
"DJOAUTH2_SSL_ONLY",
"and",
"not",
"request",
".",
"is_secure",
"(",
")",
":",
"raise",
"InvalidRequest",
"(",
"'insecure request: must use TLS'",
")",
"http_authorization",
"=",
"request",
".",
"META",
".",
"get",
"(",
"'HTTP_AUTHORIZATION'",
",",
"''",
")",
"if",
"not",
"http_authorization",
":",
"raise",
"InvalidRequest",
"(",
"'missing HTTP_AUTHORIZATION header'",
")",
"try",
":",
"auth_method",
",",
"auth_value",
"=",
"http_authorization",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"InvalidRequest",
"(",
"'malformed HTTP_AUTHORIZATION header'",
")",
"if",
"auth_method",
"!=",
"'Bearer'",
":",
"raise",
"InvalidRequest",
"(",
"'authentication method is not \"Bearer\"'",
")",
"# Used in the case that the request does not validate. See comment above.",
"# At this point in the validation, it is certain that the Client",
"# attempted to authenticate via the 'Bearer' method.",
"expose_errors",
"=",
"True",
"try",
":",
"access_token",
"=",
"AccessToken",
".",
"objects",
".",
"get",
"(",
"value",
"=",
"auth_value",
")",
"except",
"AccessToken",
".",
"DoesNotExist",
":",
"raise",
"InvalidToken",
"(",
"'access token does not exist'",
")",
"if",
"access_token",
".",
"is_expired",
"(",
")",
":",
"raise",
"InvalidToken",
"(",
"'access token is expired'",
")",
"if",
"not",
"access_token",
".",
"has_scope",
"(",
"*",
"self",
".",
"required_scope_names",
")",
":",
"raise",
"InsufficientScope",
"(",
"'access token has insufficient scope'",
")",
"return",
"(",
"access_token",
",",
"None",
")",
"except",
"AuthenticationError",
"as",
"validation_error",
":",
"return",
"(",
"None",
",",
"(",
"validation_error",
",",
"expose_errors",
")",
")"
]
| 42.214286 | 27.214286 |
def calculate_connvectivity_radius(self, amount_clusters, maximum_iterations = 100):
"""!
        @brief Calculates the connectivity radius needed to allocate the specified amount of clusters using the ordering diagram and marks borders of clusters using indexes of values of the ordering diagram.
@details Parameter 'maximum_iterations' is used to protect from hanging when it is impossible to allocate specified number of clusters.
@param[in] amount_clusters (uint): amount of clusters that should be allocated by calculated connectivity radius.
@param[in] maximum_iterations (uint): maximum number of iteration for searching connectivity radius to allocated specified amount of clusters (by default it is restricted by 100 iterations).
@return (double, list) Value of connectivity radius and borders of clusters like (radius, borders), radius may be 'None' as well as borders may be '[]'
if connectivity radius hasn't been found for the specified amount of iterations.
"""
maximum_distance = max(self.__ordering)
upper_distance = maximum_distance
lower_distance = 0.0
result = None
amount, borders = self.extract_cluster_amount(maximum_distance)
if amount <= amount_clusters:
for _ in range(maximum_iterations):
radius = (lower_distance + upper_distance) / 2.0
amount, borders = self.extract_cluster_amount(radius)
if amount == amount_clusters:
result = radius
break
elif amount == 0:
break
elif amount > amount_clusters:
lower_distance = radius
elif amount < amount_clusters:
upper_distance = radius
return result, borders | [
"def",
"calculate_connvectivity_radius",
"(",
"self",
",",
"amount_clusters",
",",
"maximum_iterations",
"=",
"100",
")",
":",
"maximum_distance",
"=",
"max",
"(",
"self",
".",
"__ordering",
")",
"upper_distance",
"=",
"maximum_distance",
"lower_distance",
"=",
"0.0",
"result",
"=",
"None",
"amount",
",",
"borders",
"=",
"self",
".",
"extract_cluster_amount",
"(",
"maximum_distance",
")",
"if",
"amount",
"<=",
"amount_clusters",
":",
"for",
"_",
"in",
"range",
"(",
"maximum_iterations",
")",
":",
"radius",
"=",
"(",
"lower_distance",
"+",
"upper_distance",
")",
"/",
"2.0",
"amount",
",",
"borders",
"=",
"self",
".",
"extract_cluster_amount",
"(",
"radius",
")",
"if",
"amount",
"==",
"amount_clusters",
":",
"result",
"=",
"radius",
"break",
"elif",
"amount",
"==",
"0",
":",
"break",
"elif",
"amount",
">",
"amount_clusters",
":",
"lower_distance",
"=",
"radius",
"elif",
"amount",
"<",
"amount_clusters",
":",
"upper_distance",
"=",
"radius",
"return",
"result",
",",
"borders"
]
| 49.525 | 32.25 |
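The search above is a plain bisection: the cluster count extracted from the ordering diagram is non-increasing as the radius grows, so halving the `[lower_distance, upper_distance]` interval converges on a radius that yields exactly `amount_clusters`, or gives up after `maximum_iterations`. A stand-alone sketch of the same pattern, with a made-up monotone `count_at(radius)` standing in for `extract_cluster_amount`:

```python
def bisect_radius(count_at, target, lo, hi, max_iter=100):
    """Find r in [lo, hi] with count_at(r) == target; count_at must be
    monotonically non-increasing in r. Returns None on failure."""
    for _ in range(max_iter):
        mid = (lo + hi) / 2.0
        amount = count_at(mid)
        if amount == target:
            return mid
        if amount > target:
            lo = mid    # too many clusters -> grow the radius
        else:
            hi = mid    # too few clusters -> shrink the radius
    return None
```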
def Flush(self, state):
"""Finish writing JSON files, upload to cloudstorage and bigquery."""
self.bigquery = bigquery.GetBigQueryClient()
# BigQuery job ids must be alphanum plus dash and underscore.
urn_str = self.source_urn.RelativeName("aff4:/").replace("/", "_").replace(
":", "").replace(".", "-")
for tracker in itervalues(self.temp_output_trackers):
# Close out the gzip handle and pass the original file handle to the
# bigquery client so it sees the gzip'd content.
tracker.gzip_filehandle.write("\n")
tracker.gzip_filehandle.close()
tracker.gzip_filehandle_parent.seek(0)
# e.g. job_id: hunts_HFFE1D044_Results_ExportedFile_1446056474
job_id = "{0}_{1}_{2}".format(
urn_str, tracker.output_type,
rdfvalue.RDFDatetime.Now().AsSecondsSinceEpoch())
# If we have a job id stored, that means we failed last time. Re-use the
# job id and append to the same file if it continues to fail. This avoids
# writing many files on failure.
if tracker.output_type in self.output_jobids:
job_id = self.output_jobids[tracker.output_type]
else:
self.output_jobids[tracker.output_type] = job_id
if (state.failure_count + self.failure_count >=
config.CONFIG["BigQuery.max_upload_failures"]):
logging.error(
"Exceeded BigQuery.max_upload_failures for %s, giving up.",
self.source_urn)
else:
try:
self.bigquery.InsertData(tracker.output_type,
tracker.gzip_filehandle_parent,
tracker.schema, job_id)
self.failure_count = max(0, self.failure_count - 1)
del self.output_jobids[tracker.output_type]
except bigquery.BigQueryJobUploadError:
self.failure_count += 1
# Now that everything is in bigquery we can remove the output streams
self.temp_output_trackers = {} | [
"def",
"Flush",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"bigquery",
"=",
"bigquery",
".",
"GetBigQueryClient",
"(",
")",
"# BigQuery job ids must be alphanum plus dash and underscore.",
"urn_str",
"=",
"self",
".",
"source_urn",
".",
"RelativeName",
"(",
"\"aff4:/\"",
")",
".",
"replace",
"(",
"\"/\"",
",",
"\"_\"",
")",
".",
"replace",
"(",
"\":\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\".\"",
",",
"\"-\"",
")",
"for",
"tracker",
"in",
"itervalues",
"(",
"self",
".",
"temp_output_trackers",
")",
":",
"# Close out the gzip handle and pass the original file handle to the",
"# bigquery client so it sees the gzip'd content.",
"tracker",
".",
"gzip_filehandle",
".",
"write",
"(",
"\"\\n\"",
")",
"tracker",
".",
"gzip_filehandle",
".",
"close",
"(",
")",
"tracker",
".",
"gzip_filehandle_parent",
".",
"seek",
"(",
"0",
")",
"# e.g. job_id: hunts_HFFE1D044_Results_ExportedFile_1446056474",
"job_id",
"=",
"\"{0}_{1}_{2}\"",
".",
"format",
"(",
"urn_str",
",",
"tracker",
".",
"output_type",
",",
"rdfvalue",
".",
"RDFDatetime",
".",
"Now",
"(",
")",
".",
"AsSecondsSinceEpoch",
"(",
")",
")",
"# If we have a job id stored, that means we failed last time. Re-use the",
"# job id and append to the same file if it continues to fail. This avoids",
"# writing many files on failure.",
"if",
"tracker",
".",
"output_type",
"in",
"self",
".",
"output_jobids",
":",
"job_id",
"=",
"self",
".",
"output_jobids",
"[",
"tracker",
".",
"output_type",
"]",
"else",
":",
"self",
".",
"output_jobids",
"[",
"tracker",
".",
"output_type",
"]",
"=",
"job_id",
"if",
"(",
"state",
".",
"failure_count",
"+",
"self",
".",
"failure_count",
">=",
"config",
".",
"CONFIG",
"[",
"\"BigQuery.max_upload_failures\"",
"]",
")",
":",
"logging",
".",
"error",
"(",
"\"Exceeded BigQuery.max_upload_failures for %s, giving up.\"",
",",
"self",
".",
"source_urn",
")",
"else",
":",
"try",
":",
"self",
".",
"bigquery",
".",
"InsertData",
"(",
"tracker",
".",
"output_type",
",",
"tracker",
".",
"gzip_filehandle_parent",
",",
"tracker",
".",
"schema",
",",
"job_id",
")",
"self",
".",
"failure_count",
"=",
"max",
"(",
"0",
",",
"self",
".",
"failure_count",
"-",
"1",
")",
"del",
"self",
".",
"output_jobids",
"[",
"tracker",
".",
"output_type",
"]",
"except",
"bigquery",
".",
"BigQueryJobUploadError",
":",
"self",
".",
"failure_count",
"+=",
"1",
"# Now that everything is in bigquery we can remove the output streams",
"self",
".",
"temp_output_trackers",
"=",
"{",
"}"
]
| 43.954545 | 19.659091 |
def _arg_varname(self, wire):
"""
Input, Const, and Registers have special input values
"""
if isinstance(wire, (Input, Register)):
return 'd[' + repr(wire.name) + ']' # passed in
elif isinstance(wire, Const):
return str(wire.val) # hardcoded
else:
return self._varname(wire) | [
"def",
"_arg_varname",
"(",
"self",
",",
"wire",
")",
":",
"if",
"isinstance",
"(",
"wire",
",",
"(",
"Input",
",",
"Register",
")",
")",
":",
"return",
"'d['",
"+",
"repr",
"(",
"wire",
".",
"name",
")",
"+",
"']'",
"# passed in",
"elif",
"isinstance",
"(",
"wire",
",",
"Const",
")",
":",
"return",
"str",
"(",
"wire",
".",
"val",
")",
"# hardcoded",
"else",
":",
"return",
"self",
".",
"_varname",
"(",
"wire",
")"
]
| 35.2 | 9.6 |
def com_google_fonts_check_fontv(ttFont):
""" Check for font-v versioning """
from fontv.libfv import FontVersion
fv = FontVersion(ttFont)
if fv.version and (fv.is_development or fv.is_release):
yield PASS, "Font version string looks GREAT!"
else:
yield INFO, ("Version string is: \"{}\"\n"
"The version string must ideally include a git commit hash"
" and either a 'dev' or a 'release' suffix such as in the"
" example below:\n"
"\"Version 1.3; git-0d08353-release\""
"").format(fv.get_name_id5_version_string()) | [
"def",
"com_google_fonts_check_fontv",
"(",
"ttFont",
")",
":",
"from",
"fontv",
".",
"libfv",
"import",
"FontVersion",
"fv",
"=",
"FontVersion",
"(",
"ttFont",
")",
"if",
"fv",
".",
"version",
"and",
"(",
"fv",
".",
"is_development",
"or",
"fv",
".",
"is_release",
")",
":",
"yield",
"PASS",
",",
"\"Font version string looks GREAT!\"",
"else",
":",
"yield",
"INFO",
",",
"(",
"\"Version string is: \\\"{}\\\"\\n\"",
"\"The version string must ideally include a git commit hash\"",
"\" and either a 'dev' or a 'release' suffix such as in the\"",
"\" example below:\\n\"",
"\"\\\"Version 1.3; git-0d08353-release\\\"\"",
"\"\"",
")",
".",
"format",
"(",
"fv",
".",
"get_name_id5_version_string",
"(",
")",
")"
]
| 43.142857 | 16.785714 |
def get_queryset(self, request):
"""Limit Pages to those that belong to the request's user."""
qs = super(VISADeviceAdmin, self).get_queryset(request)
return qs.filter(protocol_id=PROTOCOL_ID) | [
"def",
"get_queryset",
"(",
"self",
",",
"request",
")",
":",
"qs",
"=",
"super",
"(",
"VISADeviceAdmin",
",",
"self",
")",
".",
"get_queryset",
"(",
"request",
")",
"return",
"qs",
".",
"filter",
"(",
"protocol_id",
"=",
"PROTOCOL_ID",
")"
]
| 53.25 | 10 |
def client_for_path(self, path):
"""
Returns a new client with the same root URL and authentication, but
a different specific URL. For instance, if you have a client pointed
at https://analytics.luminoso.com/api/v5/, and you want new ones for
Project A and Project B, you would call:
client_a = client.client_for_path('projects/<project_id_a>')
client_b = client.client_for_path('projects/<project_id_b>')
        and your base client would remain unchanged.
Paths with leading slashes are appended to the root url; otherwise,
paths are set relative to the current path.
"""
if path.startswith('/'):
url = self.root_url + path
else:
url = self.url + path
return self.__class__(self.session, url) | [
"def",
"client_for_path",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
".",
"startswith",
"(",
"'/'",
")",
":",
"url",
"=",
"self",
".",
"root_url",
"+",
"path",
"else",
":",
"url",
"=",
"self",
".",
"url",
"+",
"path",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"session",
",",
"url",
")"
]
| 40.8 | 20.9 |
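A quick usage sketch of the path rule above; the session object, client class name, and the project/ping paths are illustrative stand-ins, assuming the `__class__(session, url)` constructor signature used in the last line:

base = LuminosoClient(session, 'https://analytics.luminoso.com/api/v5/')
proj = base.client_for_path('projects/p1')   # relative: .../api/v5/projects/p1
ping = base.client_for_path('/ping')         # leading slash: root_url + '/ping'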
def main():
"Main program"
generators = check_dependencies()
args = docopt(__doc__, version='md2ebook 0.0.1-dev')
commander = Commander(args, generators)
commander.handle() | [
"def",
"main",
"(",
")",
":",
"generators",
"=",
"check_dependencies",
"(",
")",
"args",
"=",
"docopt",
"(",
"__doc__",
",",
"version",
"=",
"'md2ebook 0.0.1-dev'",
")",
"commander",
"=",
"Commander",
"(",
"args",
",",
"generators",
")",
"commander",
".",
"handle",
"(",
")"
]
| 31.166667 | 15.166667 |
def arg(*args, **kwargs):
"""
    Decorates a function or a class method to add it to the argument parser
"""
def decorate(func):
"""
Decorate
"""
        # we'll set the command name with the passed cmd_name argument, if
        # it exists; otherwise the command name will be the function name
func.__cmd_name__ = kwargs.pop(
'cmd_name', getattr(func, '__cmd_name__', func.__name__))
# retrieve the class (SillyClass)
func.__cls__ = utils.check_class()
if not hasattr(func, '__arguments__'):
            # if the function doesn't have __arguments__ yet, we'll set them up
# using get_functarguments.
func.__arguments__ = utils.get_functarguments(func)
if len(args) or len(kwargs):
# if we have some argument or keyword argument
# we'll try to get the destination name from the kwargs ('dest')
# else we'll use the last arg name as destination
arg_name = kwargs.get(
'dest', args[-1].lstrip('-').replace('-', '_'))
try:
# we try to get the command index.
idx = func.__named__.index(arg_name)
# and delete it from the named list
del func.__named__[idx]
# and delete it from the arguments list
del func.__arguments__[idx]
except ValueError:
pass
# append the args and kwargs to the function arguments list
func.__arguments__.append((args, kwargs,))
if func.__cls__ is None and isinstance(func, types.FunctionType):
            # if the function doesn't have a class and is a FunctionType
            # we'll add it directly to the commands list.
ap_ = ArgParseInator(skip_init=True)
if func.__cmd_name__ not in ap_.commands:
# we'll add it if not exists
ap_.commands[func.__cmd_name__] = func
return func
return decorate | [
"def",
"arg",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorate",
"(",
"func",
")",
":",
"\"\"\"\n Decorate\n \"\"\"",
"# we'll set the command name with the passed cmd_name argument, if",
"# exist, else the command name will be the function name",
"func",
".",
"__cmd_name__",
"=",
"kwargs",
".",
"pop",
"(",
"'cmd_name'",
",",
"getattr",
"(",
"func",
",",
"'__cmd_name__'",
",",
"func",
".",
"__name__",
")",
")",
"# retrieve the class (SillyClass)",
"func",
".",
"__cls__",
"=",
"utils",
".",
"check_class",
"(",
")",
"if",
"not",
"hasattr",
"(",
"func",
",",
"'__arguments__'",
")",
":",
"# if the funcion hasn't the __arguments__ yet, we'll setup them",
"# using get_functarguments.",
"func",
".",
"__arguments__",
"=",
"utils",
".",
"get_functarguments",
"(",
"func",
")",
"if",
"len",
"(",
"args",
")",
"or",
"len",
"(",
"kwargs",
")",
":",
"# if we have some argument or keyword argument",
"# we'll try to get the destination name from the kwargs ('dest')",
"# else we'll use the last arg name as destination",
"arg_name",
"=",
"kwargs",
".",
"get",
"(",
"'dest'",
",",
"args",
"[",
"-",
"1",
"]",
".",
"lstrip",
"(",
"'-'",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
")",
"try",
":",
"# we try to get the command index.",
"idx",
"=",
"func",
".",
"__named__",
".",
"index",
"(",
"arg_name",
")",
"# and delete it from the named list",
"del",
"func",
".",
"__named__",
"[",
"idx",
"]",
"# and delete it from the arguments list",
"del",
"func",
".",
"__arguments__",
"[",
"idx",
"]",
"except",
"ValueError",
":",
"pass",
"# append the args and kwargs to the function arguments list",
"func",
".",
"__arguments__",
".",
"append",
"(",
"(",
"args",
",",
"kwargs",
",",
")",
")",
"if",
"func",
".",
"__cls__",
"is",
"None",
"and",
"isinstance",
"(",
"func",
",",
"types",
".",
"FunctionType",
")",
":",
"# if the function don't have a class and is a FunctionType",
"# we'll add it directly to he commands list.",
"ap_",
"=",
"ArgParseInator",
"(",
"skip_init",
"=",
"True",
")",
"if",
"func",
".",
"__cmd_name__",
"not",
"in",
"ap_",
".",
"commands",
":",
"# we'll add it if not exists",
"ap_",
".",
"commands",
"[",
"func",
".",
"__cmd_name__",
"]",
"=",
"func",
"return",
"func",
"return",
"decorate"
]
| 44.727273 | 15.045455 |
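A hedged usage sketch of the decorator above; the greet command and its flag are illustrative only, while arg and ArgParseInator come from the snippet, and utils.get_functarguments is assumed to populate both __arguments__ and __named__:

@arg('--shout', dest='shout', help='uppercase the greeting')
def greet(shout=False):
    """Greet the world."""
    print('HELLO' if shout else 'hello')

# The function is now registered: ArgParseInator().commands['greet'] is greet,
# and ('--shout', ...) was appended to greet.__arguments__.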
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_chat_action(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
action=self.action, chat_id=self.receiver
) | [
"def",
"send",
"(",
"self",
",",
"sender",
":",
"PytgbotApiBot",
")",
":",
"return",
"sender",
".",
"send_chat_action",
"(",
"# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id",
"action",
"=",
"self",
".",
"action",
",",
"chat_id",
"=",
"self",
".",
"receiver",
")"
]
| 33.384615 | 17.230769 |
def list_work_units(self, work_spec_name, start=0, limit=None):
"""Get a dictionary of work units for some work spec.
        The dictionary maps work unit name to work unit definition.
Only work units that have not been completed ("available" or
"pending" work units) are included.
"""
return self.registry.filter(WORK_UNITS_ + work_spec_name,
start=start, limit=limit) | [
"def",
"list_work_units",
"(",
"self",
",",
"work_spec_name",
",",
"start",
"=",
"0",
",",
"limit",
"=",
"None",
")",
":",
"return",
"self",
".",
"registry",
".",
"filter",
"(",
"WORK_UNITS_",
"+",
"work_spec_name",
",",
"start",
"=",
"start",
",",
"limit",
"=",
"limit",
")"
]
| 44.1 | 20.9 |
def send_command_response(self,
source: list,
command: str,
*args,
**kwargs):
"""
Used in bot observer `on_next` method
"""
args = _json.dumps(args).encode('utf8')
kwargs = _json.dumps(kwargs).encode('utf8')
if isinstance(source, list):
frame = (*source, b'', command.encode('utf8'), args, kwargs)
else:
frame = (b'', command.encode('utf8'), args, kwargs)
if self._run_control_loop:
self.add_callback(self.command_socket.send_multipart, frame)
else:
self.command_socket.send_multipart(frame) | [
"def",
"send_command_response",
"(",
"self",
",",
"source",
":",
"list",
",",
"command",
":",
"str",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"_json",
".",
"dumps",
"(",
"args",
")",
".",
"encode",
"(",
"'utf8'",
")",
"kwargs",
"=",
"_json",
".",
"dumps",
"(",
"kwargs",
")",
".",
"encode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"source",
",",
"list",
")",
":",
"frame",
"=",
"(",
"*",
"source",
",",
"b''",
",",
"command",
".",
"encode",
"(",
"'utf8'",
")",
",",
"args",
",",
"kwargs",
")",
"else",
":",
"frame",
"=",
"(",
"b''",
",",
"command",
".",
"encode",
"(",
"'utf8'",
")",
",",
"args",
",",
"kwargs",
")",
"if",
"self",
".",
"_run_control_loop",
":",
"self",
".",
"add_callback",
"(",
"self",
".",
"command_socket",
".",
"send_multipart",
",",
"frame",
")",
"else",
":",
"self",
".",
"command_socket",
".",
"send_multipart",
"(",
"frame",
")"
]
| 39.666667 | 11.444444 |
def _make_continuation_prompt(self, prompt):
""" Given a plain text version of an In prompt, returns an HTML
continuation prompt.
"""
end_chars = '...: '
space_count = len(prompt.lstrip('\n')) - len(end_chars)
body = ' ' * space_count + end_chars
return '<span class="in-prompt">%s</span>' % body | [
"def",
"_make_continuation_prompt",
"(",
"self",
",",
"prompt",
")",
":",
"end_chars",
"=",
"'...: '",
"space_count",
"=",
"len",
"(",
"prompt",
".",
"lstrip",
"(",
"'\\n'",
")",
")",
"-",
"len",
"(",
"end_chars",
")",
"body",
"=",
"' '",
"*",
"space_count",
"+",
"end_chars",
"return",
"'<span class=\"in-prompt\">%s</span>'",
"%",
"body"
]
| 44.25 | 9.25 |
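The alignment arithmetic above can be checked stand-alone (no Qt console needed); for a nine-character 'In [42]: ' prompt, the continuation body right-aligns under it:

prompt = 'In [42]: '                  # 9 characters
end_chars = '...: '                   # 5 characters
body = ' ' * (len(prompt.lstrip('\n')) - len(end_chars)) + end_chars
assert body == '    ...: '            # 4 spaces of padding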
def show_code_completion(self):
"""Display a completion list based on the current line"""
# Note: unicode conversion is needed only for ExternalShellBase
text = to_text_string(self.get_current_line_to_cursor())
last_obj = self.get_last_obj()
if not text:
return
obj_dir = self.get_dir(last_obj)
if last_obj and obj_dir and text.endswith('.'):
self.show_completion_list(obj_dir)
return
# Builtins and globals
if not text.endswith('.') and last_obj \
and re.match(r'[a-zA-Z_0-9]*$', last_obj):
b_k_g = dir(builtins)+self.get_globals_keys()+keyword.kwlist
for objname in b_k_g:
if objname.startswith(last_obj) and objname != last_obj:
self.show_completion_list(b_k_g, completion_text=last_obj)
return
else:
return
# Looking for an incomplete completion
if last_obj is None:
last_obj = text
dot_pos = last_obj.rfind('.')
if dot_pos != -1:
if dot_pos == len(last_obj)-1:
completion_text = ""
else:
completion_text = last_obj[dot_pos+1:]
last_obj = last_obj[:dot_pos]
completions = self.get_dir(last_obj)
if completions is not None:
self.show_completion_list(completions,
completion_text=completion_text)
return
# Looking for ' or ": filename completion
q_pos = max([text.rfind("'"), text.rfind('"')])
if q_pos != -1:
completions = self.get_cdlistdir()
if completions:
self.show_completion_list(completions,
completion_text=text[q_pos+1:])
return | [
"def",
"show_code_completion",
"(",
"self",
")",
":",
"# Note: unicode conversion is needed only for ExternalShellBase\r",
"text",
"=",
"to_text_string",
"(",
"self",
".",
"get_current_line_to_cursor",
"(",
")",
")",
"last_obj",
"=",
"self",
".",
"get_last_obj",
"(",
")",
"if",
"not",
"text",
":",
"return",
"obj_dir",
"=",
"self",
".",
"get_dir",
"(",
"last_obj",
")",
"if",
"last_obj",
"and",
"obj_dir",
"and",
"text",
".",
"endswith",
"(",
"'.'",
")",
":",
"self",
".",
"show_completion_list",
"(",
"obj_dir",
")",
"return",
"# Builtins and globals\r",
"if",
"not",
"text",
".",
"endswith",
"(",
"'.'",
")",
"and",
"last_obj",
"and",
"re",
".",
"match",
"(",
"r'[a-zA-Z_0-9]*$'",
",",
"last_obj",
")",
":",
"b_k_g",
"=",
"dir",
"(",
"builtins",
")",
"+",
"self",
".",
"get_globals_keys",
"(",
")",
"+",
"keyword",
".",
"kwlist",
"for",
"objname",
"in",
"b_k_g",
":",
"if",
"objname",
".",
"startswith",
"(",
"last_obj",
")",
"and",
"objname",
"!=",
"last_obj",
":",
"self",
".",
"show_completion_list",
"(",
"b_k_g",
",",
"completion_text",
"=",
"last_obj",
")",
"return",
"else",
":",
"return",
"# Looking for an incomplete completion\r",
"if",
"last_obj",
"is",
"None",
":",
"last_obj",
"=",
"text",
"dot_pos",
"=",
"last_obj",
".",
"rfind",
"(",
"'.'",
")",
"if",
"dot_pos",
"!=",
"-",
"1",
":",
"if",
"dot_pos",
"==",
"len",
"(",
"last_obj",
")",
"-",
"1",
":",
"completion_text",
"=",
"\"\"",
"else",
":",
"completion_text",
"=",
"last_obj",
"[",
"dot_pos",
"+",
"1",
":",
"]",
"last_obj",
"=",
"last_obj",
"[",
":",
"dot_pos",
"]",
"completions",
"=",
"self",
".",
"get_dir",
"(",
"last_obj",
")",
"if",
"completions",
"is",
"not",
"None",
":",
"self",
".",
"show_completion_list",
"(",
"completions",
",",
"completion_text",
"=",
"completion_text",
")",
"return",
"# Looking for ' or \": filename completion\r",
"q_pos",
"=",
"max",
"(",
"[",
"text",
".",
"rfind",
"(",
"\"'\"",
")",
",",
"text",
".",
"rfind",
"(",
"'\"'",
")",
"]",
")",
"if",
"q_pos",
"!=",
"-",
"1",
":",
"completions",
"=",
"self",
".",
"get_cdlistdir",
"(",
")",
"if",
"completions",
":",
"self",
".",
"show_completion_list",
"(",
"completions",
",",
"completion_text",
"=",
"text",
"[",
"q_pos",
"+",
"1",
":",
"]",
")",
"return"
]
| 40.145833 | 16.270833 |
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_add_trunk_ctag_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
vlan_profile = ET.SubElement(port_profile, "vlan-profile")
switchport = ET.SubElement(vlan_profile, "switchport")
trunk = ET.SubElement(switchport, "trunk")
trunk_vlan_classification = ET.SubElement(trunk, "trunk-vlan-classification")
allowed = ET.SubElement(trunk_vlan_classification, "allowed")
vlan = ET.SubElement(allowed, "vlan")
add = ET.SubElement(vlan, "add")
trunk_vlan_id_key = ET.SubElement(add, "trunk-vlan-id")
trunk_vlan_id_key.text = kwargs.pop('trunk_vlan_id')
trunk_ctag_id = ET.SubElement(add, "trunk-ctag-id")
trunk_ctag_id.text = kwargs.pop('trunk_ctag_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_add_trunk_ctag_id",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"port_profile",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"port-profile\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-port-profile\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"vlan_profile",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"vlan-profile\"",
")",
"switchport",
"=",
"ET",
".",
"SubElement",
"(",
"vlan_profile",
",",
"\"switchport\"",
")",
"trunk",
"=",
"ET",
".",
"SubElement",
"(",
"switchport",
",",
"\"trunk\"",
")",
"trunk_vlan_classification",
"=",
"ET",
".",
"SubElement",
"(",
"trunk",
",",
"\"trunk-vlan-classification\"",
")",
"allowed",
"=",
"ET",
".",
"SubElement",
"(",
"trunk_vlan_classification",
",",
"\"allowed\"",
")",
"vlan",
"=",
"ET",
".",
"SubElement",
"(",
"allowed",
",",
"\"vlan\"",
")",
"add",
"=",
"ET",
".",
"SubElement",
"(",
"vlan",
",",
"\"add\"",
")",
"trunk_vlan_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"add",
",",
"\"trunk-vlan-id\"",
")",
"trunk_vlan_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'trunk_vlan_id'",
")",
"trunk_ctag_id",
"=",
"ET",
".",
"SubElement",
"(",
"add",
",",
"\"trunk-ctag-id\"",
")",
"trunk_ctag_id",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'trunk_ctag_id'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| 54.666667 | 21.47619 |
def summary(self) -> str:
"""
Condensed report summary created from translations
"""
if not self.translations:
self.update()
return summary.metar(self.translations) | [
"def",
"summary",
"(",
"self",
")",
"->",
"str",
":",
"if",
"not",
"self",
".",
"translations",
":",
"self",
".",
"update",
"(",
")",
"return",
"summary",
".",
"metar",
"(",
"self",
".",
"translations",
")"
]
| 30 | 8.857143 |
def send(self):
""" Send all outstanding requests.
"""
from neobolt.exceptions import ConnectionExpired
if self._connection:
try:
self._connection.send()
except ConnectionExpired as error:
raise SessionExpired(*error.args) | [
"def",
"send",
"(",
"self",
")",
":",
"from",
"neobolt",
".",
"exceptions",
"import",
"ConnectionExpired",
"if",
"self",
".",
"_connection",
":",
"try",
":",
"self",
".",
"_connection",
".",
"send",
"(",
")",
"except",
"ConnectionExpired",
"as",
"error",
":",
"raise",
"SessionExpired",
"(",
"*",
"error",
".",
"args",
")"
]
| 33.555556 | 10.333333 |
def nearpt(positn, a, b, c):
"""
locates the point on the surface of an ellipsoid that is nearest to a
specified position. It also returns the altitude of the
position above the ellipsoid.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/nearpt_c.html
:param positn: Position of a point in bodyfixed frame.
:type positn: 3-Element Array of floats
:param a: Length of semi-axis parallel to x-axis.
:type a: float
:param b: Length of semi-axis parallel to y-axis.
:type b: float
    :param c: Length of semi-axis parallel to z-axis.
:type c: float
:return:
Point on the ellipsoid closest to positn,
Altitude of positn above the ellipsoid.
:rtype: tuple
"""
positn = stypes.toDoubleVector(positn)
a = ctypes.c_double(a)
b = ctypes.c_double(b)
c = ctypes.c_double(c)
npoint = stypes.emptyDoubleVector(3)
alt = ctypes.c_double()
libspice.nearpt_c(positn, a, b, c, npoint, ctypes.byref(alt))
return stypes.cVectorToPython(npoint), alt.value | [
"def",
"nearpt",
"(",
"positn",
",",
"a",
",",
"b",
",",
"c",
")",
":",
"positn",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"positn",
")",
"a",
"=",
"ctypes",
".",
"c_double",
"(",
"a",
")",
"b",
"=",
"ctypes",
".",
"c_double",
"(",
"b",
")",
"c",
"=",
"ctypes",
".",
"c_double",
"(",
"c",
")",
"npoint",
"=",
"stypes",
".",
"emptyDoubleVector",
"(",
"3",
")",
"alt",
"=",
"ctypes",
".",
"c_double",
"(",
")",
"libspice",
".",
"nearpt_c",
"(",
"positn",
",",
"a",
",",
"b",
",",
"c",
",",
"npoint",
",",
"ctypes",
".",
"byref",
"(",
"alt",
")",
")",
"return",
"stypes",
".",
"cVectorToPython",
"(",
"npoint",
")",
",",
"alt",
".",
"value"
]
| 35.551724 | 16.517241 |
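For reference, spiceypy exposes this routine publicly; a hedged usage sketch with illustrative Earth-like radii in km (the values are examples, not from the original source):

import spiceypy
# Point 10,000 km above the pole of an oblate ellipsoid (a = b = 6378.14, c = 6356.75).
point, alt = spiceypy.nearpt([0.0, 0.0, 10000.0], 6378.14, 6378.14, 6356.75)
# point is approximately [0, 0, 6356.75] (the pole); alt is about 3643.25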
def definition(self, name):
"""
Get the definition for the property I{name}.
@param name: The property I{name} to find the definition for.
@type name: str
@return: The property definition
@rtype: L{Definition}
@raise AttributeError: On not found.
"""
d = self.definitions.get(name)
if d is None:
raise AttributeError(name)
return d | [
"def",
"definition",
"(",
"self",
",",
"name",
")",
":",
"d",
"=",
"self",
".",
"definitions",
".",
"get",
"(",
"name",
")",
"if",
"d",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"name",
")",
"return",
"d"
]
| 32.230769 | 10.230769 |
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
self._value += delta
return self._value | [
"def",
"add_and_get",
"(",
"self",
",",
"delta",
")",
":",
"with",
"self",
".",
"_lock",
".",
"exclusive",
":",
"self",
".",
"_value",
"+=",
"delta",
"return",
"self",
".",
"_value"
]
| 26.555556 | 16.333333 |
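Contract sketch for the method above; AtomicLong is a stand-in name for whatever counter class defines add_and_get along with its _lock and _value attributes:

counter = AtomicLong(0)               # hypothetical host class
assert counter.add_and_get(5) == 5
assert counter.add_and_get(-2) == 3   # the read-modify-write runs under the lock,
                                      # so concurrent callers never lose an update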
def parse_locals_keylist(locals_, key_list, strlist_=None, prefix=''):
""" For each key in keylist, puts its value in locals into a stringlist
Args:
locals_ (?):
key_list (list):
strlist_ (list): (default = None)
prefix (unicode): (default = u'')
Returns:
list: strlist_
CommandLine:
python -m utool.util_dbg --exec-parse_locals_keylist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> locals_ = {'foo': [1, 2, 3], 'bar': 'spam', 'eggs': 4, 'num': 5}
>>> key_list = [(len, 'foo'), 'bar.lower.__name__', 'eggs', 'num', 'other']
>>> strlist_ = None
>>> prefix = u''
>>> strlist_ = parse_locals_keylist(locals_, key_list, strlist_, prefix)
>>> result = ('strlist_ = %s' % (ut.repr2(strlist_, nl=True),))
>>> print(result)
strlist_ = [
' len(foo) = 3',
" bar.lower.__name__ = 'lower'",
' eggs = 4',
' num = 5',
' other = NameError (this likely due to a misformatted printex and is not related to the exception)',
]
"""
from utool import util_str
if strlist_ is None:
strlist_ = []
for key in key_list:
try:
if key is None:
strlist_.append('')
elif isinstance(key, tuple):
# Given a tuple of information
tup = key
func, key_ = tup
val = get_varval_from_locals(key_, locals_)
funcvalstr = six.text_type(func(val))
callname = util_str.get_callable_name(func)
strlist_.append('%s %s(%s) = %s' % (prefix, callname, key_, funcvalstr))
elif isinstance(key, six.string_types):
# Try to infer print from variable name
val = get_varval_from_locals(key, locals_)
#valstr = util_str.truncate_str(repr(val), maxlen=200)
valstr = util_str.truncate_str(util_str.repr2(val), maxlen=200)
strlist_.append('%s %s = %s' % (prefix, key, valstr))
else:
# Try to infer print from variable value
val = key
typestr = repr(type(val))
namestr = get_varname_from_locals(val, locals_)
#valstr = util_str.truncate_str(repr(val), maxlen=200)
valstr = util_str.truncate_str(util_str.repr2(val), maxlen=200)
strlist_.append('%s %s %s = %s' % (prefix, typestr, namestr, valstr))
except AssertionError as ex:
strlist_.append(prefix + ' ' + six.text_type(ex) + ' (this likely due to a misformatted printex and is not related to the exception)')
return strlist_ | [
"def",
"parse_locals_keylist",
"(",
"locals_",
",",
"key_list",
",",
"strlist_",
"=",
"None",
",",
"prefix",
"=",
"''",
")",
":",
"from",
"utool",
"import",
"util_str",
"if",
"strlist_",
"is",
"None",
":",
"strlist_",
"=",
"[",
"]",
"for",
"key",
"in",
"key_list",
":",
"try",
":",
"if",
"key",
"is",
"None",
":",
"strlist_",
".",
"append",
"(",
"''",
")",
"elif",
"isinstance",
"(",
"key",
",",
"tuple",
")",
":",
"# Given a tuple of information",
"tup",
"=",
"key",
"func",
",",
"key_",
"=",
"tup",
"val",
"=",
"get_varval_from_locals",
"(",
"key_",
",",
"locals_",
")",
"funcvalstr",
"=",
"six",
".",
"text_type",
"(",
"func",
"(",
"val",
")",
")",
"callname",
"=",
"util_str",
".",
"get_callable_name",
"(",
"func",
")",
"strlist_",
".",
"append",
"(",
"'%s %s(%s) = %s'",
"%",
"(",
"prefix",
",",
"callname",
",",
"key_",
",",
"funcvalstr",
")",
")",
"elif",
"isinstance",
"(",
"key",
",",
"six",
".",
"string_types",
")",
":",
"# Try to infer print from variable name",
"val",
"=",
"get_varval_from_locals",
"(",
"key",
",",
"locals_",
")",
"#valstr = util_str.truncate_str(repr(val), maxlen=200)",
"valstr",
"=",
"util_str",
".",
"truncate_str",
"(",
"util_str",
".",
"repr2",
"(",
"val",
")",
",",
"maxlen",
"=",
"200",
")",
"strlist_",
".",
"append",
"(",
"'%s %s = %s'",
"%",
"(",
"prefix",
",",
"key",
",",
"valstr",
")",
")",
"else",
":",
"# Try to infer print from variable value",
"val",
"=",
"key",
"typestr",
"=",
"repr",
"(",
"type",
"(",
"val",
")",
")",
"namestr",
"=",
"get_varname_from_locals",
"(",
"val",
",",
"locals_",
")",
"#valstr = util_str.truncate_str(repr(val), maxlen=200)",
"valstr",
"=",
"util_str",
".",
"truncate_str",
"(",
"util_str",
".",
"repr2",
"(",
"val",
")",
",",
"maxlen",
"=",
"200",
")",
"strlist_",
".",
"append",
"(",
"'%s %s %s = %s'",
"%",
"(",
"prefix",
",",
"typestr",
",",
"namestr",
",",
"valstr",
")",
")",
"except",
"AssertionError",
"as",
"ex",
":",
"strlist_",
".",
"append",
"(",
"prefix",
"+",
"' '",
"+",
"six",
".",
"text_type",
"(",
"ex",
")",
"+",
"' (this likely due to a misformatted printex and is not related to the exception)'",
")",
"return",
"strlist_"
]
| 41.134328 | 22.597015 |
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):
"""Post the content of a URL via sending a HTTP POST request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
if kwargs.get('post_data_raw'):
logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw']))
else:
logging.debug('post_content: %s\npost_data: %s' % (url, post_data))
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
if kwargs.get('post_data_raw'):
post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')
else:
post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')
response = urlopen_with_retry(req, data=post_data_enc)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type'), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data | [
"def",
"post_content",
"(",
"url",
",",
"headers",
"=",
"{",
"}",
",",
"post_data",
"=",
"{",
"}",
",",
"decoded",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"'post_data_raw'",
")",
":",
"logging",
".",
"debug",
"(",
"'post_content: %s\\npost_data_raw: %s'",
"%",
"(",
"url",
",",
"kwargs",
"[",
"'post_data_raw'",
"]",
")",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"'post_content: %s\\npost_data: %s'",
"%",
"(",
"url",
",",
"post_data",
")",
")",
"req",
"=",
"request",
".",
"Request",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"if",
"cookies",
":",
"cookies",
".",
"add_cookie_header",
"(",
"req",
")",
"req",
".",
"headers",
".",
"update",
"(",
"req",
".",
"unredirected_hdrs",
")",
"if",
"kwargs",
".",
"get",
"(",
"'post_data_raw'",
")",
":",
"post_data_enc",
"=",
"bytes",
"(",
"kwargs",
"[",
"'post_data_raw'",
"]",
",",
"'utf-8'",
")",
"else",
":",
"post_data_enc",
"=",
"bytes",
"(",
"parse",
".",
"urlencode",
"(",
"post_data",
")",
",",
"'utf-8'",
")",
"response",
"=",
"urlopen_with_retry",
"(",
"req",
",",
"data",
"=",
"post_data_enc",
")",
"data",
"=",
"response",
".",
"read",
"(",
")",
"# Handle HTTP compression for gzip and deflate (zlib)",
"content_encoding",
"=",
"response",
".",
"getheader",
"(",
"'Content-Encoding'",
")",
"if",
"content_encoding",
"==",
"'gzip'",
":",
"data",
"=",
"ungzip",
"(",
"data",
")",
"elif",
"content_encoding",
"==",
"'deflate'",
":",
"data",
"=",
"undeflate",
"(",
"data",
")",
"# Decode the response body",
"if",
"decoded",
":",
"charset",
"=",
"match1",
"(",
"response",
".",
"getheader",
"(",
"'Content-Type'",
")",
",",
"r'charset=([\\w-]+)'",
")",
"if",
"charset",
"is",
"not",
"None",
":",
"data",
"=",
"data",
".",
"decode",
"(",
"charset",
")",
"else",
":",
"data",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"data"
]
| 33.777778 | 21.288889 |
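The ungzip and undeflate helpers referenced above are defined elsewhere in this source; assuming they wrap the standard library, a minimal version might look like:

import gzip
import zlib

def ungzip(data):
    return gzip.decompress(data)

def undeflate(data):
    try:
        # most servers send zlib-wrapped deflate
        return zlib.decompress(data)
    except zlib.error:
        # fall back to raw deflate streams
        return zlib.decompress(data, -zlib.MAX_WBITS)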
def fnmatches(fname, patterns, matchfun):
""""
matches?
:param fname: file name
:type fname: str
:param patterns: list of filename pattern. see fnmatch.fnamtch
:type patterns: [str]
:rtype: generator of bool
"""
import fnmatch
matchfun = matchfun or fnmatch.fnmatch
for p in patterns:
yield matchfun(fname, p) | [
"def",
"fnmatches",
"(",
"fname",
",",
"patterns",
",",
"matchfun",
")",
":",
"import",
"fnmatch",
"matchfun",
"=",
"matchfun",
"or",
"fnmatch",
".",
"fnmatch",
"for",
"p",
"in",
"patterns",
":",
"yield",
"matchfun",
"(",
"fname",
",",
"p",
")"
]
| 26.846154 | 12.615385 |
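Usage sketch, assuming the function above is in scope: it is a generator yielding one boolean per pattern, so wrap it in any() or all() depending on the semantics you need (passing None for matchfun falls back to fnmatch.fnmatch):

assert any(fnmatches('report.csv', ['*.txt', '*.csv'], None))
assert not all(fnmatches('report.csv', ['*.txt', '*.csv'], None))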
def add_root_bin(self, bin_id):
"""Adds a root bin.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
raise: AlreadyExists - ``bin_id`` is already in hierarchy
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.add_root_bin_template
if self._catalog_session is not None:
return self._catalog_session.add_root_catalog(catalog_id=bin_id)
return self._hierarchy_session.add_root(id_=bin_id) | [
"def",
"add_root_bin",
"(",
"self",
",",
"bin_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchyDesignSession.add_root_bin_template",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"add_root_catalog",
"(",
"catalog_id",
"=",
"bin_id",
")",
"return",
"self",
".",
"_hierarchy_session",
".",
"add_root",
"(",
"id_",
"=",
"bin_id",
")"
]
| 44.941176 | 18 |
def validate(**kwargs):
"""Defines a decorator to register a validator with a name for look-up.
    If name is not provided, we use the function name as the name of the validator.
"""
def decorator(func):
_VALIDATORS[kwargs.pop('name', func.__name__)] = func
return func
return decorator | [
"def",
"validate",
"(",
"*",
"*",
"kwargs",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"_VALIDATORS",
"[",
"kwargs",
".",
"pop",
"(",
"'name'",
",",
"func",
".",
"__name__",
")",
"]",
"=",
"func",
"return",
"func",
"return",
"decorator"
]
| 30.3 | 20.9 |
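Usage sketch for the registry pattern above; the email validator is illustrative, and _VALIDATORS is the module-level dict the decorator writes into:

@validate(name='email')
def check_email(value):
    return '@' in value

assert _VALIDATORS['email']('user@example.com')
# Without name=..., the function name itself ('check_email') becomes the key.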
def _doRequest(self, request=None, is_file=False, file_xml_uri=''):
"""This function will perform the specified request on the FileMaker
server, and it will return the raw result from FileMaker."""
if request is None:
request = []
if is_file and file_xml_uri:
url = self._buildFileUrl(file_xml_uri)
else:
request = '&'.join(request)
url = "%s?%s" % (self._buildUrl(), request)
if self._debug:
            print('[PyFileMaker DEBUG] ', url)
resp = requests.get(
url = url,
auth = (self._login, self._password)
)
resp.raise_for_status()
return resp.content | [
"def",
"_doRequest",
"(",
"self",
",",
"request",
"=",
"None",
",",
"is_file",
"=",
"False",
",",
"file_xml_uri",
"=",
"''",
")",
":",
"if",
"request",
"is",
"None",
":",
"request",
"=",
"[",
"]",
"if",
"is_file",
"and",
"file_xml_uri",
":",
"url",
"=",
"self",
".",
"_buildFileUrl",
"(",
"file_xml_uri",
")",
"else",
":",
"request",
"=",
"'&'",
".",
"join",
"(",
"request",
")",
"url",
"=",
"\"%s?%s\"",
"%",
"(",
"self",
".",
"_buildUrl",
"(",
")",
",",
"request",
")",
"if",
"self",
".",
"_debug",
":",
"print",
"'[PyFileMaker DEBUG] '",
",",
"url",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
"=",
"url",
",",
"auth",
"=",
"(",
"self",
".",
"_login",
",",
"self",
".",
"_password",
")",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"return",
"resp",
".",
"content"
]
| 25.681818 | 19.772727 |
def parse_content(self, content):
"""
        All child classes inherit this function to parse the XML file automatically.
        By default it calls :func:`parse_dom` to parse all necessary data into
        :attr:`data`; the :attr:`xmlns` (the default namespace) is made available
        for that function.
"""
self.dom = self.xmlns = None
self.data = {}
# ignore empty xml file
if len(content) > 3:
self.raw = '\n'.join(content)
self.dom = ET.fromstring(self.raw)
self.xmlns = self.dom.tag.strip("{").split("}")[0] if all(c in self.dom.tag for c in ["{", "}"]) else ""
self.data = self.parse_dom() | [
"def",
"parse_content",
"(",
"self",
",",
"content",
")",
":",
"self",
".",
"dom",
"=",
"self",
".",
"xmlns",
"=",
"None",
"self",
".",
"data",
"=",
"{",
"}",
"# ignore empty xml file",
"if",
"len",
"(",
"content",
")",
">",
"3",
":",
"self",
".",
"raw",
"=",
"'\\n'",
".",
"join",
"(",
"content",
")",
"self",
".",
"dom",
"=",
"ET",
".",
"fromstring",
"(",
"self",
".",
"raw",
")",
"self",
".",
"xmlns",
"=",
"self",
".",
"dom",
".",
"tag",
".",
"strip",
"(",
"\"{\"",
")",
".",
"split",
"(",
"\"}\"",
")",
"[",
"0",
"]",
"if",
"all",
"(",
"c",
"in",
"self",
".",
"dom",
".",
"tag",
"for",
"c",
"in",
"[",
"\"{\"",
",",
"\"}\"",
"]",
")",
"else",
"\"\"",
"self",
".",
"data",
"=",
"self",
".",
"parse_dom",
"(",
")"
]
| 46 | 16.533333 |
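The namespace extraction above can be demonstrated stand-alone, since ElementTree prefixes qualified tags with '{uri}':

import xml.etree.ElementTree as ET
dom = ET.fromstring('<root xmlns="urn:example:ns"><a/></root>')
tag = dom.tag                                    # '{urn:example:ns}root'
xmlns = tag.strip("{").split("}")[0] if all(c in tag for c in ["{", "}"]) else ""
assert xmlns == 'urn:example:ns'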
def upsert(self, table: str, record: dict, create_cols: bool=False,
dtypes: list=None, pks=["id"], namefields=["id"]):
"""
Upsert a record in a table
"""
try:
self.db[table].upsert(record, pks, create_cols, dtypes)
except Exception as e:
self.err(e, "Can not upsert data")
return
names = ""
for el in namefields:
names += " " + record[el]
self.ok("Upserted record"+names) | [
"def",
"upsert",
"(",
"self",
",",
"table",
":",
"str",
",",
"record",
":",
"dict",
",",
"create_cols",
":",
"bool",
"=",
"False",
",",
"dtypes",
":",
"list",
"=",
"None",
",",
"pks",
"=",
"[",
"\"id\"",
"]",
",",
"namefields",
"=",
"[",
"\"id\"",
"]",
")",
":",
"try",
":",
"self",
".",
"db",
"[",
"table",
"]",
".",
"upsert",
"(",
"record",
",",
"pks",
",",
"create_cols",
",",
"dtypes",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"\"Can not upsert data\"",
")",
"return",
"names",
"=",
"\"\"",
"for",
"el",
"in",
"namefields",
":",
"names",
"+=",
"\" \"",
"+",
"record",
"[",
"el",
"]",
"self",
".",
"ok",
"(",
"\"Upserted record\"",
"+",
"names",
")"
]
| 34.642857 | 13.357143 |
def filter(self, *args, **kwargs):
"""
Returns a new TaskQuerySet with the given filters added.
"""
clone = self._clone()
for f in args:
clone.filter_obj.add_filter(f)
for key, value in kwargs.items():
clone.filter_obj.add_filter_param(key, value)
return clone | [
"def",
"filter",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"clone",
"=",
"self",
".",
"_clone",
"(",
")",
"for",
"f",
"in",
"args",
":",
"clone",
".",
"filter_obj",
".",
"add_filter",
"(",
"f",
")",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"clone",
".",
"filter_obj",
".",
"add_filter_param",
"(",
"key",
",",
"value",
")",
"return",
"clone"
]
| 33.1 | 9.9 |
def find_in_coord_list(coord_list, coord, atol=1e-8):
"""
Find the indices of matches of a particular coord in a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(coord_list) == 0:
return []
diff = np.array(coord_list) - np.array(coord)[None, :]
return np.where(np.all(np.abs(diff) < atol, axis=1))[0] | [
"def",
"find_in_coord_list",
"(",
"coord_list",
",",
"coord",
",",
"atol",
"=",
"1e-8",
")",
":",
"if",
"len",
"(",
"coord_list",
")",
"==",
"0",
":",
"return",
"[",
"]",
"diff",
"=",
"np",
".",
"array",
"(",
"coord_list",
")",
"-",
"np",
".",
"array",
"(",
"coord",
")",
"[",
"None",
",",
":",
"]",
"return",
"np",
".",
"where",
"(",
"np",
".",
"all",
"(",
"np",
".",
"abs",
"(",
"diff",
")",
"<",
"atol",
",",
"axis",
"=",
"1",
")",
")",
"[",
"0",
"]"
]
| 33.058824 | 20.588235 |
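Usage sketch, assuming the function above is in scope: broadcasting compares every row of coord_list against coord at once, and rows within atol on every axis are reported:

import numpy as np
coords = [[0, 0, 0], [1, 1, 1], [1, 1, 1 + 1e-10]]
idx = find_in_coord_list(coords, [1, 1, 1])   # default atol=1e-8
assert list(idx) == [1, 2]                    # both near-duplicates match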
def get_output_list_from_task(task, placeholder_dict):
"""
Purpose: Parse a Task object to extract the files to be staged as the output.
Details: The extracted data is then converted into the appropriate RP directive depending on whether the data
is to be copied/downloaded.
:arguments:
:task: EnTK Task object
:placeholder_dict: dictionary holding the values for placeholders
:return: list of RP directives for the files that need to be staged out
"""
try:
if not isinstance(task, Task):
raise TypeError(expected_type=Task, actual_type=type(task))
output_data = []
if task.copy_output_data:
for path in task.copy_output_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.COPY
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.COPY
}
output_data.append(temp)
if task.download_output_data:
for path in task.download_output_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip()
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip())
}
output_data.append(temp)
if task.move_output_data:
for path in task.move_output_data:
path = resolve_placeholders(path, placeholder_dict)
if len(path.split('>')) > 1:
temp = {
'source': path.split('>')[0].strip(),
'target': path.split('>')[1].strip(),
'action': rp.MOVE
}
else:
temp = {
'source': path.split('>')[0].strip(),
'target': os.path.basename(path.split('>')[0].strip()),
'action': rp.MOVE
}
output_data.append(temp)
return output_data
    except Exception as ex:
logger.exception('Failed to get output list of files from task, error: %s' % ex)
raise | [
"def",
"get_output_list_from_task",
"(",
"task",
",",
"placeholder_dict",
")",
":",
"try",
":",
"if",
"not",
"isinstance",
"(",
"task",
",",
"Task",
")",
":",
"raise",
"TypeError",
"(",
"expected_type",
"=",
"Task",
",",
"actual_type",
"=",
"type",
"(",
"task",
")",
")",
"output_data",
"=",
"[",
"]",
"if",
"task",
".",
"copy_output_data",
":",
"for",
"path",
"in",
"task",
".",
"copy_output_data",
":",
"path",
"=",
"resolve_placeholders",
"(",
"path",
",",
"placeholder_dict",
")",
"if",
"len",
"(",
"path",
".",
"split",
"(",
"'>'",
")",
")",
">",
"1",
":",
"temp",
"=",
"{",
"'source'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'target'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
",",
"'action'",
":",
"rp",
".",
"COPY",
"}",
"else",
":",
"temp",
"=",
"{",
"'source'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'target'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
",",
"'action'",
":",
"rp",
".",
"COPY",
"}",
"output_data",
".",
"append",
"(",
"temp",
")",
"if",
"task",
".",
"download_output_data",
":",
"for",
"path",
"in",
"task",
".",
"download_output_data",
":",
"path",
"=",
"resolve_placeholders",
"(",
"path",
",",
"placeholder_dict",
")",
"if",
"len",
"(",
"path",
".",
"split",
"(",
"'>'",
")",
")",
">",
"1",
":",
"temp",
"=",
"{",
"'source'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'target'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"}",
"else",
":",
"temp",
"=",
"{",
"'source'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'target'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
"}",
"output_data",
".",
"append",
"(",
"temp",
")",
"if",
"task",
".",
"move_output_data",
":",
"for",
"path",
"in",
"task",
".",
"move_output_data",
":",
"path",
"=",
"resolve_placeholders",
"(",
"path",
",",
"placeholder_dict",
")",
"if",
"len",
"(",
"path",
".",
"split",
"(",
"'>'",
")",
")",
">",
"1",
":",
"temp",
"=",
"{",
"'source'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'target'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
",",
"'action'",
":",
"rp",
".",
"MOVE",
"}",
"else",
":",
"temp",
"=",
"{",
"'source'",
":",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'target'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"path",
".",
"split",
"(",
"'>'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
",",
"'action'",
":",
"rp",
".",
"MOVE",
"}",
"output_data",
".",
"append",
"(",
"temp",
")",
"return",
"output_data",
"except",
"Exception",
",",
"ex",
":",
"logger",
".",
"exception",
"(",
"'Failed to get output list of files from task, error: %s'",
"%",
"ex",
")",
"raise"
]
| 30.922222 | 23.855556 |
def add_field(self, model, field):
"""Ran when a field is added to a model."""
for key in self._iterate_required_keys(field):
self._create_hstore_required(
model._meta.db_table,
field,
key
) | [
"def",
"add_field",
"(",
"self",
",",
"model",
",",
"field",
")",
":",
"for",
"key",
"in",
"self",
".",
"_iterate_required_keys",
"(",
"field",
")",
":",
"self",
".",
"_create_hstore_required",
"(",
"model",
".",
"_meta",
".",
"db_table",
",",
"field",
",",
"key",
")"
]
| 30.111111 | 14.444444 |
def construct(self, request, service=None, http_args=None, **kwargs):
"""
Constructs a client assertion and signs it with a key.
The request is modified as a side effect.
:param request: The request
:param service: A :py:class:`oidcservice.service.Service` instance
:param http_args: HTTP arguments
:param kwargs: Extra arguments
:return: Constructed HTTP arguments, in this case none
"""
if 'client_assertion' in kwargs:
request["client_assertion"] = kwargs['client_assertion']
if 'client_assertion_type' in kwargs:
request[
'client_assertion_type'] = kwargs['client_assertion_type']
else:
request["client_assertion_type"] = JWT_BEARER
elif 'client_assertion' in request:
if 'client_assertion_type' not in request:
request["client_assertion_type"] = JWT_BEARER
else:
algorithm = None
_context = service.service_context
# audience for the signed JWT depends on which endpoint
# we're talking to.
if kwargs['authn_endpoint'] in ['token_endpoint']:
try:
algorithm = _context.behaviour[
'token_endpoint_auth_signing_alg']
except (KeyError, AttributeError):
pass
audience = _context.provider_info['token_endpoint']
else:
audience = _context.provider_info['issuer']
if not algorithm:
algorithm = self.choose_algorithm(**kwargs)
ktype = alg2keytype(algorithm)
try:
if 'kid' in kwargs:
signing_key = [self.get_key_by_kid(kwargs["kid"], algorithm,
_context)]
elif ktype in _context.kid["sig"]:
try:
signing_key = [self.get_key_by_kid(
_context.kid["sig"][ktype], algorithm, _context)]
except KeyError:
signing_key = self.get_signing_key(algorithm, _context)
else:
signing_key = self.get_signing_key(algorithm, _context)
except NoMatchingKey as err:
logger.error("%s" % sanitize(err))
raise
try:
_args = {'lifetime': kwargs['lifetime']}
except KeyError:
_args = {}
# construct the signed JWT with the assertions and add
# it as value to the 'client_assertion' claim of the request
request["client_assertion"] = assertion_jwt(
_context.client_id, signing_key, audience,
algorithm, **_args)
request["client_assertion_type"] = JWT_BEARER
try:
del request["client_secret"]
except KeyError:
pass
# If client_id is not required to be present, remove it.
if not request.c_param["client_id"][VREQUIRED]:
try:
del request["client_id"]
except KeyError:
pass
return {} | [
"def",
"construct",
"(",
"self",
",",
"request",
",",
"service",
"=",
"None",
",",
"http_args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'client_assertion'",
"in",
"kwargs",
":",
"request",
"[",
"\"client_assertion\"",
"]",
"=",
"kwargs",
"[",
"'client_assertion'",
"]",
"if",
"'client_assertion_type'",
"in",
"kwargs",
":",
"request",
"[",
"'client_assertion_type'",
"]",
"=",
"kwargs",
"[",
"'client_assertion_type'",
"]",
"else",
":",
"request",
"[",
"\"client_assertion_type\"",
"]",
"=",
"JWT_BEARER",
"elif",
"'client_assertion'",
"in",
"request",
":",
"if",
"'client_assertion_type'",
"not",
"in",
"request",
":",
"request",
"[",
"\"client_assertion_type\"",
"]",
"=",
"JWT_BEARER",
"else",
":",
"algorithm",
"=",
"None",
"_context",
"=",
"service",
".",
"service_context",
"# audience for the signed JWT depends on which endpoint",
"# we're talking to.",
"if",
"kwargs",
"[",
"'authn_endpoint'",
"]",
"in",
"[",
"'token_endpoint'",
"]",
":",
"try",
":",
"algorithm",
"=",
"_context",
".",
"behaviour",
"[",
"'token_endpoint_auth_signing_alg'",
"]",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"pass",
"audience",
"=",
"_context",
".",
"provider_info",
"[",
"'token_endpoint'",
"]",
"else",
":",
"audience",
"=",
"_context",
".",
"provider_info",
"[",
"'issuer'",
"]",
"if",
"not",
"algorithm",
":",
"algorithm",
"=",
"self",
".",
"choose_algorithm",
"(",
"*",
"*",
"kwargs",
")",
"ktype",
"=",
"alg2keytype",
"(",
"algorithm",
")",
"try",
":",
"if",
"'kid'",
"in",
"kwargs",
":",
"signing_key",
"=",
"[",
"self",
".",
"get_key_by_kid",
"(",
"kwargs",
"[",
"\"kid\"",
"]",
",",
"algorithm",
",",
"_context",
")",
"]",
"elif",
"ktype",
"in",
"_context",
".",
"kid",
"[",
"\"sig\"",
"]",
":",
"try",
":",
"signing_key",
"=",
"[",
"self",
".",
"get_key_by_kid",
"(",
"_context",
".",
"kid",
"[",
"\"sig\"",
"]",
"[",
"ktype",
"]",
",",
"algorithm",
",",
"_context",
")",
"]",
"except",
"KeyError",
":",
"signing_key",
"=",
"self",
".",
"get_signing_key",
"(",
"algorithm",
",",
"_context",
")",
"else",
":",
"signing_key",
"=",
"self",
".",
"get_signing_key",
"(",
"algorithm",
",",
"_context",
")",
"except",
"NoMatchingKey",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"\"%s\"",
"%",
"sanitize",
"(",
"err",
")",
")",
"raise",
"try",
":",
"_args",
"=",
"{",
"'lifetime'",
":",
"kwargs",
"[",
"'lifetime'",
"]",
"}",
"except",
"KeyError",
":",
"_args",
"=",
"{",
"}",
"# construct the signed JWT with the assertions and add",
"# it as value to the 'client_assertion' claim of the request",
"request",
"[",
"\"client_assertion\"",
"]",
"=",
"assertion_jwt",
"(",
"_context",
".",
"client_id",
",",
"signing_key",
",",
"audience",
",",
"algorithm",
",",
"*",
"*",
"_args",
")",
"request",
"[",
"\"client_assertion_type\"",
"]",
"=",
"JWT_BEARER",
"try",
":",
"del",
"request",
"[",
"\"client_secret\"",
"]",
"except",
"KeyError",
":",
"pass",
"# If client_id is not required to be present, remove it.",
"if",
"not",
"request",
".",
"c_param",
"[",
"\"client_id\"",
"]",
"[",
"VREQUIRED",
"]",
":",
"try",
":",
"del",
"request",
"[",
"\"client_id\"",
"]",
"except",
"KeyError",
":",
"pass",
"return",
"{",
"}"
]
| 38.542169 | 19.795181 |
def columns(self, *args) -> List[List[Well]]:
"""
Accessor function used to navigate through a labware by column.
With indexing one can treat it as a typical python nested list.
        To access the first column, for example,
simply write: labware.columns()[0]
This will output ['A1', 'B1', 'C1', 'D1'...].
Note that this method takes args for backward-compatibility, but use
of args is deprecated and will be removed in future versions. Args
can be either strings or integers, but must all be the same type (e.g.:
`self.columns(1, 4, 8)` or `self.columns('1', '2')`, but
`self.columns('1', 4)` is invalid.
:return: A list of column lists
"""
col_dict = self._create_indexed_dictionary(group=2)
keys = sorted(col_dict, key=lambda x: int(x))
if not args:
res = [col_dict[key] for key in keys]
elif isinstance(args[0], int):
res = [col_dict[keys[idx]] for idx in args]
elif isinstance(args[0], str):
res = [col_dict[idx] for idx in args]
else:
raise TypeError
return res | [
"def",
"columns",
"(",
"self",
",",
"*",
"args",
")",
"->",
"List",
"[",
"List",
"[",
"Well",
"]",
"]",
":",
"col_dict",
"=",
"self",
".",
"_create_indexed_dictionary",
"(",
"group",
"=",
"2",
")",
"keys",
"=",
"sorted",
"(",
"col_dict",
",",
"key",
"=",
"lambda",
"x",
":",
"int",
"(",
"x",
")",
")",
"if",
"not",
"args",
":",
"res",
"=",
"[",
"col_dict",
"[",
"key",
"]",
"for",
"key",
"in",
"keys",
"]",
"elif",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"int",
")",
":",
"res",
"=",
"[",
"col_dict",
"[",
"keys",
"[",
"idx",
"]",
"]",
"for",
"idx",
"in",
"args",
"]",
"elif",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"str",
")",
":",
"res",
"=",
"[",
"col_dict",
"[",
"idx",
"]",
"for",
"idx",
"in",
"args",
"]",
"else",
":",
"raise",
"TypeError",
"return",
"res"
]
| 39.068966 | 18.37931 |
def transport_param(image):
""" Parse DockerImage info into skopeo parameter
:param image: DockerImage
:return: string. skopeo parameter specifying image
"""
transports = {SkopeoTransport.CONTAINERS_STORAGE: "containers-storage:",
SkopeoTransport.DIRECTORY: "dir:",
SkopeoTransport.DOCKER: "docker://",
                  SkopeoTransport.DOCKER_ARCHIVE: "docker-archive:",
SkopeoTransport.DOCKER_DAEMON: "docker-daemon:",
SkopeoTransport.OCI: "oci:",
SkopeoTransport.OSTREE: "ostree:"}
transport = image.transport
tag = image.tag
repository = image.name
path = image.path
if not transport:
transport = SkopeoTransport.DOCKER
command = transports[transport]
path_required = [SkopeoTransport.DIRECTORY, SkopeoTransport.DOCKER_ARCHIVE, SkopeoTransport.OCI]
if transport in path_required and path is None:
raise ValueError(transports[transport] + " path is required to be specified")
if transport == SkopeoTransport.DIRECTORY:
return command + path
if transport == SkopeoTransport.DOCKER_ARCHIVE:
command += path
if repository is None:
return command
command += ":"
if transport in [SkopeoTransport.CONTAINERS_STORAGE, SkopeoTransport.DOCKER,
SkopeoTransport.DOCKER_ARCHIVE, transport.DOCKER_DAEMON]:
return command + repository + ":" + tag
if transport == SkopeoTransport.OCI:
return command + path + ":" + tag
if transport == SkopeoTransport.OSTREE:
return command + repository + ("@" + path if path else "")
raise ConuException("This transport is not supported") | [
"def",
"transport_param",
"(",
"image",
")",
":",
"transports",
"=",
"{",
"SkopeoTransport",
".",
"CONTAINERS_STORAGE",
":",
"\"containers-storage:\"",
",",
"SkopeoTransport",
".",
"DIRECTORY",
":",
"\"dir:\"",
",",
"SkopeoTransport",
".",
"DOCKER",
":",
"\"docker://\"",
",",
"SkopeoTransport",
".",
"DOCKER_ARCHIVE",
":",
"\"docker-archive\"",
",",
"SkopeoTransport",
".",
"DOCKER_DAEMON",
":",
"\"docker-daemon:\"",
",",
"SkopeoTransport",
".",
"OCI",
":",
"\"oci:\"",
",",
"SkopeoTransport",
".",
"OSTREE",
":",
"\"ostree:\"",
"}",
"transport",
"=",
"image",
".",
"transport",
"tag",
"=",
"image",
".",
"tag",
"repository",
"=",
"image",
".",
"name",
"path",
"=",
"image",
".",
"path",
"if",
"not",
"transport",
":",
"transport",
"=",
"SkopeoTransport",
".",
"DOCKER",
"command",
"=",
"transports",
"[",
"transport",
"]",
"path_required",
"=",
"[",
"SkopeoTransport",
".",
"DIRECTORY",
",",
"SkopeoTransport",
".",
"DOCKER_ARCHIVE",
",",
"SkopeoTransport",
".",
"OCI",
"]",
"if",
"transport",
"in",
"path_required",
"and",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"transports",
"[",
"transport",
"]",
"+",
"\" path is required to be specified\"",
")",
"if",
"transport",
"==",
"SkopeoTransport",
".",
"DIRECTORY",
":",
"return",
"command",
"+",
"path",
"if",
"transport",
"==",
"SkopeoTransport",
".",
"DOCKER_ARCHIVE",
":",
"command",
"+=",
"path",
"if",
"repository",
"is",
"None",
":",
"return",
"command",
"command",
"+=",
"\":\"",
"if",
"transport",
"in",
"[",
"SkopeoTransport",
".",
"CONTAINERS_STORAGE",
",",
"SkopeoTransport",
".",
"DOCKER",
",",
"SkopeoTransport",
".",
"DOCKER_ARCHIVE",
",",
"transport",
".",
"DOCKER_DAEMON",
"]",
":",
"return",
"command",
"+",
"repository",
"+",
"\":\"",
"+",
"tag",
"if",
"transport",
"==",
"SkopeoTransport",
".",
"OCI",
":",
"return",
"command",
"+",
"path",
"+",
"\":\"",
"+",
"tag",
"if",
"transport",
"==",
"SkopeoTransport",
".",
"OSTREE",
":",
"return",
"command",
"+",
"repository",
"+",
"(",
"\"@\"",
"+",
"path",
"if",
"path",
"else",
"\"\"",
")",
"raise",
"ConuException",
"(",
"\"This transport is not supported\"",
")"
]
| 39.395349 | 19.418605 |
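Illustrative expected outputs, assuming a stand-in image object exposing the attributes the function reads (transport, tag, name, path); the constructor shown is hypothetical:

img = DockerImage('fedora', tag='31')                     # hypothetical stand-in
img.transport, img.path = SkopeoTransport.DOCKER, None
transport_param(img)                                      # -> 'docker://fedora:31'
img.transport, img.path = SkopeoTransport.OCI, '/tmp/img'
transport_param(img)                                      # -> 'oci:/tmp/img:31'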
def partsphere(self, x):
"""Sphere (squared norm) test objective function"""
self.counter += 1
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
dim = len(x)
        x = array([x[i % dim] for i in range(2 * dim)])
N = 8
i = self.counter % dim
# f = sum(x[i:i + N]**2)
f = sum(x[np.random.randint(dim, size=N)]**2)
return f | [
"def",
"partsphere",
"(",
"self",
",",
"x",
")",
":",
"self",
".",
"counter",
"+=",
"1",
"# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]",
"dim",
"=",
"len",
"(",
"x",
")",
"x",
"=",
"array",
"(",
"[",
"x",
"[",
"i",
"%",
"dim",
"]",
"for",
"i",
"in",
"xrange",
"(",
"2",
"*",
"dim",
")",
"]",
")",
"N",
"=",
"8",
"i",
"=",
"self",
".",
"counter",
"%",
"dim",
"# f = sum(x[i:i + N]**2)",
"f",
"=",
"sum",
"(",
"x",
"[",
"np",
".",
"random",
".",
"randint",
"(",
"dim",
",",
"size",
"=",
"N",
")",
"]",
"**",
"2",
")",
"return",
"f"
]
| 37 | 17.090909 |
def gen_gradient(self, skip=0, step=1, vskip=0, vstep=1):
"""Generate gradient measurements
Parameters
----------
skip: int
distance between current electrodes
step: int
steplength between subsequent current dipoles
vskip: int
distance between voltage electrodes
vstep: int
steplength between subsequent voltage dipoles
"""
N = self.nr_electrodes
quadpoles = []
for a in range(1, N - skip, step):
b = a + skip + 1
for m in range(a + 1, b - vskip - 1, vstep):
n = m + vskip + 1
quadpoles.append((a, b, m, n))
configs = np.array(quadpoles)
if configs.size == 0:
return None
self.add_to_configs(configs)
return configs | [
"def",
"gen_gradient",
"(",
"self",
",",
"skip",
"=",
"0",
",",
"step",
"=",
"1",
",",
"vskip",
"=",
"0",
",",
"vstep",
"=",
"1",
")",
":",
"N",
"=",
"self",
".",
"nr_electrodes",
"quadpoles",
"=",
"[",
"]",
"for",
"a",
"in",
"range",
"(",
"1",
",",
"N",
"-",
"skip",
",",
"step",
")",
":",
"b",
"=",
"a",
"+",
"skip",
"+",
"1",
"for",
"m",
"in",
"range",
"(",
"a",
"+",
"1",
",",
"b",
"-",
"vskip",
"-",
"1",
",",
"vstep",
")",
":",
"n",
"=",
"m",
"+",
"vskip",
"+",
"1",
"quadpoles",
".",
"append",
"(",
"(",
"a",
",",
"b",
",",
"m",
",",
"n",
")",
")",
"configs",
"=",
"np",
".",
"array",
"(",
"quadpoles",
")",
"if",
"configs",
".",
"size",
"==",
"0",
":",
"return",
"None",
"self",
".",
"add_to_configs",
"(",
"configs",
")",
"return",
"configs"
]
| 28.517241 | 16.655172 |
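A worked example of the nesting above, evaluated outside the class: with six electrodes and skip=3, the inner voltage dipole sweeps every position strictly between the current pair:

N, skip, step, vskip, vstep = 6, 3, 1, 0, 1
quadpoles = []
for a in range(1, N - skip, step):
    b = a + skip + 1
    for m in range(a + 1, b - vskip - 1, vstep):
        quadpoles.append((a, b, m, m + vskip + 1))
assert quadpoles == [(1, 5, 2, 3), (1, 5, 3, 4), (2, 6, 3, 4), (2, 6, 4, 5)]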
def refresh(self):
"""Refresh tabwidget."""
if self.tabwidget.count():
editor = self.tabwidget.currentWidget()
else:
editor = None
self.find_widget.set_editor(editor) | [
"def",
"refresh",
"(",
"self",
")",
":",
"if",
"self",
".",
"tabwidget",
".",
"count",
"(",
")",
":",
"editor",
"=",
"self",
".",
"tabwidget",
".",
"currentWidget",
"(",
")",
"else",
":",
"editor",
"=",
"None",
"self",
".",
"find_widget",
".",
"set_editor",
"(",
"editor",
")"
]
| 30.857143 | 12 |
def make_directory(self, directory_name, *args, **kwargs):
""" :meth:`.WNetworkClientProto.make_directory` method implementation
"""
self.dav_client().mkdir(self.join_path(self.session_path(), directory_name)) | [
"def",
"make_directory",
"(",
"self",
",",
"directory_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"dav_client",
"(",
")",
".",
"mkdir",
"(",
"self",
".",
"join_path",
"(",
"self",
".",
"session_path",
"(",
")",
",",
"directory_name",
")",
")"
]
| 53 | 14 |
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
                    f'Missing needed key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity | [
"def",
"process_existing_ids",
"(",
"self",
",",
"entity",
":",
"List",
"[",
"dict",
"]",
")",
"->",
"List",
"[",
"dict",
"]",
":",
"label",
"=",
"entity",
"[",
"'label'",
"]",
"existing_ids",
"=",
"entity",
"[",
"'existing_ids'",
"]",
"for",
"existing_id",
"in",
"existing_ids",
":",
"if",
"'curie'",
"not",
"in",
"existing_id",
"or",
"'iri'",
"not",
"in",
"existing_id",
":",
"raise",
"ValueError",
"(",
"f'Missing needing key(s) in existing_ids for label: {label}'",
")",
"elif",
"len",
"(",
"existing_id",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"f'Extra keys not recognized in existing_ids for label: {label}'",
")",
"return",
"entity"
]
| 52.083333 | 15.75 |
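A standalone sketch of the same validation outside the class; the example entity values are illustrative, not taken from the source:

def check_existing_ids(entity: dict) -> dict:
    label = entity['label']
    for existing_id in entity['existing_ids']:
        # each existing_id must carry exactly the keys 'curie' and 'iri'
        if 'curie' not in existing_id or 'iri' not in existing_id:
            raise ValueError(f'Missing needed key(s) in existing_ids for label: {label}')
        elif len(existing_id) > 2:
            raise ValueError(f'Extra keys not recognized in existing_ids for label: {label}')
    return entity

entity = {'label': 'brain',
          'existing_ids': [{'curie': 'UBERON:0000955',
                            'iri': 'http://purl.obolibrary.org/obo/UBERON_0000955'}]}
assert check_existing_ids(entity) is entity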
def highest_expr_genes(
adata, n_top=30, show=None, save=None,
ax=None, gene_symbols=None, **kwds
):
"""\
Fraction of counts assigned to each gene over all cells.
Computes, for each gene, the fraction of counts assigned to that gene within
a cell. The `n_top` genes with the highest mean fraction over all cells are
plotted as boxplots.
This plot is similar to the `scater` package function `plotHighestExprs(type
= "highest-expression")`, see `here
<https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/vignette-qc.html>`__. Quoting
from there:
*We expect to see the “usual suspects”, i.e., mitochondrial genes, actin,
ribosomal protein, MALAT1. A few spike-in transcripts may also be
present here, though if all of the spike-ins are in the top 50, it
suggests that too much spike-in RNA was added. A large number of
pseudo-genes or predicted genes may indicate problems with alignment.*
-- Davis McCarthy and Aaron Lun
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
n_top : `int`, optional (default:30)
        Number of top genes to plot.
{show_save_ax}
gene_symbols : `str`, optional (default:None)
Key for field in .var that stores gene symbols if you do not want to use .var_names.
**kwds : keyword arguments
Are passed to `seaborn.boxplot`.
Returns
-------
    If `show==False`, a :class:`~matplotlib.axes.Axes`.
"""
from scipy.sparse import issparse
# compute the percentage of each gene per cell
dat = normalize_per_cell(adata, counts_per_cell_after=100, copy=True)
# identify the genes with the highest mean
if issparse(dat.X):
dat.var['mean_percent'] = dat.X.mean(axis=0).A1
else:
dat.var['mean_percent'] = dat.X.mean(axis=0)
top = dat.var.sort_values('mean_percent', ascending=False).index[:n_top]
dat = dat[:, top]
columns = dat.var_names if gene_symbols is None else dat.var[gene_symbols]
dat = pd.DataFrame(dat.X.toarray(), index=dat.obs_names, columns=columns)
if not ax:
# figsize is hardcoded to produce a tall image. To change the fig size,
# a matplotlib.axes.Axes object needs to be passed.
height = (n_top * 0.2) + 1.5
fig, ax = plt.subplots(figsize=(5, height))
sns.boxplot(data=dat, orient='h', ax=ax, fliersize=1, **kwds)
ax.set_xlabel('% of total counts')
utils.savefig_or_show('highest_expr_genes', show=show, save=save)
return ax if show == False else None | [
"def",
"highest_expr_genes",
"(",
"adata",
",",
"n_top",
"=",
"30",
",",
"show",
"=",
"None",
",",
"save",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"gene_symbols",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"from",
"scipy",
".",
"sparse",
"import",
"issparse",
"# compute the percentage of each gene per cell",
"dat",
"=",
"normalize_per_cell",
"(",
"adata",
",",
"counts_per_cell_after",
"=",
"100",
",",
"copy",
"=",
"True",
")",
"# identify the genes with the highest mean",
"if",
"issparse",
"(",
"dat",
".",
"X",
")",
":",
"dat",
".",
"var",
"[",
"'mean_percent'",
"]",
"=",
"dat",
".",
"X",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
".",
"A1",
"else",
":",
"dat",
".",
"var",
"[",
"'mean_percent'",
"]",
"=",
"dat",
".",
"X",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"top",
"=",
"dat",
".",
"var",
".",
"sort_values",
"(",
"'mean_percent'",
",",
"ascending",
"=",
"False",
")",
".",
"index",
"[",
":",
"n_top",
"]",
"dat",
"=",
"dat",
"[",
":",
",",
"top",
"]",
"columns",
"=",
"dat",
".",
"var_names",
"if",
"gene_symbols",
"is",
"None",
"else",
"dat",
".",
"var",
"[",
"gene_symbols",
"]",
"dat",
"=",
"pd",
".",
"DataFrame",
"(",
"dat",
".",
"X",
".",
"toarray",
"(",
")",
",",
"index",
"=",
"dat",
".",
"obs_names",
",",
"columns",
"=",
"columns",
")",
"if",
"not",
"ax",
":",
"# figsize is hardcoded to produce a tall image. To change the fig size,",
"# a matplotlib.axes.Axes object needs to be passed.",
"height",
"=",
"(",
"n_top",
"*",
"0.2",
")",
"+",
"1.5",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"(",
"5",
",",
"height",
")",
")",
"sns",
".",
"boxplot",
"(",
"data",
"=",
"dat",
",",
"orient",
"=",
"'h'",
",",
"ax",
"=",
"ax",
",",
"fliersize",
"=",
"1",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"set_xlabel",
"(",
"'% of total counts'",
")",
"utils",
".",
"savefig_or_show",
"(",
"'highest_expr_genes'",
",",
"show",
"=",
"show",
",",
"save",
"=",
"save",
")",
"return",
"ax",
"if",
"show",
"==",
"False",
"else",
"None"
]
| 39.6875 | 23.515625 |
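A usage sketch, assuming the function is exposed as scanpy's sc.pl.highest_expr_genes; the dataset loader call is an assumption and downloads a small demo dataset on first use:

import scanpy as sc

adata = sc.datasets.pbmc3k()                   # small 10x demo dataset (raw counts)
ax = sc.pl.highest_expr_genes(adata, n_top=20, show=False)
ax.figure.savefig('highest_expr_genes.png')    # show=False hands back the Axes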
def from_timestamp(timestamp: TimestampPrimitive) -> ulid.ULID:
"""
Create a new :class:`~ulid.ulid.ULID` instance using a timestamp value of a supported type.
The following types are supported for timestamp values:
* :class:`~datetime.datetime`
* :class:`~int`
* :class:`~float`
* :class:`~str`
* :class:`~memoryview`
* :class:`~ulid.ulid.Timestamp`
* :class:`~ulid.ulid.ULID`
* :class:`~bytes`
* :class:`~bytearray`
:param timestamp: Unix timestamp in seconds
:type timestamp: See docstring for types
:return: ULID using given timestamp and new randomness
:rtype: :class:`~ulid.ulid.ULID`
:raises ValueError: when the value is an unsupported type
:raises ValueError: when the value is a string and cannot be Base32 decoded
    :raises ValueError: when the value is or was converted to something other than 48 bits
"""
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.timestamp()
if isinstance(timestamp, (int, float)):
timestamp = int(timestamp * 1000.0).to_bytes(6, byteorder='big')
elif isinstance(timestamp, str):
timestamp = base32.decode_timestamp(timestamp)
elif isinstance(timestamp, memoryview):
timestamp = timestamp.tobytes()
elif isinstance(timestamp, ulid.Timestamp):
timestamp = timestamp.bytes
elif isinstance(timestamp, ulid.ULID):
timestamp = timestamp.timestamp().bytes
if not isinstance(timestamp, (bytes, bytearray)):
raise ValueError('Expected datetime, int, float, str, memoryview, Timestamp, ULID, '
'bytes, or bytearray; got {}'.format(type(timestamp).__name__))
length = len(timestamp)
if length != 6:
raise ValueError('Expects timestamp to be 48 bits; got {} bytes'.format(length))
randomness = os.urandom(10)
return ulid.ULID(timestamp + randomness) | [
"def",
"from_timestamp",
"(",
"timestamp",
":",
"TimestampPrimitive",
")",
"->",
"ulid",
".",
"ULID",
":",
"if",
"isinstance",
"(",
"timestamp",
",",
"datetime",
".",
"datetime",
")",
":",
"timestamp",
"=",
"timestamp",
".",
"timestamp",
"(",
")",
"if",
"isinstance",
"(",
"timestamp",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"timestamp",
"=",
"int",
"(",
"timestamp",
"*",
"1000.0",
")",
".",
"to_bytes",
"(",
"6",
",",
"byteorder",
"=",
"'big'",
")",
"elif",
"isinstance",
"(",
"timestamp",
",",
"str",
")",
":",
"timestamp",
"=",
"base32",
".",
"decode_timestamp",
"(",
"timestamp",
")",
"elif",
"isinstance",
"(",
"timestamp",
",",
"memoryview",
")",
":",
"timestamp",
"=",
"timestamp",
".",
"tobytes",
"(",
")",
"elif",
"isinstance",
"(",
"timestamp",
",",
"ulid",
".",
"Timestamp",
")",
":",
"timestamp",
"=",
"timestamp",
".",
"bytes",
"elif",
"isinstance",
"(",
"timestamp",
",",
"ulid",
".",
"ULID",
")",
":",
"timestamp",
"=",
"timestamp",
".",
"timestamp",
"(",
")",
".",
"bytes",
"if",
"not",
"isinstance",
"(",
"timestamp",
",",
"(",
"bytes",
",",
"bytearray",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Expected datetime, int, float, str, memoryview, Timestamp, ULID, '",
"'bytes, or bytearray; got {}'",
".",
"format",
"(",
"type",
"(",
"timestamp",
")",
".",
"__name__",
")",
")",
"length",
"=",
"len",
"(",
"timestamp",
")",
"if",
"length",
"!=",
"6",
":",
"raise",
"ValueError",
"(",
"'Expects timestamp to be 48 bits; got {} bytes'",
".",
"format",
"(",
"length",
")",
")",
"randomness",
"=",
"os",
".",
"urandom",
"(",
"10",
")",
"return",
"ulid",
".",
"ULID",
"(",
"timestamp",
"+",
"randomness",
")"
]
| 39.425532 | 19.042553 |
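A usage sketch, assuming this is the ulid-py package's module-level from_timestamp:

import time
import ulid

u = ulid.from_timestamp(time.time())   # float seconds -> 48-bit ms timestamp + 80 random bits
print(str(u))                          # 26-character Base32 ULID
print(u.timestamp().datetime)          # the timestamp round-trips at millisecond precision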
def CrearLiquidacion(self, nro_orden=None, cuit_comprador=None,
nro_act_comprador=None, nro_ing_bruto_comprador=None,
cod_tipo_operacion=None,
es_liquidacion_propia=None, es_canje=None,
cod_puerto=None, des_puerto_localidad=None, cod_grano=None,
cuit_vendedor=None, nro_ing_bruto_vendedor=None,
actua_corredor=None, liquida_corredor=None, cuit_corredor=None,
comision_corredor=None, nro_ing_bruto_corredor=None,
fecha_precio_operacion=None,
precio_ref_tn=None, cod_grado_ref=None, cod_grado_ent=None,
factor_ent=None, precio_flete_tn=None, cont_proteico=None,
alic_iva_operacion=None, campania_ppal=None,
cod_localidad_procedencia=None,
datos_adicionales=None, pto_emision=1, cod_prov_procedencia=None,
peso_neto_sin_certificado=None, val_grado_ent=None,
cod_localidad_procedencia_sin_certificado=None,
cod_prov_procedencia_sin_certificado=None,
nro_contrato=None,
**kwargs
):
"Inicializa internamente los datos de una liquidación para autorizar"
        # clean up the special fields (per AFIP validations)
if alic_iva_operacion == 0:
            alic_iva_operacion = None # do not report the VAT rate for monotributo taxpayers
if val_grado_ent == 0:
val_grado_ent = None
        # drop the broker data when it does not apply
if actua_corredor == "N":
cuit_corredor = None
comision_corredor = None
nro_ing_bruto_corredor = None
        # if not applicable, remove the optional certified net weight field
if not peso_neto_sin_certificado or not int(peso_neto_sin_certificado):
peso_neto_sin_certificado = None
if cod_puerto and int(cod_puerto) != 14:
            des_puerto_localidad = None # validation 1630
        # clear the optional fields so they are not sent when not applicable:
if cod_grado_ref == "":
cod_grado_ref = None
if cod_grado_ent == "":
cod_grado_ent = None
if val_grado_ent == 0:
val_grado_ent = None
        # build the dict with the general fields of the settlement:
self.liquidacion = dict(
ptoEmision=pto_emision,
nroOrden=nro_orden,
cuitComprador=cuit_comprador,
nroActComprador=nro_act_comprador,
nroIngBrutoComprador=nro_ing_bruto_comprador,
codTipoOperacion=cod_tipo_operacion,
esLiquidacionPropia=es_liquidacion_propia,
esCanje=es_canje,
codPuerto=cod_puerto,
desPuertoLocalidad=des_puerto_localidad,
codGrano=cod_grano,
cuitVendedor=cuit_vendedor,
nroIngBrutoVendedor=nro_ing_bruto_vendedor,
actuaCorredor=actua_corredor,
liquidaCorredor=liquida_corredor,
cuitCorredor=cuit_corredor,
comisionCorredor=comision_corredor,
nroIngBrutoCorredor=nro_ing_bruto_corredor,
fechaPrecioOperacion=fecha_precio_operacion,
precioRefTn=precio_ref_tn,
codGradoRef=cod_grado_ref,
codGradoEnt=cod_grado_ent,
valGradoEnt=val_grado_ent,
factorEnt=factor_ent,
precioFleteTn=precio_flete_tn,
contProteico=cont_proteico,
alicIvaOperacion=alic_iva_operacion,
campaniaPPal=campania_ppal,
codLocalidadProcedencia=cod_localidad_procedencia,
codProvProcedencia=cod_prov_procedencia,
datosAdicionales=datos_adicionales,
pesoNetoSinCertificado=peso_neto_sin_certificado,
numeroContrato=nro_contrato or None,
certificados=[],
)
        # for backwards compatibility, "copy" the fields if there is no certificate:
if peso_neto_sin_certificado:
if cod_localidad_procedencia_sin_certificado is None:
cod_localidad_procedencia_sin_certificado = cod_localidad_procedencia
if cod_prov_procedencia_sin_certificado is None:
cod_prov_procedencia_sin_certificado = cod_prov_procedencia
self.liquidacion.update(dict(
codLocalidadProcedenciaSinCertificado=cod_localidad_procedencia_sin_certificado,
codProvProcedenciaSinCertificado=cod_prov_procedencia_sin_certificado,
))
        # initialize the lists that will hold the withholdings and deductions:
self.retenciones = []
self.deducciones = []
self.percepciones = []
        self.opcionales = [] # for advance payment (anticipo)
        # clear the internal structures not usable in this case
self.certificacion = None
return True | [
"def",
"CrearLiquidacion",
"(",
"self",
",",
"nro_orden",
"=",
"None",
",",
"cuit_comprador",
"=",
"None",
",",
"nro_act_comprador",
"=",
"None",
",",
"nro_ing_bruto_comprador",
"=",
"None",
",",
"cod_tipo_operacion",
"=",
"None",
",",
"es_liquidacion_propia",
"=",
"None",
",",
"es_canje",
"=",
"None",
",",
"cod_puerto",
"=",
"None",
",",
"des_puerto_localidad",
"=",
"None",
",",
"cod_grano",
"=",
"None",
",",
"cuit_vendedor",
"=",
"None",
",",
"nro_ing_bruto_vendedor",
"=",
"None",
",",
"actua_corredor",
"=",
"None",
",",
"liquida_corredor",
"=",
"None",
",",
"cuit_corredor",
"=",
"None",
",",
"comision_corredor",
"=",
"None",
",",
"nro_ing_bruto_corredor",
"=",
"None",
",",
"fecha_precio_operacion",
"=",
"None",
",",
"precio_ref_tn",
"=",
"None",
",",
"cod_grado_ref",
"=",
"None",
",",
"cod_grado_ent",
"=",
"None",
",",
"factor_ent",
"=",
"None",
",",
"precio_flete_tn",
"=",
"None",
",",
"cont_proteico",
"=",
"None",
",",
"alic_iva_operacion",
"=",
"None",
",",
"campania_ppal",
"=",
"None",
",",
"cod_localidad_procedencia",
"=",
"None",
",",
"datos_adicionales",
"=",
"None",
",",
"pto_emision",
"=",
"1",
",",
"cod_prov_procedencia",
"=",
"None",
",",
"peso_neto_sin_certificado",
"=",
"None",
",",
"val_grado_ent",
"=",
"None",
",",
"cod_localidad_procedencia_sin_certificado",
"=",
"None",
",",
"cod_prov_procedencia_sin_certificado",
"=",
"None",
",",
"nro_contrato",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# limpio los campos especiales (segun validaciones de AFIP)",
"if",
"alic_iva_operacion",
"==",
"0",
":",
"alic_iva_operacion",
"=",
"None",
"# no informar alicuota p/ monotributo",
"if",
"val_grado_ent",
"==",
"0",
":",
"val_grado_ent",
"=",
"None",
"# borrando datos corredor si no corresponden",
"if",
"actua_corredor",
"==",
"\"N\"",
":",
"cuit_corredor",
"=",
"None",
"comision_corredor",
"=",
"None",
"nro_ing_bruto_corredor",
"=",
"None",
"# si no corresponde elimino el peso neto certificado campo opcional",
"if",
"not",
"peso_neto_sin_certificado",
"or",
"not",
"int",
"(",
"peso_neto_sin_certificado",
")",
":",
"peso_neto_sin_certificado",
"=",
"None",
"if",
"cod_puerto",
"and",
"int",
"(",
"cod_puerto",
")",
"!=",
"14",
":",
"des_puerto_localidad",
"=",
"None",
"# validacion 1630",
"# limpio los campos opcionales para no enviarlos si no corresponde:",
"if",
"cod_grado_ref",
"==",
"\"\"",
":",
"cod_grado_ref",
"=",
"None",
"if",
"cod_grado_ent",
"==",
"\"\"",
":",
"cod_grado_ent",
"=",
"None",
"if",
"val_grado_ent",
"==",
"0",
":",
"val_grado_ent",
"=",
"None",
"# creo el diccionario con los campos generales de la liquidación:",
"self",
".",
"liquidacion",
"=",
"dict",
"(",
"ptoEmision",
"=",
"pto_emision",
",",
"nroOrden",
"=",
"nro_orden",
",",
"cuitComprador",
"=",
"cuit_comprador",
",",
"nroActComprador",
"=",
"nro_act_comprador",
",",
"nroIngBrutoComprador",
"=",
"nro_ing_bruto_comprador",
",",
"codTipoOperacion",
"=",
"cod_tipo_operacion",
",",
"esLiquidacionPropia",
"=",
"es_liquidacion_propia",
",",
"esCanje",
"=",
"es_canje",
",",
"codPuerto",
"=",
"cod_puerto",
",",
"desPuertoLocalidad",
"=",
"des_puerto_localidad",
",",
"codGrano",
"=",
"cod_grano",
",",
"cuitVendedor",
"=",
"cuit_vendedor",
",",
"nroIngBrutoVendedor",
"=",
"nro_ing_bruto_vendedor",
",",
"actuaCorredor",
"=",
"actua_corredor",
",",
"liquidaCorredor",
"=",
"liquida_corredor",
",",
"cuitCorredor",
"=",
"cuit_corredor",
",",
"comisionCorredor",
"=",
"comision_corredor",
",",
"nroIngBrutoCorredor",
"=",
"nro_ing_bruto_corredor",
",",
"fechaPrecioOperacion",
"=",
"fecha_precio_operacion",
",",
"precioRefTn",
"=",
"precio_ref_tn",
",",
"codGradoRef",
"=",
"cod_grado_ref",
",",
"codGradoEnt",
"=",
"cod_grado_ent",
",",
"valGradoEnt",
"=",
"val_grado_ent",
",",
"factorEnt",
"=",
"factor_ent",
",",
"precioFleteTn",
"=",
"precio_flete_tn",
",",
"contProteico",
"=",
"cont_proteico",
",",
"alicIvaOperacion",
"=",
"alic_iva_operacion",
",",
"campaniaPPal",
"=",
"campania_ppal",
",",
"codLocalidadProcedencia",
"=",
"cod_localidad_procedencia",
",",
"codProvProcedencia",
"=",
"cod_prov_procedencia",
",",
"datosAdicionales",
"=",
"datos_adicionales",
",",
"pesoNetoSinCertificado",
"=",
"peso_neto_sin_certificado",
",",
"numeroContrato",
"=",
"nro_contrato",
"or",
"None",
",",
"certificados",
"=",
"[",
"]",
",",
")",
"# para compatibilidad hacia atras, \"copiar\" los campos si no hay cert:",
"if",
"peso_neto_sin_certificado",
":",
"if",
"cod_localidad_procedencia_sin_certificado",
"is",
"None",
":",
"cod_localidad_procedencia_sin_certificado",
"=",
"cod_localidad_procedencia",
"if",
"cod_prov_procedencia_sin_certificado",
"is",
"None",
":",
"cod_prov_procedencia_sin_certificado",
"=",
"cod_prov_procedencia",
"self",
".",
"liquidacion",
".",
"update",
"(",
"dict",
"(",
"codLocalidadProcedenciaSinCertificado",
"=",
"cod_localidad_procedencia_sin_certificado",
",",
"codProvProcedenciaSinCertificado",
"=",
"cod_prov_procedencia_sin_certificado",
",",
")",
")",
"# inicializo las listas que contentran las retenciones y deducciones:",
"self",
".",
"retenciones",
"=",
"[",
"]",
"self",
".",
"deducciones",
"=",
"[",
"]",
"self",
".",
"percepciones",
"=",
"[",
"]",
"self",
".",
"opcionales",
"=",
"[",
"]",
"# para anticipo",
"# limpio las estructuras internas no utilizables en este caso",
"self",
".",
"certificacion",
"=",
"None",
"return",
"True"
]
| 51.682692 | 21.028846 |
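A heavily hedged usage sketch, assuming the pyafipws WSLPG client; every field value below is an illustrative placeholder, not data from the source:

from pyafipws.wslpg import WSLPG

wslpg = WSLPG()
ok = wslpg.CrearLiquidacion(
    pto_emision=1, nro_orden=1,
    cuit_comprador="20111111112", nro_act_comprador=40,
    nro_ing_bruto_comprador="123", cod_tipo_operacion=1,
    es_liquidacion_propia="N", es_canje="N",
    cod_puerto=14, des_puerto_localidad="DELTA",   # localidad is only kept for port 14
    cod_grano=31, cuit_vendedor="23000000019", nro_ing_bruto_vendedor="123",
    actua_corredor="N", liquida_corredor="N",      # "N" clears the broker fields
    fecha_precio_operacion="2014-01-01", precio_ref_tn=2000,
    alic_iva_operacion=10.5, campania_ppal=1314,
    cod_localidad_procedencia=5544, cod_prov_procedencia=12,
    peso_neto_sin_certificado=10000,
)
assert ok and wslpg.liquidacion["ptoEmision"] == 1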
def part(self, target, reason=None):
"""quit a channel"""
if reason:
target += ' :' + reason
self.send_line('PART %s' % target) | [
"def",
"part",
"(",
"self",
",",
"target",
",",
"reason",
"=",
"None",
")",
":",
"if",
"reason",
":",
"target",
"+=",
"' :'",
"+",
"reason",
"self",
".",
"send_line",
"(",
"'PART %s'",
"%",
"target",
")"
]
| 31.8 | 6.6 |
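The method only formats and sends a single IRC line; a standalone sketch of that formatting (format_part is an illustrative name):

def format_part(target, reason=None):
    # an optional reason is appended as the trailing IRC parameter
    if reason:
        target += ' :' + reason
    return 'PART %s' % target

assert format_part('#chan') == 'PART #chan'
assert format_part('#chan', 'bye') == 'PART #chan :bye'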
def get_edge_string(self, i):
"""Return a string based on the bond order"""
order = self.orders[i]
if order == 0:
return Graph.get_edge_string(self, i)
else:
# pad with zeros to make sure that string sort is identical to number sort
return "%03i" % order | [
"def",
"get_edge_string",
"(",
"self",
",",
"i",
")",
":",
"order",
"=",
"self",
".",
"orders",
"[",
"i",
"]",
"if",
"order",
"==",
"0",
":",
"return",
"Graph",
".",
"get_edge_string",
"(",
"self",
",",
"i",
")",
"else",
":",
"# pad with zeros to make sure that string sort is identical to number sort",
"return",
"\"%03i\"",
"%",
"order"
]
| 39.375 | 16 |
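Why the zero padding matters: with a fixed width of three digits, lexicographic order matches numeric order, which plain str() does not guarantee:

orders = [10, 2, 1]
print(sorted('%03i' % o for o in orders))   # ['001', '002', '010'], matches numeric order
print(sorted(str(o) for o in orders))       # ['1', '10', '2'], plain strings do not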
def chown(self, path, owner, group, recursive=False):
"""
Use snakebite.chown/chgrp, if available.
One of owner or group must be set. Just setting group calls chgrp.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param owner: new owner, can be blank
:type owner: string
:param group: new group, can be blank
:type group: string
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
        :return: True when owner is given and all changes succeeded, otherwise the list of items changed by chgrp
"""
bite = self.get_bite()
if owner:
if group:
return all(bite.chown(self.list_path(path), "%s:%s" % (owner, group),
recurse=recursive))
return all(bite.chown(self.list_path(path), owner, recurse=recursive))
return list(bite.chgrp(self.list_path(path), group, recurse=recursive)) | [
"def",
"chown",
"(",
"self",
",",
"path",
",",
"owner",
",",
"group",
",",
"recursive",
"=",
"False",
")",
":",
"bite",
"=",
"self",
".",
"get_bite",
"(",
")",
"if",
"owner",
":",
"if",
"group",
":",
"return",
"all",
"(",
"bite",
".",
"chown",
"(",
"self",
".",
"list_path",
"(",
"path",
")",
",",
"\"%s:%s\"",
"%",
"(",
"owner",
",",
"group",
")",
",",
"recurse",
"=",
"recursive",
")",
")",
"return",
"all",
"(",
"bite",
".",
"chown",
"(",
"self",
".",
"list_path",
"(",
"path",
")",
",",
"owner",
",",
"recurse",
"=",
"recursive",
")",
")",
"return",
"list",
"(",
"bite",
".",
"chgrp",
"(",
"self",
".",
"list_path",
"(",
"path",
")",
",",
"group",
",",
"recurse",
"=",
"recursive",
")",
")"
]
| 42.565217 | 18.826087 |
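A usage sketch, assuming a snakebite Client against a hypothetical NameNode address; chown/chgrp return lazy generators, so wrapping them in all()/list() is what actually forces the RPC calls to run:

from snakebite.client import Client

bite = Client('namenode.example.com', 8020)    # hypothetical NameNode host/port
ok = all(bite.chown(['/tmp/data'], 'alice:analytics', recurse=True))
changed = list(bite.chgrp(['/tmp/data'], 'analytics', recurse=True))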
def get_port_def(port_num, proto='tcp'):
'''
Given a port number and protocol, returns the port definition expected by
docker-py. For TCP ports this is simply an integer, for UDP ports this is
(port_num, 'udp').
port_num can also be a string in the format 'port_num/udp'. If so, the
"proto" argument will be ignored. The reason we need to be able to pass in
the protocol separately is because this function is sometimes invoked on
data derived from a port range (e.g. '2222-2223/udp'). In these cases the
protocol has already been stripped off and the port range resolved into the
start and end of the range, and get_port_def() is invoked once for each
port number in that range. So, rather than munge udp ports back into
strings before passing them to this function, the function will see if it
has a string and use the protocol from it if present.
This function does not catch the TypeError or ValueError which would be
raised if the port number is non-numeric. This function either needs to be
run on known good input, or should be run within a try/except that catches
these two exceptions.
'''
try:
port_num, _, port_num_proto = port_num.partition('/')
except AttributeError:
pass
else:
if port_num_proto:
proto = port_num_proto
try:
if proto.lower() == 'udp':
return int(port_num), 'udp'
except AttributeError:
pass
return int(port_num) | [
"def",
"get_port_def",
"(",
"port_num",
",",
"proto",
"=",
"'tcp'",
")",
":",
"try",
":",
"port_num",
",",
"_",
",",
"port_num_proto",
"=",
"port_num",
".",
"partition",
"(",
"'/'",
")",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"if",
"port_num_proto",
":",
"proto",
"=",
"port_num_proto",
"try",
":",
"if",
"proto",
".",
"lower",
"(",
")",
"==",
"'udp'",
":",
"return",
"int",
"(",
"port_num",
")",
",",
"'udp'",
"except",
"AttributeError",
":",
"pass",
"return",
"int",
"(",
"port_num",
")"
]
| 43.235294 | 26.823529 |
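Concrete inputs and outputs of the parsing rules described above:

assert get_port_def(2222) == 2222                  # TCP -> bare int
assert get_port_def('2222') == 2222
assert get_port_def('2222/udp') == (2222, 'udp')   # protocol parsed from the string
assert get_port_def(2222, 'udp') == (2222, 'udp')  # protocol passed separately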
def canonical_text(self, text):
"""Standardize an input TeX-file contents.
Currently:
* removes comments, unwrapping comment-wrapped lines.
"""
out = []
line_continues_a_comment = False
for line in text.splitlines():
line,comment = self.comment_re.findall(line)[0]
if line_continues_a_comment == True:
out[-1] = out[-1] + line.lstrip()
else:
out.append(line)
line_continues_a_comment = len(comment) > 0
return '\n'.join(out).rstrip()+'\n' | [
"def",
"canonical_text",
"(",
"self",
",",
"text",
")",
":",
"out",
"=",
"[",
"]",
"line_continues_a_comment",
"=",
"False",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
")",
":",
"line",
",",
"comment",
"=",
"self",
".",
"comment_re",
".",
"findall",
"(",
"line",
")",
"[",
"0",
"]",
"if",
"line_continues_a_comment",
"==",
"True",
":",
"out",
"[",
"-",
"1",
"]",
"=",
"out",
"[",
"-",
"1",
"]",
"+",
"line",
".",
"lstrip",
"(",
")",
"else",
":",
"out",
".",
"append",
"(",
"line",
")",
"line_continues_a_comment",
"=",
"len",
"(",
"comment",
")",
">",
"0",
"return",
"'\\n'",
".",
"join",
"(",
"out",
")",
".",
"rstrip",
"(",
")",
"+",
"'\\n'"
]
| 35.625 | 12.8125 |
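A runnable sketch of the same unwrapping; the class's real comment_re is not shown in this row, so the pattern below ('%' starts a TeX comment unless escaped) is an assumption:

import re

comment_re = re.compile(r'^((?:[^%\\]|\\.)*)(%.*)?$')

def canonical_text(text):
    out = []
    line_continues_a_comment = False
    for line in text.splitlines():
        line, comment = comment_re.findall(line)[0]
        if line_continues_a_comment:
            out[-1] = out[-1] + line.lstrip()   # unwrap a %-continued line
        else:
            out.append(line)
        line_continues_a_comment = len(comment) > 0
    return '\n'.join(out).rstrip() + '\n'

# TeX uses a trailing '%' to join lines without inserting whitespace
assert canonical_text('hello%\nworld') == 'helloworld\n'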
def valid_hotp(
token,
secret,
last=1,
trials=1000,
digest_method=hashlib.sha1,
token_length=6,
):
"""Check if given token is valid for given secret. Return interval number
that was successful, or False if not found.
:param token: token being checked
:type token: int or str
:param secret: secret for which token is checked
:type secret: str
:param last: last used interval (start checking with next one)
:type last: int
:param trials: number of intervals to check after 'last'
:type trials: int
:param digest_method: method of generating digest (hashlib.sha1 by default)
:type digest_method: callable
:param token_length: length of the token (6 by default)
:type token_length: int
:return: interval number, or False if check unsuccessful
:rtype: int or bool
>>> secret = b'MFRGGZDFMZTWQ2LK'
>>> valid_hotp(713385, secret, last=1, trials=5)
4
>>> valid_hotp(865438, secret, last=1, trials=5)
False
>>> valid_hotp(713385, secret, last=4, trials=5)
False
"""
if not _is_possible_token(token, token_length=token_length):
return False
for i in six.moves.xrange(last + 1, last + trials + 1):
token_candidate = get_hotp(
secret=secret,
intervals_no=i,
digest_method=digest_method,
token_length=token_length,
)
if token_candidate == int(token):
return i
return False | [
"def",
"valid_hotp",
"(",
"token",
",",
"secret",
",",
"last",
"=",
"1",
",",
"trials",
"=",
"1000",
",",
"digest_method",
"=",
"hashlib",
".",
"sha1",
",",
"token_length",
"=",
"6",
",",
")",
":",
"if",
"not",
"_is_possible_token",
"(",
"token",
",",
"token_length",
"=",
"token_length",
")",
":",
"return",
"False",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"xrange",
"(",
"last",
"+",
"1",
",",
"last",
"+",
"trials",
"+",
"1",
")",
":",
"token_candidate",
"=",
"get_hotp",
"(",
"secret",
"=",
"secret",
",",
"intervals_no",
"=",
"i",
",",
"digest_method",
"=",
"digest_method",
",",
"token_length",
"=",
"token_length",
",",
")",
"if",
"token_candidate",
"==",
"int",
"(",
"token",
")",
":",
"return",
"i",
"return",
"False"
]
| 31.826087 | 17.956522 |
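A server-side usage sketch, assuming this module is the onetimepass package; persisting the returned interval number is what prevents token replay:

from onetimepass import valid_hotp

secret = b'MFRGGZDFMZTWQ2LK'
last_used = 1                                   # last interval accepted for this user
match = valid_hotp(713385, secret, last=last_used, trials=5)
if match:                                       # 4 for this token, per the doctest above
    last_used = match                           # store it so the same token is rejected next time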