text (string, lengths 89–104k) | code_tokens (list) | avg_line_len (float64, 7.91–980) | score (float64, 0–630)
---|---|---|---|
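Each row below pairs a raw `text` snippet with its `code_tokens` list, followed by the row's two statistics. The preprocessing pipeline behind the numeric columns is not shown in this dump; the sketch below assumes `avg_line_len` is the mean number of characters per line of `text`, which is consistent with the column's range, and makes no guess about how `score` is computed.

```python
def avg_line_len(text: str) -> float:
    # Assumed definition: mean characters per line of the `text` field.
    lines = text.splitlines()
    return sum(len(line) for line in lines) / len(lines)
```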
def header_echo(cls, request,
api_key: (Ptypes.header, String('API key'))) -> [
(200, 'Ok', String)]:
'''Echo the header parameter.'''
log.info('Echoing header param, value is: {}'.format(api_key))
for i in range(randint(0, MAX_LOOP_DURATION)):
yield
msg = 'The value sent was: {}'.format(api_key)
Respond(200, msg)
|
[
"def",
"header_echo",
"(",
"cls",
",",
"request",
",",
"api_key",
":",
"(",
"Ptypes",
".",
"header",
",",
"String",
"(",
"'API key'",
")",
")",
")",
"->",
"[",
"(",
"200",
",",
"'Ok'",
",",
"String",
")",
"]",
":",
"log",
".",
"info",
"(",
"'Echoing header param, value is: {}'",
".",
"format",
"(",
"api_key",
")",
")",
"for",
"i",
"in",
"range",
"(",
"randint",
"(",
"0",
",",
"MAX_LOOP_DURATION",
")",
")",
":",
"yield",
"msg",
"=",
"'The value sent was: {}'",
".",
"format",
"(",
"api_key",
")",
"Respond",
"(",
"200",
",",
"msg",
")"
] | 43.444444 | 15.888889 |
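The `code_tokens` column holds the snippet's lexical tokens with the docstring dropped. The dataset's actual tokenizer is not identified in this dump; a rough stdlib approximation (which, unlike the column above, keeps docstrings) could look like this:

```python
import io
import tokenize

def lexical_tokens(source: str) -> list:
    # Keep only the visible lexical tokens, skipping layout markers
    # (NEWLINE, INDENT, DEDENT) that the dataset also omits.
    keep = {tokenize.NAME, tokenize.OP, tokenize.NUMBER,
            tokenize.STRING, tokenize.COMMENT}
    return [tok.string
            for tok in tokenize.generate_tokens(io.StringIO(source).readline)
            if tok.type in keep]
```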
def determine_context(device_ids: List[int],
use_cpu: bool,
disable_device_locking: bool,
lock_dir: str,
exit_stack: ExitStack) -> List[mx.Context]:
"""
Determine the MXNet context to run on (CPU or GPU).
:param device_ids: List of device as defined from the CLI.
:param use_cpu: Whether to use the CPU instead of GPU(s).
:param disable_device_locking: Disable Sockeye's device locking feature.
:param lock_dir: Directory to place device lock files in.
:param exit_stack: An ExitStack from contextlib.
:return: A list with the context(s) to run on.
"""
if use_cpu:
context = [mx.cpu()]
else:
num_gpus = get_num_gpus()
check_condition(num_gpus >= 1,
"No GPUs found, consider running on the CPU with --use-cpu ")
if disable_device_locking:
context = expand_requested_device_ids(device_ids)
else:
context = exit_stack.enter_context(acquire_gpus(device_ids, lock_dir=lock_dir))
context = [mx.gpu(gpu_id) for gpu_id in context]
return context
|
[
"def",
"determine_context",
"(",
"device_ids",
":",
"List",
"[",
"int",
"]",
",",
"use_cpu",
":",
"bool",
",",
"disable_device_locking",
":",
"bool",
",",
"lock_dir",
":",
"str",
",",
"exit_stack",
":",
"ExitStack",
")",
"->",
"List",
"[",
"mx",
".",
"Context",
"]",
":",
"if",
"use_cpu",
":",
"context",
"=",
"[",
"mx",
".",
"cpu",
"(",
")",
"]",
"else",
":",
"num_gpus",
"=",
"get_num_gpus",
"(",
")",
"check_condition",
"(",
"num_gpus",
">=",
"1",
",",
"\"No GPUs found, consider running on the CPU with --use-cpu \"",
")",
"if",
"disable_device_locking",
":",
"context",
"=",
"expand_requested_device_ids",
"(",
"device_ids",
")",
"else",
":",
"context",
"=",
"exit_stack",
".",
"enter_context",
"(",
"acquire_gpus",
"(",
"device_ids",
",",
"lock_dir",
"=",
"lock_dir",
")",
")",
"context",
"=",
"[",
"mx",
".",
"gpu",
"(",
"gpu_id",
")",
"for",
"gpu_id",
"in",
"context",
"]",
"return",
"context"
] | 42.37037 | 18.148148 |
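One design point in `determine_context` worth spelling out: the GPU locks taken by `acquire_gpus` must stay held for the whole run, so the caller's `ExitStack` is threaded in instead of a local `with` block that would release the locks on return. A self-contained sketch of that stdlib pattern, with the Sockeye lock replaced by a fake:

```python
from contextlib import ExitStack, contextmanager

@contextmanager
def fake_gpu_lock(ids):
    # Stand-in for acquire_gpus(); the real locking lives elsewhere in Sockeye.
    print("acquired", ids)
    try:
        yield ids
    finally:
        print("released", ids)

with ExitStack() as exit_stack:
    # enter_context() defers cleanup to the end of this block, so the
    # lock outlives the function that acquired it.
    gpu_ids = exit_stack.enter_context(fake_gpu_lock([0, 1]))
    print("working with", gpu_ids)
```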
def persistent_load(self, pid):
"""
Reconstruct a GLC object using the persistent ID.
This method should not be used externally. It is required by the unpickler super class.
Parameters
----------
pid : The persistent ID used in pickle file to save the GLC object.
Returns
----------
The GLC object.
"""
if len(pid) == 2:
        # Pre GLC-1.3 release behavior, without memoization
type_tag, filename = pid
abs_path = _os.path.join(self.gl_temp_storage_path, filename)
return _get_gl_object_from_persistent_id(type_tag, abs_path)
else:
        # Post GLC-1.3 release behavior, with memoization
type_tag, filename, object_id = pid
if object_id in self.gl_object_memo:
return self.gl_object_memo[object_id]
else:
abs_path = _os.path.join(self.gl_temp_storage_path, filename)
obj = _get_gl_object_from_persistent_id(type_tag, abs_path)
self.gl_object_memo[object_id] = obj
return obj
|
[
"def",
"persistent_load",
"(",
"self",
",",
"pid",
")",
":",
"if",
"len",
"(",
"pid",
")",
"==",
"2",
":",
"# Pre GLC-1.3 release behavior, without memorization",
"type_tag",
",",
"filename",
"=",
"pid",
"abs_path",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"self",
".",
"gl_temp_storage_path",
",",
"filename",
")",
"return",
"_get_gl_object_from_persistent_id",
"(",
"type_tag",
",",
"abs_path",
")",
"else",
":",
"# Post GLC-1.3 release behavior, with memorization",
"type_tag",
",",
"filename",
",",
"object_id",
"=",
"pid",
"if",
"object_id",
"in",
"self",
".",
"gl_object_memo",
":",
"return",
"self",
".",
"gl_object_memo",
"[",
"object_id",
"]",
"else",
":",
"abs_path",
"=",
"_os",
".",
"path",
".",
"join",
"(",
"self",
".",
"gl_temp_storage_path",
",",
"filename",
")",
"obj",
"=",
"_get_gl_object_from_persistent_id",
"(",
"type_tag",
",",
"abs_path",
")",
"self",
".",
"gl_object_memo",
"[",
"object_id",
"]",
"=",
"obj",
"return",
"obj"
] | 38.551724 | 22.62069 |
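`persistent_load` is the unpickling half of the stdlib pickle persistent-ID protocol: a pickler stores an opaque ID for objects saved out-of-band, and the unpickler's `persistent_load` resolves each ID back to an object (here a GLC object on disk, memoized after the first load). A minimal stdlib illustration of the hook pair, independent of GLC:

```python
import io
import pickle

class DemoPickler(pickle.Pickler):
    def persistent_id(self, obj):
        # Returning None pickles obj normally; anything else is stored
        # as a persistent ID the unpickler must resolve.
        return ("demo", obj) if isinstance(obj, str) else None

class DemoUnpickler(pickle.Unpickler):
    def persistent_load(self, pid):
        tag, payload = pid
        if tag != "demo":
            raise pickle.UnpicklingError("unsupported persistent id")
        return payload.upper()

buf = io.BytesIO()
DemoPickler(buf).dump(["hello", 42])
buf.seek(0)
print(DemoUnpickler(buf).load())  # ['HELLO', 42]
```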
def show(self, n=10, headers=(), tablefmt="simple", floatfmt="g", numalign="decimal",
stralign="left", missingval=""):
"""
Pretty print first n rows of sequence as a table. See
https://bitbucket.org/astanin/python-tabulate for details on tabulate parameters
:param n: Number of rows to show
:param headers: Passed to tabulate
:param tablefmt: Passed to tabulate
:param floatfmt: Passed to tabulate
:param numalign: Passed to tabulate
:param stralign: Passed to tabulate
:param missingval: Passed to tabulate
"""
formatted_seq = self.tabulate(n=n, headers=headers, tablefmt=tablefmt,
floatfmt=floatfmt, numalign=numalign, stralign=stralign,
missingval=missingval)
print(formatted_seq)
|
[
"def",
"show",
"(",
"self",
",",
"n",
"=",
"10",
",",
"headers",
"=",
"(",
")",
",",
"tablefmt",
"=",
"\"simple\"",
",",
"floatfmt",
"=",
"\"g\"",
",",
"numalign",
"=",
"\"decimal\"",
",",
"stralign",
"=",
"\"left\"",
",",
"missingval",
"=",
"\"\"",
")",
":",
"formatted_seq",
"=",
"self",
".",
"tabulate",
"(",
"n",
"=",
"n",
",",
"headers",
"=",
"headers",
",",
"tablefmt",
"=",
"tablefmt",
",",
"floatfmt",
"=",
"floatfmt",
",",
"numalign",
"=",
"numalign",
",",
"stralign",
"=",
"stralign",
",",
"missingval",
"=",
"missingval",
")",
"print",
"(",
"formatted_seq",
")"
] | 47.777778 | 16.777778 |
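`show` is a thin wrapper: it forwards every keyword to `self.tabulate`, which in turn wraps the tabulate package cited in the docstring. Assuming that package is installed, the underlying call behaves like:

```python
from tabulate import tabulate  # pip install tabulate

rows = [("alice", 3.14159), ("bob", None)]
print(tabulate(rows, headers=("name", "value"), tablefmt="simple",
               floatfmt="g", numalign="decimal", stralign="left",
               missingval=""))
# Renders a plain-text "simple" table; the None cell prints as missingval.
```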
def resample_from_array(
in_raster=None,
in_affine=None,
out_tile=None,
in_crs=None,
resampling="nearest",
nodataval=0
):
"""
Extract and resample from array to target tile.
Parameters
----------
in_raster : array
in_affine : ``Affine``
out_tile : ``BufferedTile``
resampling : string
one of rasterio's resampling methods (default: nearest)
nodataval : integer or float
raster nodata value (default: 0)
Returns
-------
resampled array : array
"""
# TODO rename function
if isinstance(in_raster, ma.MaskedArray):
pass
if isinstance(in_raster, np.ndarray):
in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)
elif isinstance(in_raster, ReferencedRaster):
in_affine = in_raster.affine
in_crs = in_raster.crs
in_raster = in_raster.data
elif isinstance(in_raster, tuple):
in_raster = ma.MaskedArray(
data=np.stack(in_raster),
mask=np.stack([
band.mask
if isinstance(band, ma.masked_array)
else np.where(band == nodataval, True, False)
for band in in_raster
]),
fill_value=nodataval
)
else:
raise TypeError("wrong input data type: %s" % type(in_raster))
if in_raster.ndim == 2:
in_raster = ma.expand_dims(in_raster, axis=0)
elif in_raster.ndim == 3:
pass
else:
raise TypeError("input array must have 2 or 3 dimensions")
if in_raster.fill_value != nodataval:
ma.set_fill_value(in_raster, nodataval)
out_shape = (in_raster.shape[0], ) + out_tile.shape
dst_data = np.empty(out_shape, in_raster.dtype)
in_raster = ma.masked_array(
data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval
)
reproject(
in_raster,
dst_data,
src_transform=in_affine,
src_crs=in_crs if in_crs else out_tile.crs,
dst_transform=out_tile.affine,
dst_crs=out_tile.crs,
resampling=Resampling[resampling]
)
return ma.MaskedArray(dst_data, mask=dst_data == nodataval)
|
[
"def",
"resample_from_array",
"(",
"in_raster",
"=",
"None",
",",
"in_affine",
"=",
"None",
",",
"out_tile",
"=",
"None",
",",
"in_crs",
"=",
"None",
",",
"resampling",
"=",
"\"nearest\"",
",",
"nodataval",
"=",
"0",
")",
":",
"# TODO rename function",
"if",
"isinstance",
"(",
"in_raster",
",",
"ma",
".",
"MaskedArray",
")",
":",
"pass",
"if",
"isinstance",
"(",
"in_raster",
",",
"np",
".",
"ndarray",
")",
":",
"in_raster",
"=",
"ma",
".",
"MaskedArray",
"(",
"in_raster",
",",
"mask",
"=",
"in_raster",
"==",
"nodataval",
")",
"elif",
"isinstance",
"(",
"in_raster",
",",
"ReferencedRaster",
")",
":",
"in_affine",
"=",
"in_raster",
".",
"affine",
"in_crs",
"=",
"in_raster",
".",
"crs",
"in_raster",
"=",
"in_raster",
".",
"data",
"elif",
"isinstance",
"(",
"in_raster",
",",
"tuple",
")",
":",
"in_raster",
"=",
"ma",
".",
"MaskedArray",
"(",
"data",
"=",
"np",
".",
"stack",
"(",
"in_raster",
")",
",",
"mask",
"=",
"np",
".",
"stack",
"(",
"[",
"band",
".",
"mask",
"if",
"isinstance",
"(",
"band",
",",
"ma",
".",
"masked_array",
")",
"else",
"np",
".",
"where",
"(",
"band",
"==",
"nodataval",
",",
"True",
",",
"False",
")",
"for",
"band",
"in",
"in_raster",
"]",
")",
",",
"fill_value",
"=",
"nodataval",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"wrong input data type: %s\"",
"%",
"type",
"(",
"in_raster",
")",
")",
"if",
"in_raster",
".",
"ndim",
"==",
"2",
":",
"in_raster",
"=",
"ma",
".",
"expand_dims",
"(",
"in_raster",
",",
"axis",
"=",
"0",
")",
"elif",
"in_raster",
".",
"ndim",
"==",
"3",
":",
"pass",
"else",
":",
"raise",
"TypeError",
"(",
"\"input array must have 2 or 3 dimensions\"",
")",
"if",
"in_raster",
".",
"fill_value",
"!=",
"nodataval",
":",
"ma",
".",
"set_fill_value",
"(",
"in_raster",
",",
"nodataval",
")",
"out_shape",
"=",
"(",
"in_raster",
".",
"shape",
"[",
"0",
"]",
",",
")",
"+",
"out_tile",
".",
"shape",
"dst_data",
"=",
"np",
".",
"empty",
"(",
"out_shape",
",",
"in_raster",
".",
"dtype",
")",
"in_raster",
"=",
"ma",
".",
"masked_array",
"(",
"data",
"=",
"in_raster",
".",
"filled",
"(",
")",
",",
"mask",
"=",
"in_raster",
".",
"mask",
",",
"fill_value",
"=",
"nodataval",
")",
"reproject",
"(",
"in_raster",
",",
"dst_data",
",",
"src_transform",
"=",
"in_affine",
",",
"src_crs",
"=",
"in_crs",
"if",
"in_crs",
"else",
"out_tile",
".",
"crs",
",",
"dst_transform",
"=",
"out_tile",
".",
"affine",
",",
"dst_crs",
"=",
"out_tile",
".",
"crs",
",",
"resampling",
"=",
"Resampling",
"[",
"resampling",
"]",
")",
"return",
"ma",
".",
"MaskedArray",
"(",
"dst_data",
",",
"mask",
"=",
"dst_data",
"==",
"nodataval",
")"
] | 30.271429 | 17.014286 |
def filter_(*permissions, **kwargs):
"""
Constructs a clause to filter all bearers or targets for a given
    bearer or target.
"""
bearer = kwargs['bearer']
target = kwargs.get('target')
bearer_cls = type_for(bearer)
    # We need a query object. There are many ways to get one. Either we can
# be passed one, or we can make one from the session. We can either be
# passed the session, or we can grab the session from the bearer passed.
if 'query' in kwargs:
query = kwargs['query']
elif 'session' in kwargs:
query = kwargs['session'].query(target)
else:
query = object_session(bearer).query(target)
getter = functools.partial(
registry.retrieve,
bearer=bearer_cls,
target=target)
try:
# Generate a hash of {rulefn: permission} that we can use later
# to collect all of the rules.
if len(permissions):
rules = {getter(permission=x): x for x in permissions}
else:
rules = {getter(): None}
except KeyError:
# No rules defined. Default to no permission.
return query.filter(sql.false())
# Invoke all the rules and collect the results
# Abusing reduce here to invoke each rule and send the return value (query)
# from one rule to the next one. In this way the query becomes
# increasingly decorated as it marches through the system.
# q == query
# r = (rulefn, permission)
reducer = lambda q, r: r[0](permission=r[1], query=q, bearer=bearer)
return reduce(reducer, six.iteritems(rules), query)
|
[
"def",
"filter_",
"(",
"*",
"permissions",
",",
"*",
"*",
"kwargs",
")",
":",
"bearer",
"=",
"kwargs",
"[",
"'bearer'",
"]",
"target",
"=",
"kwargs",
".",
"get",
"(",
"'target'",
")",
"bearer_cls",
"=",
"type_for",
"(",
"bearer",
")",
"# We need a query object. There are many ways to get one, Either we can",
"# be passed one, or we can make one from the session. We can either be",
"# passed the session, or we can grab the session from the bearer passed.",
"if",
"'query'",
"in",
"kwargs",
":",
"query",
"=",
"kwargs",
"[",
"'query'",
"]",
"elif",
"'session'",
"in",
"kwargs",
":",
"query",
"=",
"kwargs",
"[",
"'session'",
"]",
".",
"query",
"(",
"target",
")",
"else",
":",
"query",
"=",
"object_session",
"(",
"bearer",
")",
".",
"query",
"(",
"target",
")",
"getter",
"=",
"functools",
".",
"partial",
"(",
"registry",
".",
"retrieve",
",",
"bearer",
"=",
"bearer_cls",
",",
"target",
"=",
"target",
")",
"try",
":",
"# Generate a hash of {rulefn: permission} that we can use later",
"# to collect all of the rules.",
"if",
"len",
"(",
"permissions",
")",
":",
"rules",
"=",
"{",
"getter",
"(",
"permission",
"=",
"x",
")",
":",
"x",
"for",
"x",
"in",
"permissions",
"}",
"else",
":",
"rules",
"=",
"{",
"getter",
"(",
")",
":",
"None",
"}",
"except",
"KeyError",
":",
"# No rules defined. Default to no permission.",
"return",
"query",
".",
"filter",
"(",
"sql",
".",
"false",
"(",
")",
")",
"# Invoke all the rules and collect the results",
"# Abusing reduce here to invoke each rule and send the return value (query)",
"# from one rule to the next one. In this way the query becomes",
"# increasingly decorated as it marches through the system.",
"# q == query",
"# r = (rulefn, permission)",
"reducer",
"=",
"lambda",
"q",
",",
"r",
":",
"r",
"[",
"0",
"]",
"(",
"permission",
"=",
"r",
"[",
"1",
"]",
",",
"query",
"=",
"q",
",",
"bearer",
"=",
"bearer",
")",
"return",
"reduce",
"(",
"reducer",
",",
"six",
".",
"iteritems",
"(",
"rules",
")",
",",
"query",
")"
] | 33.319149 | 21.06383 |
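The closing `reduce` in `filter_` is a left fold over the rules; written as an explicit loop, it reads:

```python
def apply_rules(rules, query, bearer):
    # Equivalent to: reduce(reducer, six.iteritems(rules), query)
    for rulefn, permission in rules.items():
        query = rulefn(permission=permission, query=query, bearer=bearer)
    return query
```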
def draw_image(image, x1, y1, x2 = None, y2 = None):
'''Draw an image.
    The image's top-left corner is drawn at ``(x1, y1)``, and its lower-right at ``(x2, y2)``. If ``x2`` and ``y2`` are omitted, they
    are calculated to render the image at its native resolution.
Note that images can be flipped and scaled by providing alternative values for ``x2`` and ``y2``.
:param image: an :class:`Image` to draw
'''
if x2 is None:
x2 = x1 + image.width
if y2 is None:
y2 = y1 + image.height
lib.DrawImage(image._handle, x1, y1, x2, y2)
|
[
"def",
"draw_image",
"(",
"image",
",",
"x1",
",",
"y1",
",",
"x2",
"=",
"None",
",",
"y2",
"=",
"None",
")",
":",
"if",
"x2",
"is",
"None",
":",
"x2",
"=",
"x1",
"+",
"image",
".",
"width",
"if",
"y2",
"is",
"None",
":",
"y2",
"=",
"y1",
"+",
"image",
".",
"height",
"lib",
".",
"DrawImage",
"(",
"image",
".",
"_handle",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")"
] | 37.6 | 29.2 |
def files(self):
"""
Yield relative file paths specified in :attr:`metainfo`
Each paths starts with :attr:`name`.
Note that the paths may not exist. See :attr:`filepaths` for existing
files.
"""
info = self.metainfo['info']
if 'length' in info: # Singlefile
yield info['name']
elif 'files' in info: # Multifile torrent
rootdir = self.name
for fileinfo in info['files']:
yield os.path.join(rootdir, os.path.join(*fileinfo['path']))
|
[
"def",
"files",
"(",
"self",
")",
":",
"info",
"=",
"self",
".",
"metainfo",
"[",
"'info'",
"]",
"if",
"'length'",
"in",
"info",
":",
"# Singlefile",
"yield",
"info",
"[",
"'name'",
"]",
"elif",
"'files'",
"in",
"info",
":",
"# Multifile torrent",
"rootdir",
"=",
"self",
".",
"name",
"for",
"fileinfo",
"in",
"info",
"[",
"'files'",
"]",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"rootdir",
",",
"os",
".",
"path",
".",
"join",
"(",
"*",
"fileinfo",
"[",
"'path'",
"]",
")",
")"
] | 34.125 | 16.875 |
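The branch on `'length'` versus `'files'` follows the BitTorrent metainfo convention: single-file torrents carry a top-level `length`, while multi-file torrents carry a `files` list whose `path` entries are path components. The same traversal as a standalone function, run on a toy metainfo dict:

```python
import os

def torrent_files(metainfo):
    info = metainfo['info']
    if 'length' in info:                  # single-file torrent
        yield info['name']
    elif 'files' in info:                 # multi-file torrent
        for fileinfo in info['files']:
            yield os.path.join(info['name'], *fileinfo['path'])

multi = {'info': {'name': 'album',
                  'files': [{'path': ['cd1', '01.flac'], 'length': 1},
                            {'path': ['cd1', '02.flac'], 'length': 2}]}}
print(list(torrent_files(multi)))
# ['album/cd1/01.flac', 'album/cd1/02.flac']
```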
def remove_terms(self, terms, ignore_absences=False):
'''Non destructive term removal.
Parameters
----------
terms : list
list of terms to remove
ignore_absences : bool, False by default
if term does not appear, don't raise an error, just move on.
Returns
-------
TermDocMatrix, new object with terms removed.
'''
idx_to_delete_list = self._build_term_index_list(ignore_absences, terms)
return self.remove_terms_by_indices(idx_to_delete_list)
|
[
"def",
"remove_terms",
"(",
"self",
",",
"terms",
",",
"ignore_absences",
"=",
"False",
")",
":",
"idx_to_delete_list",
"=",
"self",
".",
"_build_term_index_list",
"(",
"ignore_absences",
",",
"terms",
")",
"return",
"self",
".",
"remove_terms_by_indices",
"(",
"idx_to_delete_list",
")"
] | 33.8125 | 22.3125 |
def make_bindings_type(filenames,color_input,colorkey,file_dictionary,sidebar,bounds):
# instantiating string the main string block for the javascript block of html code
string = ''
'''
# logic for instantiating variable colorkey input
if not colorkeyfields == False:
colorkey = 'selectedText'
'''
# iterating through each geojson filename
count = 0
for row in filenames:
color_input = ''
colorkeyfields = False
count += 1
filename = row
zoomrange = ['','']
# reading in geojson file into memory
with open(filename) as data_file:
data = json.load(data_file)
#pprint(data)
# getting the featuretype which will later dictate what javascript splices are needed
data = data['features']
data = data[0]
featuretype = data['geometry']
featuretype = featuretype['type']
data = data['properties']
# logic for overwriting colorkey fields if it exists for the filename
# in the file dictionary
try:
colorkeyfields = file_dictionary[filename][str('colorkeyfields')]
except KeyError:
colorkeyfields = False
except TypeError:
colorkeyfields = False
if not colorkeyfields == False:
if len(colorkeyfields) == 1:
colorkey = colorkeyfields[0]
colorkeyfields = False
try:
zoomrange = file_dictionary[filename][str('zooms')]
except KeyError:
zoomrange = ['','']
except TypeError:
zoomrange = ['','']
# code for if the file_dictionary input isn't false
#(i.e. getting the color inputs out of dictionary variable)
if file_dictionary==False and colorkey == False:
# logic for getting the colorline for different feature types
# the point feature requires a different line of code
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
# setting minzoom and maxzoom to be sent into js parsing
minzoom,maxzoom = zoomrange
# getting filter file dictionary if filter_dictonary exists
if not file_dictionary == False:
filter_file_dictionary = file_dictionary[filename]
else:
filter_file_dictionary = False
# checking to see if a chart_dictionary exists
try:
chart_dictionary = filter_file_dictionary['chart_dictionary']
except KeyError:
chart_dictionary = False
except TypeError:
chart_dictionary = False
# sending min and max zoom into the function that makes the zoom block
zoomblock = make_zoom_block(minzoom,maxzoom,count,colorkeyfields,bounds,filter_file_dictionary)
# logic for if a color key is given
# HINT look here for rgb raw color integration in a color line
if not colorkey == '':
if row == filenames[0]:
if colorkey == 'selectedText':
colorkey = """feature.properties[%s]""" % colorkey
else:
colorkey = """feature.properties['%s']""" % colorkey
if featuretype == 'Point':
colorline = get_colorline_marker(str(colorkey))
else:
colorline = get_colorline_marker2(str(colorkey))
# this may be able to be deleted
# test later
# im not sure what the fuck its here for
if file_dictionary == False and colorkey == '':
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
if colorkey == '' and colorkeyfields == False:
if featuretype == 'Point':
colorline = get_colorline_marker(color_input)
else:
colorline = get_colorline_marker2(color_input)
# iterating through each header
headers = []
for row in data:
headers.append(str(row))
# logic for getting sidebar string that will be added in make_blockstr()
if sidebar == True:
sidebarstring = make_sidebar_string(headers,chart_dictionary)
else:
sidebarstring = ''
# section of javascript code dedicated to the adding the data layer
if count == 1:
blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tvar map = L.mapbox.map('map', 'mapbox.streets',{
\t\t\tzoom: 5
\t\t\t}).fitBounds(dataLayer.getBounds());
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
else:
blocky = """
function add%s() {
\n\tfunction addDataToMap%s(data, map) {
\t\tvar dataLayer = L.geoJson(data);
\t\tdataLayer.addTo(map)
\t}\n""" % (count,count)
# making the string section that locally links the geojson file to the html document
'''
if not time == '':
preloc='\tfunction add%s() {\n' % (str(count))
loc = """\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
loc = preloc + loc
else:
'''
loc = """\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });""" % (filename,count)
# creating block to be added to the total or constituent string block
if featuretype == 'Point':
bindings = make_bindings(headers,count,colorline,featuretype,zoomblock,filename,sidebarstring,colorkeyfields)+'\n'
stringblock = blocky + loc + bindings
else:
bindings = make_bindings(headers,count,colorline,featuretype,zoomblock,filename,sidebarstring,colorkeyfields)+'\n'
stringblock = blocky + loc + bindings
# adding the stringblock (one geojson file javascript block) to the total string block
string += stringblock
# adding async function to end of string block
string = string + async_function_call(count)
return string
|
[
"def",
"make_bindings_type",
"(",
"filenames",
",",
"color_input",
",",
"colorkey",
",",
"file_dictionary",
",",
"sidebar",
",",
"bounds",
")",
":",
"# instantiating string the main string block for the javascript block of html code",
"string",
"=",
"''",
"# iterating through each geojson filename",
"count",
"=",
"0",
"for",
"row",
"in",
"filenames",
":",
"color_input",
"=",
"''",
"colorkeyfields",
"=",
"False",
"count",
"+=",
"1",
"filename",
"=",
"row",
"zoomrange",
"=",
"[",
"''",
",",
"''",
"]",
"# reading in geojson file into memory",
"with",
"open",
"(",
"filename",
")",
"as",
"data_file",
":",
"data",
"=",
"json",
".",
"load",
"(",
"data_file",
")",
"#pprint(data)",
"# getting the featuretype which will later dictate what javascript splices are needed",
"data",
"=",
"data",
"[",
"'features'",
"]",
"data",
"=",
"data",
"[",
"0",
"]",
"featuretype",
"=",
"data",
"[",
"'geometry'",
"]",
"featuretype",
"=",
"featuretype",
"[",
"'type'",
"]",
"data",
"=",
"data",
"[",
"'properties'",
"]",
"# logic for overwriting colorkey fields if it exists for the filename ",
"# in the file dictionary",
"try",
":",
"colorkeyfields",
"=",
"file_dictionary",
"[",
"filename",
"]",
"[",
"str",
"(",
"'colorkeyfields'",
")",
"]",
"except",
"KeyError",
":",
"colorkeyfields",
"=",
"False",
"except",
"TypeError",
":",
"colorkeyfields",
"=",
"False",
"if",
"not",
"colorkeyfields",
"==",
"False",
":",
"if",
"len",
"(",
"colorkeyfields",
")",
"==",
"1",
":",
"colorkey",
"=",
"colorkeyfields",
"[",
"0",
"]",
"colorkeyfields",
"=",
"False",
"try",
":",
"zoomrange",
"=",
"file_dictionary",
"[",
"filename",
"]",
"[",
"str",
"(",
"'zooms'",
")",
"]",
"except",
"KeyError",
":",
"zoomrange",
"=",
"[",
"''",
",",
"''",
"]",
"except",
"TypeError",
":",
"zoomrange",
"=",
"[",
"''",
",",
"''",
"]",
"# code for if the file_dictionary input isn't false ",
"#(i.e. getting the color inputs out of dictionary variable)",
"if",
"file_dictionary",
"==",
"False",
"and",
"colorkey",
"==",
"False",
":",
"# logic for getting the colorline for different feature types",
"# the point feature requires a different line of code",
"if",
"featuretype",
"==",
"'Point'",
":",
"colorline",
"=",
"get_colorline_marker",
"(",
"color_input",
")",
"else",
":",
"colorline",
"=",
"get_colorline_marker2",
"(",
"color_input",
")",
"# setting minzoom and maxzoom to be sent into js parsing ",
"minzoom",
",",
"maxzoom",
"=",
"zoomrange",
"# getting filter file dictionary if filter_dictonary exists",
"if",
"not",
"file_dictionary",
"==",
"False",
":",
"filter_file_dictionary",
"=",
"file_dictionary",
"[",
"filename",
"]",
"else",
":",
"filter_file_dictionary",
"=",
"False",
"# checking to see if a chart_dictionary exists",
"try",
":",
"chart_dictionary",
"=",
"filter_file_dictionary",
"[",
"'chart_dictionary'",
"]",
"except",
"KeyError",
":",
"chart_dictionary",
"=",
"False",
"except",
"TypeError",
":",
"chart_dictionary",
"=",
"False",
"# sending min and max zoom into the function that makes the zoom block",
"zoomblock",
"=",
"make_zoom_block",
"(",
"minzoom",
",",
"maxzoom",
",",
"count",
",",
"colorkeyfields",
",",
"bounds",
",",
"filter_file_dictionary",
")",
"# logic for if a color key is given ",
"# HINT look here for rgb raw color integration in a color line",
"if",
"not",
"colorkey",
"==",
"''",
":",
"if",
"row",
"==",
"filenames",
"[",
"0",
"]",
":",
"if",
"colorkey",
"==",
"'selectedText'",
":",
"colorkey",
"=",
"\"\"\"feature.properties[%s]\"\"\"",
"%",
"colorkey",
"else",
":",
"colorkey",
"=",
"\"\"\"feature.properties['%s']\"\"\"",
"%",
"colorkey",
"if",
"featuretype",
"==",
"'Point'",
":",
"colorline",
"=",
"get_colorline_marker",
"(",
"str",
"(",
"colorkey",
")",
")",
"else",
":",
"colorline",
"=",
"get_colorline_marker2",
"(",
"str",
"(",
"colorkey",
")",
")",
"# this may be able to be deleted ",
"# test later ",
"# im not sure what the fuck its here for ",
"if",
"file_dictionary",
"==",
"False",
"and",
"colorkey",
"==",
"''",
":",
"if",
"featuretype",
"==",
"'Point'",
":",
"colorline",
"=",
"get_colorline_marker",
"(",
"color_input",
")",
"else",
":",
"colorline",
"=",
"get_colorline_marker2",
"(",
"color_input",
")",
"if",
"colorkey",
"==",
"''",
"and",
"colorkeyfields",
"==",
"False",
":",
"if",
"featuretype",
"==",
"'Point'",
":",
"colorline",
"=",
"get_colorline_marker",
"(",
"color_input",
")",
"else",
":",
"colorline",
"=",
"get_colorline_marker2",
"(",
"color_input",
")",
"# iterating through each header ",
"headers",
"=",
"[",
"]",
"for",
"row",
"in",
"data",
":",
"headers",
".",
"append",
"(",
"str",
"(",
"row",
")",
")",
"# logic for getting sidebar string that will be added in make_blockstr()",
"if",
"sidebar",
"==",
"True",
":",
"sidebarstring",
"=",
"make_sidebar_string",
"(",
"headers",
",",
"chart_dictionary",
")",
"else",
":",
"sidebarstring",
"=",
"''",
"# section of javascript code dedicated to the adding the data layer ",
"if",
"count",
"==",
"1",
":",
"blocky",
"=",
"\"\"\"\n\tfunction add%s() { \n\t\\n\\tfunction addDataToMap%s(data, map) {\n\t\\t\\tvar dataLayer = L.geoJson(data);\n\t\\t\\tvar map = L.mapbox.map('map', 'mapbox.streets',{\n\t\\t\\t\\tzoom: 5\n\t\\t\\t\\t}).fitBounds(dataLayer.getBounds());\n\t\\t\\tdataLayer.addTo(map)\n\t\\t}\\n\"\"\"",
"%",
"(",
"count",
",",
"count",
")",
"else",
":",
"blocky",
"=",
"\"\"\"\n\tfunction add%s() { \n\t\\n\\tfunction addDataToMap%s(data, map) {\n\t\\t\\tvar dataLayer = L.geoJson(data);\n\t\\t\\tdataLayer.addTo(map)\n\t\\t}\\n\"\"\"",
"%",
"(",
"count",
",",
"count",
")",
"# making the string section that locally links the geojson file to the html document",
"'''\n\t\tif not time == '':\n\t\t\tpreloc='\\tfunction add%s() {\\n' % (str(count))\n\t\t\tloc = \"\"\"\\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });\"\"\" % (filename,count)\n\t\t\tloc = preloc + loc\n\t\telse: \n\t\t'''",
"loc",
"=",
"\"\"\"\\t$.getJSON('http://localhost:8000/%s',function(data) { addDataToMap%s(data,map); });\"\"\"",
"%",
"(",
"filename",
",",
"count",
")",
"# creating block to be added to the total or constituent string block",
"if",
"featuretype",
"==",
"'Point'",
":",
"bindings",
"=",
"make_bindings",
"(",
"headers",
",",
"count",
",",
"colorline",
",",
"featuretype",
",",
"zoomblock",
",",
"filename",
",",
"sidebarstring",
",",
"colorkeyfields",
")",
"+",
"'\\n'",
"stringblock",
"=",
"blocky",
"+",
"loc",
"+",
"bindings",
"else",
":",
"bindings",
"=",
"make_bindings",
"(",
"headers",
",",
"count",
",",
"colorline",
",",
"featuretype",
",",
"zoomblock",
",",
"filename",
",",
"sidebarstring",
",",
"colorkeyfields",
")",
"+",
"'\\n'",
"stringblock",
"=",
"blocky",
"+",
"loc",
"+",
"bindings",
"# adding the stringblock (one geojson file javascript block) to the total string block",
"string",
"+=",
"stringblock",
"# adding async function to end of string block",
"string",
"=",
"string",
"+",
"async_function_call",
"(",
"count",
")",
"return",
"string"
] | 31.538462 | 21.650888 |
def connect(host=DEFAULT_HOST, port=DEFAULT_PORT, base=DEFAULT_BASE,
chunk_size=multipart.default_chunk_size, **defaults):
"""Create a new :class:`~ipfsapi.Client` instance and connect to the
daemon to validate that its version is supported.
Raises
------
~ipfsapi.exceptions.VersionMismatch
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
All parameters are identical to those passed to the constructor of the
:class:`~ipfsapi.Client` class.
Returns
-------
~ipfsapi.Client
"""
# Create client instance
client = Client(host, port, base, chunk_size, **defaults)
# Query version number from daemon and validate it
assert_version(client.version()['Version'])
return client
|
[
"def",
"connect",
"(",
"host",
"=",
"DEFAULT_HOST",
",",
"port",
"=",
"DEFAULT_PORT",
",",
"base",
"=",
"DEFAULT_BASE",
",",
"chunk_size",
"=",
"multipart",
".",
"default_chunk_size",
",",
"*",
"*",
"defaults",
")",
":",
"# Create client instance",
"client",
"=",
"Client",
"(",
"host",
",",
"port",
",",
"base",
",",
"chunk_size",
",",
"*",
"*",
"defaults",
")",
"# Query version number from daemon and validate it",
"assert_version",
"(",
"client",
".",
"version",
"(",
")",
"[",
"'Version'",
"]",
")",
"return",
"client"
] | 29.965517 | 19.793103 |
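Typical use of this connect-and-validate helper, assuming a local IPFS daemon is listening on the default API port:

```python
import ipfsapi

client = ipfsapi.connect('127.0.0.1', 5001)  # raises if the daemon is
print(client.version()['Version'])           # unreachable or unsupported
```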
def image(random=random, width=800, height=600, https=False, *args, **kwargs):
"""
Generate the address of a placeholder image.
>>> mock_random.seed(0)
>>> image(random=mock_random)
'http://dummyimage.com/800x600/292929/e3e3e3&text=mighty poop'
>>> image(random=mock_random, width=60, height=60)
'http://placekitten.com/60/60'
>>> image(random=mock_random, width=1920, height=1080)
'http://dummyimage.com/1920x1080/292929/e3e3e3&text=To get to Westeros, you need to go to Britchestown, then drive west.'
>>> image(random=mock_random, https=True, width=1920, height=1080)
'https://dummyimage.com/1920x1080/292929/e3e3e3&text=East Mysteryhall is in Westeros.'
"""
target_fn = noun
if width+height > 300:
target_fn = thing
if width+height > 2000:
target_fn = sentence
s = ""
if https:
s = "s"
if random.choice([True, False]):
return "http{s}://dummyimage.com/{width}x{height}/292929/e3e3e3&text={text}".format(
s=s,
width=width,
height=height,
text=target_fn(random=random))
else:
return "http{s}://placekitten.com/{width}/{height}".format(s=s, width=width, height=height)
|
[
"def",
"image",
"(",
"random",
"=",
"random",
",",
"width",
"=",
"800",
",",
"height",
"=",
"600",
",",
"https",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"target_fn",
"=",
"noun",
"if",
"width",
"+",
"height",
">",
"300",
":",
"target_fn",
"=",
"thing",
"if",
"width",
"+",
"height",
">",
"2000",
":",
"target_fn",
"=",
"sentence",
"s",
"=",
"\"\"",
"if",
"https",
":",
"s",
"=",
"\"s\"",
"if",
"random",
".",
"choice",
"(",
"[",
"True",
",",
"False",
"]",
")",
":",
"return",
"\"http{s}://dummyimage.com/{width}x{height}/292929/e3e3e3&text={text}\"",
".",
"format",
"(",
"s",
"=",
"s",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
",",
"text",
"=",
"target_fn",
"(",
"random",
"=",
"random",
")",
")",
"else",
":",
"return",
"\"http{s}://placekitten.com/{width}/{height}\"",
".",
"format",
"(",
"s",
"=",
"s",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
")"
] | 35.911765 | 24.852941 |
def create(domain_name, years, **kwargs):
'''
Try to register the specified domain name
domain_name
The domain name to be registered
years
Number of years to register
Returns the following information:
    - Whether or not the domain was registered successfully
- Whether or not WhoisGuard is enabled
- Whether or not registration is instant
- The amount charged for registration
- The domain ID
- The order ID
- The transaction ID
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains.create my-domain-name 2
'''
idn_codes = ('afr', 'alb', 'ara', 'arg', 'arm', 'asm', 'ast', 'ave', 'awa', 'aze', 'bak', 'bal', 'ban', 'baq',
'bas', 'bel', 'ben', 'bho', 'bos', 'bul', 'bur', 'car', 'cat', 'che', 'chi', 'chv', 'cop', 'cos',
'cze', 'dan', 'div', 'doi', 'dut', 'eng', 'est', 'fao', 'fij', 'fin', 'fre', 'fry', 'geo', 'ger',
'gla', 'gle', 'gon', 'gre', 'guj', 'heb', 'hin', 'hun', 'inc', 'ind', 'inh', 'isl', 'ita', 'jav',
'jpn', 'kas', 'kaz', 'khm', 'kir', 'kor', 'kur', 'lao', 'lav', 'lit', 'ltz', 'mal', 'mkd', 'mlt',
'mol', 'mon', 'mri', 'msa', 'nep', 'nor', 'ori', 'oss', 'pan', 'per', 'pol', 'por', 'pus', 'raj',
'rum', 'rus', 'san', 'scr', 'sin', 'slo', 'slv', 'smo', 'snd', 'som', 'spa', 'srd', 'srp', 'swa',
'swe', 'syr', 'tam', 'tel', 'tgk', 'tha', 'tib', 'tur', 'ukr', 'urd', 'uzb', 'vie', 'wel', 'yid')
require_opts = ['AdminAddress1', 'AdminCity', 'AdminCountry', 'AdminEmailAddress', 'AdminFirstName',
'AdminLastName', 'AdminPhone', 'AdminPostalCode', 'AdminStateProvince', 'AuxBillingAddress1',
'AuxBillingCity', 'AuxBillingCountry', 'AuxBillingEmailAddress', 'AuxBillingFirstName',
'AuxBillingLastName', 'AuxBillingPhone', 'AuxBillingPostalCode', 'AuxBillingStateProvince',
'RegistrantAddress1', 'RegistrantCity', 'RegistrantCountry', 'RegistrantEmailAddress',
'RegistrantFirstName', 'RegistrantLastName', 'RegistrantPhone', 'RegistrantPostalCode',
'RegistrantStateProvince', 'TechAddress1', 'TechCity', 'TechCountry', 'TechEmailAddress',
'TechFirstName', 'TechLastName', 'TechPhone', 'TechPostalCode', 'TechStateProvince', 'Years']
opts = salt.utils.namecheap.get_opts('namecheap.domains.create')
opts['DomainName'] = domain_name
opts['Years'] = six.text_type(years)
def add_to_opts(opts_dict, kwargs, value, suffix, prefices):
for prefix in prefices:
nextkey = prefix + suffix
if nextkey not in kwargs:
opts_dict[nextkey] = value
for key, value in six.iteritems(kwargs):
if key.startswith('Registrant'):
add_to_opts(opts, kwargs, value, key[10:], ['Tech', 'Admin', 'AuxBilling', 'Billing'])
if key.startswith('Tech'):
add_to_opts(opts, kwargs, value, key[4:], ['Registrant', 'Admin', 'AuxBilling', 'Billing'])
if key.startswith('Admin'):
add_to_opts(opts, kwargs, value, key[5:], ['Registrant', 'Tech', 'AuxBilling', 'Billing'])
if key.startswith('AuxBilling'):
add_to_opts(opts, kwargs, value, key[10:], ['Registrant', 'Tech', 'Admin', 'Billing'])
if key.startswith('Billing'):
add_to_opts(opts, kwargs, value, key[7:], ['Registrant', 'Tech', 'Admin', 'AuxBilling'])
        if key == 'IdnCode' and value not in idn_codes:
log.error('Invalid IdnCode')
raise Exception('Invalid IdnCode')
opts[key] = value
for requiredkey in require_opts:
if requiredkey not in opts:
log.error('Missing required parameter \'%s\'', requiredkey)
raise Exception('Missing required parameter \'{0}\''.format(requiredkey))
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return {}
domainresult = response_xml.getElementsByTagName("DomainCreateResult")[0]
return salt.utils.namecheap.atts_to_dict(domainresult)
|
[
"def",
"create",
"(",
"domain_name",
",",
"years",
",",
"*",
"*",
"kwargs",
")",
":",
"idn_codes",
"=",
"(",
"'afr'",
",",
"'alb'",
",",
"'ara'",
",",
"'arg'",
",",
"'arm'",
",",
"'asm'",
",",
"'ast'",
",",
"'ave'",
",",
"'awa'",
",",
"'aze'",
",",
"'bak'",
",",
"'bal'",
",",
"'ban'",
",",
"'baq'",
",",
"'bas'",
",",
"'bel'",
",",
"'ben'",
",",
"'bho'",
",",
"'bos'",
",",
"'bul'",
",",
"'bur'",
",",
"'car'",
",",
"'cat'",
",",
"'che'",
",",
"'chi'",
",",
"'chv'",
",",
"'cop'",
",",
"'cos'",
",",
"'cze'",
",",
"'dan'",
",",
"'div'",
",",
"'doi'",
",",
"'dut'",
",",
"'eng'",
",",
"'est'",
",",
"'fao'",
",",
"'fij'",
",",
"'fin'",
",",
"'fre'",
",",
"'fry'",
",",
"'geo'",
",",
"'ger'",
",",
"'gla'",
",",
"'gle'",
",",
"'gon'",
",",
"'gre'",
",",
"'guj'",
",",
"'heb'",
",",
"'hin'",
",",
"'hun'",
",",
"'inc'",
",",
"'ind'",
",",
"'inh'",
",",
"'isl'",
",",
"'ita'",
",",
"'jav'",
",",
"'jpn'",
",",
"'kas'",
",",
"'kaz'",
",",
"'khm'",
",",
"'kir'",
",",
"'kor'",
",",
"'kur'",
",",
"'lao'",
",",
"'lav'",
",",
"'lit'",
",",
"'ltz'",
",",
"'mal'",
",",
"'mkd'",
",",
"'mlt'",
",",
"'mol'",
",",
"'mon'",
",",
"'mri'",
",",
"'msa'",
",",
"'nep'",
",",
"'nor'",
",",
"'ori'",
",",
"'oss'",
",",
"'pan'",
",",
"'per'",
",",
"'pol'",
",",
"'por'",
",",
"'pus'",
",",
"'raj'",
",",
"'rum'",
",",
"'rus'",
",",
"'san'",
",",
"'scr'",
",",
"'sin'",
",",
"'slo'",
",",
"'slv'",
",",
"'smo'",
",",
"'snd'",
",",
"'som'",
",",
"'spa'",
",",
"'srd'",
",",
"'srp'",
",",
"'swa'",
",",
"'swe'",
",",
"'syr'",
",",
"'tam'",
",",
"'tel'",
",",
"'tgk'",
",",
"'tha'",
",",
"'tib'",
",",
"'tur'",
",",
"'ukr'",
",",
"'urd'",
",",
"'uzb'",
",",
"'vie'",
",",
"'wel'",
",",
"'yid'",
")",
"require_opts",
"=",
"[",
"'AdminAddress1'",
",",
"'AdminCity'",
",",
"'AdminCountry'",
",",
"'AdminEmailAddress'",
",",
"'AdminFirstName'",
",",
"'AdminLastName'",
",",
"'AdminPhone'",
",",
"'AdminPostalCode'",
",",
"'AdminStateProvince'",
",",
"'AuxBillingAddress1'",
",",
"'AuxBillingCity'",
",",
"'AuxBillingCountry'",
",",
"'AuxBillingEmailAddress'",
",",
"'AuxBillingFirstName'",
",",
"'AuxBillingLastName'",
",",
"'AuxBillingPhone'",
",",
"'AuxBillingPostalCode'",
",",
"'AuxBillingStateProvince'",
",",
"'RegistrantAddress1'",
",",
"'RegistrantCity'",
",",
"'RegistrantCountry'",
",",
"'RegistrantEmailAddress'",
",",
"'RegistrantFirstName'",
",",
"'RegistrantLastName'",
",",
"'RegistrantPhone'",
",",
"'RegistrantPostalCode'",
",",
"'RegistrantStateProvince'",
",",
"'TechAddress1'",
",",
"'TechCity'",
",",
"'TechCountry'",
",",
"'TechEmailAddress'",
",",
"'TechFirstName'",
",",
"'TechLastName'",
",",
"'TechPhone'",
",",
"'TechPostalCode'",
",",
"'TechStateProvince'",
",",
"'Years'",
"]",
"opts",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"get_opts",
"(",
"'namecheap.domains.create'",
")",
"opts",
"[",
"'DomainName'",
"]",
"=",
"domain_name",
"opts",
"[",
"'Years'",
"]",
"=",
"six",
".",
"text_type",
"(",
"years",
")",
"def",
"add_to_opts",
"(",
"opts_dict",
",",
"kwargs",
",",
"value",
",",
"suffix",
",",
"prefices",
")",
":",
"for",
"prefix",
"in",
"prefices",
":",
"nextkey",
"=",
"prefix",
"+",
"suffix",
"if",
"nextkey",
"not",
"in",
"kwargs",
":",
"opts_dict",
"[",
"nextkey",
"]",
"=",
"value",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'Registrant'",
")",
":",
"add_to_opts",
"(",
"opts",
",",
"kwargs",
",",
"value",
",",
"key",
"[",
"10",
":",
"]",
",",
"[",
"'Tech'",
",",
"'Admin'",
",",
"'AuxBilling'",
",",
"'Billing'",
"]",
")",
"if",
"key",
".",
"startswith",
"(",
"'Tech'",
")",
":",
"add_to_opts",
"(",
"opts",
",",
"kwargs",
",",
"value",
",",
"key",
"[",
"4",
":",
"]",
",",
"[",
"'Registrant'",
",",
"'Admin'",
",",
"'AuxBilling'",
",",
"'Billing'",
"]",
")",
"if",
"key",
".",
"startswith",
"(",
"'Admin'",
")",
":",
"add_to_opts",
"(",
"opts",
",",
"kwargs",
",",
"value",
",",
"key",
"[",
"5",
":",
"]",
",",
"[",
"'Registrant'",
",",
"'Tech'",
",",
"'AuxBilling'",
",",
"'Billing'",
"]",
")",
"if",
"key",
".",
"startswith",
"(",
"'AuxBilling'",
")",
":",
"add_to_opts",
"(",
"opts",
",",
"kwargs",
",",
"value",
",",
"key",
"[",
"10",
":",
"]",
",",
"[",
"'Registrant'",
",",
"'Tech'",
",",
"'Admin'",
",",
"'Billing'",
"]",
")",
"if",
"key",
".",
"startswith",
"(",
"'Billing'",
")",
":",
"add_to_opts",
"(",
"opts",
",",
"kwargs",
",",
"value",
",",
"key",
"[",
"7",
":",
"]",
",",
"[",
"'Registrant'",
",",
"'Tech'",
",",
"'Admin'",
",",
"'AuxBilling'",
"]",
")",
"if",
"key",
"==",
"'IdnCode'",
"and",
"key",
"not",
"in",
"idn_codes",
":",
"log",
".",
"error",
"(",
"'Invalid IdnCode'",
")",
"raise",
"Exception",
"(",
"'Invalid IdnCode'",
")",
"opts",
"[",
"key",
"]",
"=",
"value",
"for",
"requiredkey",
"in",
"require_opts",
":",
"if",
"requiredkey",
"not",
"in",
"opts",
":",
"log",
".",
"error",
"(",
"'Missing required parameter \\'%s\\''",
",",
"requiredkey",
")",
"raise",
"Exception",
"(",
"'Missing required parameter \\'{0}\\''",
".",
"format",
"(",
"requiredkey",
")",
")",
"response_xml",
"=",
"salt",
".",
"utils",
".",
"namecheap",
".",
"post_request",
"(",
"opts",
")",
"if",
"response_xml",
"is",
"None",
":",
"return",
"{",
"}",
"domainresult",
"=",
"response_xml",
".",
"getElementsByTagName",
"(",
"\"DomainCreateResult\"",
")",
"[",
"0",
"]",
"return",
"salt",
".",
"utils",
".",
"namecheap",
".",
"atts_to_dict",
"(",
"domainresult",
")"
] | 46.942529 | 32.689655 |
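The inner `add_to_opts` helper mirrors any contact field the caller supplies across the other contact types, so passing only `Registrant*` values fills all four contact blocks unless a block was set explicitly. The fan-out in isolation:

```python
def add_to_opts(opts_dict, kwargs, value, suffix, prefixes):
    # Copy one contact field to every contact type the caller did not set.
    for prefix in prefixes:
        nextkey = prefix + suffix
        if nextkey not in kwargs:
            opts_dict[nextkey] = value

opts = {}
add_to_opts(opts, {'RegistrantFirstName': 'Ada'}, 'Ada', 'FirstName',
            ['Tech', 'Admin', 'AuxBilling', 'Billing'])
print(sorted(opts))
# ['AdminFirstName', 'AuxBillingFirstName', 'BillingFirstName', 'TechFirstName']
```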
def _jgezerou8(ins):
""" Jumps if top of the stack (8bit) is >= 0 to arg(1)
Always TRUE for unsigned
"""
output = []
value = ins.quad[1]
if not is_int(value):
output = _8bit_oper(value)
output.append('jp %s' % str(ins.quad[2]))
return output
|
[
"def",
"_jgezerou8",
"(",
"ins",
")",
":",
"output",
"=",
"[",
"]",
"value",
"=",
"ins",
".",
"quad",
"[",
"1",
"]",
"if",
"not",
"is_int",
"(",
"value",
")",
":",
"output",
"=",
"_8bit_oper",
"(",
"value",
")",
"output",
".",
"append",
"(",
"'jp %s'",
"%",
"str",
"(",
"ins",
".",
"quad",
"[",
"2",
"]",
")",
")",
"return",
"output"
] | 25.090909 | 14.454545 |
def convert_magicc6_to_magicc7_variables(variables, inverse=False):
"""
Convert MAGICC6 variables to MAGICC7 variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
        If True, convert the other way, i.e. convert MAGICC7 variables to MAGICC6
variables
Raises
------
ValueError
If you try to convert HFC245ca, or some variant thereof, you will get a
ValueError. The reason is that this variable was never meant to be included in
MAGICC6, it was just an accident. See, for example, the text in the
description section of ``pymagicc/MAGICC6/run/HISTRCP_HFC245fa_CONC.IN``:
"...HFC245fa, rather than HFC245ca, is the actually used isomer.".
Returns
-------
``type(variables)``
Set of converted variables
"""
if isinstance(variables, (list, pd.Index)):
return [
_apply_convert_magicc6_to_magicc7_variables(v, inverse) for v in variables
]
else:
return _apply_convert_magicc6_to_magicc7_variables(variables, inverse)
|
[
"def",
"convert_magicc6_to_magicc7_variables",
"(",
"variables",
",",
"inverse",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"variables",
",",
"(",
"list",
",",
"pd",
".",
"Index",
")",
")",
":",
"return",
"[",
"_apply_convert_magicc6_to_magicc7_variables",
"(",
"v",
",",
"inverse",
")",
"for",
"v",
"in",
"variables",
"]",
"else",
":",
"return",
"_apply_convert_magicc6_to_magicc7_variables",
"(",
"variables",
",",
"inverse",
")"
] | 32.878788 | 27.121212 |
def update_links(self, request, admin_site=None):
"""
Called to update the widget's urls. Tries to find the
bundle for the model that this foreign key points to and then
asks it for the urls for adding and listing and sets them on
this widget instance. The urls are only set if request.user
has permissions on that url.
:param request: The request for which this widget is being rendered.
:param admin_site: If provided, the `admin_site` is used to lookup \
the bundle that is registered as the primary url for the model \
that this foreign key points to.
"""
if admin_site:
bundle = admin_site.get_bundle_for_model(self.model.to)
if bundle:
self._api_link = self._get_bundle_link(bundle, self.view,
request.user)
self._add_link = self._get_bundle_link(bundle, self.add_view,
request.user)
|
[
"def",
"update_links",
"(",
"self",
",",
"request",
",",
"admin_site",
"=",
"None",
")",
":",
"if",
"admin_site",
":",
"bundle",
"=",
"admin_site",
".",
"get_bundle_for_model",
"(",
"self",
".",
"model",
".",
"to",
")",
"if",
"bundle",
":",
"self",
".",
"_api_link",
"=",
"self",
".",
"_get_bundle_link",
"(",
"bundle",
",",
"self",
".",
"view",
",",
"request",
".",
"user",
")",
"self",
".",
"_add_link",
"=",
"self",
".",
"_get_bundle_link",
"(",
"bundle",
",",
"self",
".",
"add_view",
",",
"request",
".",
"user",
")"
] | 49.190476 | 23.380952 |
async def redirect_async(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError(
"Clients with a shared connection cannot be "
"automatically redirected.")
if self.message_handler:
await self.message_handler.destroy_async()
self.message_handler = None
self._shutdown = False
self._last_activity_timestamp = None
self._was_message_received = False
self._remote_address = address.Source(redirect.address)
await self._redirect_async(redirect, auth)
|
[
"async",
"def",
"redirect_async",
"(",
"self",
",",
"redirect",
",",
"auth",
")",
":",
"if",
"self",
".",
"_ext_connection",
":",
"raise",
"ValueError",
"(",
"\"Clients with a shared connection cannot be \"",
"\"automatically redirected.\"",
")",
"if",
"self",
".",
"message_handler",
":",
"await",
"self",
".",
"message_handler",
".",
"destroy_async",
"(",
")",
"self",
".",
"message_handler",
"=",
"None",
"self",
".",
"_shutdown",
"=",
"False",
"self",
".",
"_last_activity_timestamp",
"=",
"None",
"self",
".",
"_was_message_received",
"=",
"False",
"self",
".",
"_remote_address",
"=",
"address",
".",
"Source",
"(",
"redirect",
".",
"address",
")",
"await",
"self",
".",
"_redirect_async",
"(",
"redirect",
",",
"auth",
")"
] | 41.045455 | 13.909091 |
def confusion_matrix(predicted_essential, expected_essential,
predicted_nonessential, expected_nonessential):
"""
Compute a representation of the confusion matrix.
Parameters
----------
predicted_essential : set
expected_essential : set
predicted_nonessential : set
expected_nonessential : set
Returns
-------
dict
Confusion matrix as different keys of a dictionary. The abbreviated
keys correspond to the ones used in [1]_.
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
"""
true_positive = predicted_essential & expected_essential
tp = len(true_positive)
true_negative = predicted_nonessential & expected_nonessential
tn = len(true_negative)
false_positive = predicted_essential - expected_essential
fp = len(false_positive)
false_negative = predicted_nonessential - expected_nonessential
fn = len(false_negative)
# sensitivity or true positive rate
try:
tpr = tp / (tp + fn)
except ZeroDivisionError:
tpr = None
# specificity or true negative rate
try:
tnr = tn / (tn + fp)
except ZeroDivisionError:
tnr = None
# precision or positive predictive value
try:
ppv = tp / (tp + fp)
except ZeroDivisionError:
ppv = None
# false discovery rate
fdr = 1 - ppv
# accuracy
try:
acc = (tp + tn) / (tp + tn + fp + fn)
except ZeroDivisionError:
acc = None
# Compute Matthews correlation coefficient.
try:
mcc = (tp * tn - fp * fn) /\
sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
except ZeroDivisionError:
mcc = None
return {
"TP": list(true_positive),
"TN": list(true_negative),
"FP": list(false_positive),
"FN": list(false_negative),
"TPR": tpr,
"TNR": tnr,
"PPV": ppv,
"FDR": fdr,
"ACC": acc,
"MCC": mcc
}
|
[
"def",
"confusion_matrix",
"(",
"predicted_essential",
",",
"expected_essential",
",",
"predicted_nonessential",
",",
"expected_nonessential",
")",
":",
"true_positive",
"=",
"predicted_essential",
"&",
"expected_essential",
"tp",
"=",
"len",
"(",
"true_positive",
")",
"true_negative",
"=",
"predicted_nonessential",
"&",
"expected_nonessential",
"tn",
"=",
"len",
"(",
"true_negative",
")",
"false_positive",
"=",
"predicted_essential",
"-",
"expected_essential",
"fp",
"=",
"len",
"(",
"false_positive",
")",
"false_negative",
"=",
"predicted_nonessential",
"-",
"expected_nonessential",
"fn",
"=",
"len",
"(",
"false_negative",
")",
"# sensitivity or true positive rate",
"try",
":",
"tpr",
"=",
"tp",
"/",
"(",
"tp",
"+",
"fn",
")",
"except",
"ZeroDivisionError",
":",
"tpr",
"=",
"None",
"# specificity or true negative rate",
"try",
":",
"tnr",
"=",
"tn",
"/",
"(",
"tn",
"+",
"fp",
")",
"except",
"ZeroDivisionError",
":",
"tnr",
"=",
"None",
"# precision or positive predictive value",
"try",
":",
"ppv",
"=",
"tp",
"/",
"(",
"tp",
"+",
"fp",
")",
"except",
"ZeroDivisionError",
":",
"ppv",
"=",
"None",
"# false discovery rate",
"fdr",
"=",
"1",
"-",
"ppv",
"# accuracy",
"try",
":",
"acc",
"=",
"(",
"tp",
"+",
"tn",
")",
"/",
"(",
"tp",
"+",
"tn",
"+",
"fp",
"+",
"fn",
")",
"except",
"ZeroDivisionError",
":",
"acc",
"=",
"None",
"# Compute Matthews correlation coefficient.",
"try",
":",
"mcc",
"=",
"(",
"tp",
"*",
"tn",
"-",
"fp",
"*",
"fn",
")",
"/",
"sqrt",
"(",
"(",
"tp",
"+",
"fp",
")",
"*",
"(",
"tp",
"+",
"fn",
")",
"*",
"(",
"tn",
"+",
"fp",
")",
"*",
"(",
"tn",
"+",
"fn",
")",
")",
"except",
"ZeroDivisionError",
":",
"mcc",
"=",
"None",
"return",
"{",
"\"TP\"",
":",
"list",
"(",
"true_positive",
")",
",",
"\"TN\"",
":",
"list",
"(",
"true_negative",
")",
",",
"\"FP\"",
":",
"list",
"(",
"false_positive",
")",
",",
"\"FN\"",
":",
"list",
"(",
"false_negative",
")",
",",
"\"TPR\"",
":",
"tpr",
",",
"\"TNR\"",
":",
"tnr",
",",
"\"PPV\"",
":",
"ppv",
",",
"\"FDR\"",
":",
"fdr",
",",
"\"ACC\"",
":",
"acc",
",",
"\"MCC\"",
":",
"mcc",
"}"
] | 27.805556 | 18.888889 |
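A quick worked example of the arithmetic above, using four toy sets:

```python
from math import sqrt

pe, ee = {"a", "b", "c"}, {"a", "b", "d"}  # predicted/expected essential
pn, en = {"d", "e"}, {"c", "e"}            # predicted/expected nonessential
tp, tn = len(pe & ee), len(pn & en)        # 2, 1
fp, fn = len(pe - ee), len(pn - en)        # 1, 1
tpr, tnr, ppv = tp / (tp + fn), tn / (tn + fp), tp / (tp + fp)
acc = (tp + tn) / (tp + tn + fp + fn)
mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(tpr, tnr, ppv, acc, mcc)  # 0.667, 0.5, 0.667, 0.6, 0.167 (rounded)
```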
def _get_axial_shifts(ndim=2, include_diagonals=False):
r'''
Helper function to generate the axial shifts that will be performed on
the image to identify bordering pixels/voxels
'''
if ndim == 2:
if include_diagonals:
neighbors = square(3)
else:
neighbors = diamond(1)
neighbors[1, 1] = 0
x, y = np.where(neighbors)
x -= 1
y -= 1
return np.vstack((x, y)).T
else:
if include_diagonals:
neighbors = cube(3)
else:
neighbors = octahedron(1)
neighbors[1, 1, 1] = 0
x, y, z = np.where(neighbors)
x -= 1
y -= 1
z -= 1
return np.vstack((x, y, z)).T
|
[
"def",
"_get_axial_shifts",
"(",
"ndim",
"=",
"2",
",",
"include_diagonals",
"=",
"False",
")",
":",
"if",
"ndim",
"==",
"2",
":",
"if",
"include_diagonals",
":",
"neighbors",
"=",
"square",
"(",
"3",
")",
"else",
":",
"neighbors",
"=",
"diamond",
"(",
"1",
")",
"neighbors",
"[",
"1",
",",
"1",
"]",
"=",
"0",
"x",
",",
"y",
"=",
"np",
".",
"where",
"(",
"neighbors",
")",
"x",
"-=",
"1",
"y",
"-=",
"1",
"return",
"np",
".",
"vstack",
"(",
"(",
"x",
",",
"y",
")",
")",
".",
"T",
"else",
":",
"if",
"include_diagonals",
":",
"neighbors",
"=",
"cube",
"(",
"3",
")",
"else",
":",
"neighbors",
"=",
"octahedron",
"(",
"1",
")",
"neighbors",
"[",
"1",
",",
"1",
",",
"1",
"]",
"=",
"0",
"x",
",",
"y",
",",
"z",
"=",
"np",
".",
"where",
"(",
"neighbors",
")",
"x",
"-=",
"1",
"y",
"-=",
"1",
"z",
"-=",
"1",
"return",
"np",
".",
"vstack",
"(",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
".",
"T"
] | 27.192308 | 17.269231 |
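For `ndim=2` without diagonals, the structuring element (presumably `skimage.morphology.diamond`) minus its centre leaves the four von Neumann neighbours, so the returned shifts are the axial offsets. Reproduced with plain NumPy:

```python
import numpy as np

neighbors = np.array([[0, 1, 0],
                      [1, 0, 1],
                      [0, 1, 0]])  # diamond(1) with the centre cleared
x, y = np.where(neighbors)
print(np.vstack((x - 1, y - 1)).T)
# [[-1  0]
#  [ 0 -1]
#  [ 0  1]
#  [ 1  0]]
```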
def optional(validator):
"""
A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator (or a list of validators) that is used for
non-``None`` values.
:type validator: callable or :class:`list` of callables.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators.
"""
if isinstance(validator, list):
return _OptionalValidator(_AndValidator(validator))
return _OptionalValidator(validator)
|
[
"def",
"optional",
"(",
"validator",
")",
":",
"if",
"isinstance",
"(",
"validator",
",",
"list",
")",
":",
"return",
"_OptionalValidator",
"(",
"_AndValidator",
"(",
"validator",
")",
")",
"return",
"_OptionalValidator",
"(",
"validator",
")"
] | 38.375 | 20.375 |
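This is the `optional` validator from the attrs library (the versionadded tags match attrs 15.1/17.1); typical usage with that API:

```python
import attr

@attr.s
class Point:
    x = attr.ib(validator=attr.validators.optional(
        attr.validators.instance_of(int)))

Point(x=None)  # allowed: optional short-circuits on None
Point(x=3)     # checked by the inner instance_of(int) validator
# Point(x="3") would raise TypeError from the inner validator
```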
def get(self, request, *args, **kwargs):
"""
Method for handling GET requests.
Calls the `render` method with the following
items in context:
* **queryset** - Objects to perform action on
"""
queryset = self.get_selected(request)
return self.render(request, queryset = queryset)
|
[
"def",
"get",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"queryset",
"=",
"self",
".",
"get_selected",
"(",
"request",
")",
"return",
"self",
".",
"render",
"(",
"request",
",",
"queryset",
"=",
"queryset",
")"
] | 30.363636 | 12.909091 |
def _ExportEvents(
self, storage_reader, output_module, deduplicate_events=True,
event_filter=None, time_slice=None, use_time_slicer=False):
"""Exports events using an output module.
Args:
storage_reader (StorageReader): storage reader.
output_module (OutputModule): output module.
deduplicate_events (Optional[bool]): True if events should be
deduplicated.
event_filter (Optional[FilterObject]): event filter.
time_slice (Optional[TimeRange]): time range that defines a time slice
to filter events.
use_time_slicer (Optional[bool]): True if the 'time slicer' should be
used. The 'time slicer' will provide a context of events around
an event of interest.
"""
self._status = definitions.STATUS_INDICATOR_EXPORTING
time_slice_buffer = None
time_slice_range = None
if time_slice:
if time_slice.event_timestamp is not None:
time_slice_range = storage_time_range.TimeRange(
time_slice.start_timestamp, time_slice.end_timestamp)
if use_time_slicer:
time_slice_buffer = bufferlib.CircularBuffer(time_slice.duration)
filter_limit = getattr(event_filter, 'limit', None)
forward_entries = 0
self._events_status.number_of_filtered_events = 0
self._events_status.number_of_events_from_time_slice = 0
for event in storage_reader.GetSortedEvents(time_range=time_slice_range):
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
event_data = storage_reader.GetEventDataByIdentifier(
event_data_identifier)
if event_data:
for attribute_name, attribute_value in event_data.GetAttributes():
setattr(event, attribute_name, attribute_value)
event_identifier = event.GetIdentifier()
event.tag = self._event_tag_index.GetEventTagByIdentifier(
storage_reader, event_identifier)
if time_slice_range and event.timestamp != time_slice.event_timestamp:
self._events_status.number_of_events_from_time_slice += 1
if event_filter:
filter_match = event_filter.Match(event)
else:
filter_match = None
# pylint: disable=singleton-comparison
if filter_match == False:
if not time_slice_buffer:
self._events_status.number_of_filtered_events += 1
elif forward_entries == 0:
time_slice_buffer.Append(event)
self._events_status.number_of_filtered_events += 1
elif forward_entries <= time_slice_buffer.size:
self._ExportEvent(
output_module, event, deduplicate_events=deduplicate_events)
self._number_of_consumed_events += 1
self._events_status.number_of_events_from_time_slice += 1
forward_entries += 1
else:
# We reached the maximum size of the time slice and don't need to
# include other entries.
self._events_status.number_of_filtered_events += 1
forward_entries = 0
else:
# pylint: disable=singleton-comparison
if filter_match == True and time_slice_buffer:
# Empty the time slice buffer.
for event_in_buffer in time_slice_buffer.Flush():
self._ExportEvent(
output_module, event_in_buffer,
deduplicate_events=deduplicate_events)
self._number_of_consumed_events += 1
self._events_status.number_of_filtered_events += 1
self._events_status.number_of_events_from_time_slice += 1
forward_entries = 1
self._ExportEvent(
output_module, event, deduplicate_events=deduplicate_events)
self._number_of_consumed_events += 1
# pylint: disable=singleton-comparison
if (filter_match == True and filter_limit and
filter_limit == self._number_of_consumed_events):
break
self._FlushExportBuffer(output_module)
|
[
"def",
"_ExportEvents",
"(",
"self",
",",
"storage_reader",
",",
"output_module",
",",
"deduplicate_events",
"=",
"True",
",",
"event_filter",
"=",
"None",
",",
"time_slice",
"=",
"None",
",",
"use_time_slicer",
"=",
"False",
")",
":",
"self",
".",
"_status",
"=",
"definitions",
".",
"STATUS_INDICATOR_EXPORTING",
"time_slice_buffer",
"=",
"None",
"time_slice_range",
"=",
"None",
"if",
"time_slice",
":",
"if",
"time_slice",
".",
"event_timestamp",
"is",
"not",
"None",
":",
"time_slice_range",
"=",
"storage_time_range",
".",
"TimeRange",
"(",
"time_slice",
".",
"start_timestamp",
",",
"time_slice",
".",
"end_timestamp",
")",
"if",
"use_time_slicer",
":",
"time_slice_buffer",
"=",
"bufferlib",
".",
"CircularBuffer",
"(",
"time_slice",
".",
"duration",
")",
"filter_limit",
"=",
"getattr",
"(",
"event_filter",
",",
"'limit'",
",",
"None",
")",
"forward_entries",
"=",
"0",
"self",
".",
"_events_status",
".",
"number_of_filtered_events",
"=",
"0",
"self",
".",
"_events_status",
".",
"number_of_events_from_time_slice",
"=",
"0",
"for",
"event",
"in",
"storage_reader",
".",
"GetSortedEvents",
"(",
"time_range",
"=",
"time_slice_range",
")",
":",
"event_data_identifier",
"=",
"event",
".",
"GetEventDataIdentifier",
"(",
")",
"if",
"event_data_identifier",
":",
"event_data",
"=",
"storage_reader",
".",
"GetEventDataByIdentifier",
"(",
"event_data_identifier",
")",
"if",
"event_data",
":",
"for",
"attribute_name",
",",
"attribute_value",
"in",
"event_data",
".",
"GetAttributes",
"(",
")",
":",
"setattr",
"(",
"event",
",",
"attribute_name",
",",
"attribute_value",
")",
"event_identifier",
"=",
"event",
".",
"GetIdentifier",
"(",
")",
"event",
".",
"tag",
"=",
"self",
".",
"_event_tag_index",
".",
"GetEventTagByIdentifier",
"(",
"storage_reader",
",",
"event_identifier",
")",
"if",
"time_slice_range",
"and",
"event",
".",
"timestamp",
"!=",
"time_slice",
".",
"event_timestamp",
":",
"self",
".",
"_events_status",
".",
"number_of_events_from_time_slice",
"+=",
"1",
"if",
"event_filter",
":",
"filter_match",
"=",
"event_filter",
".",
"Match",
"(",
"event",
")",
"else",
":",
"filter_match",
"=",
"None",
"# pylint: disable=singleton-comparison",
"if",
"filter_match",
"==",
"False",
":",
"if",
"not",
"time_slice_buffer",
":",
"self",
".",
"_events_status",
".",
"number_of_filtered_events",
"+=",
"1",
"elif",
"forward_entries",
"==",
"0",
":",
"time_slice_buffer",
".",
"Append",
"(",
"event",
")",
"self",
".",
"_events_status",
".",
"number_of_filtered_events",
"+=",
"1",
"elif",
"forward_entries",
"<=",
"time_slice_buffer",
".",
"size",
":",
"self",
".",
"_ExportEvent",
"(",
"output_module",
",",
"event",
",",
"deduplicate_events",
"=",
"deduplicate_events",
")",
"self",
".",
"_number_of_consumed_events",
"+=",
"1",
"self",
".",
"_events_status",
".",
"number_of_events_from_time_slice",
"+=",
"1",
"forward_entries",
"+=",
"1",
"else",
":",
"# We reached the maximum size of the time slice and don't need to",
"# include other entries.",
"self",
".",
"_events_status",
".",
"number_of_filtered_events",
"+=",
"1",
"forward_entries",
"=",
"0",
"else",
":",
"# pylint: disable=singleton-comparison",
"if",
"filter_match",
"==",
"True",
"and",
"time_slice_buffer",
":",
"# Empty the time slice buffer.",
"for",
"event_in_buffer",
"in",
"time_slice_buffer",
".",
"Flush",
"(",
")",
":",
"self",
".",
"_ExportEvent",
"(",
"output_module",
",",
"event_in_buffer",
",",
"deduplicate_events",
"=",
"deduplicate_events",
")",
"self",
".",
"_number_of_consumed_events",
"+=",
"1",
"self",
".",
"_events_status",
".",
"number_of_filtered_events",
"+=",
"1",
"self",
".",
"_events_status",
".",
"number_of_events_from_time_slice",
"+=",
"1",
"forward_entries",
"=",
"1",
"self",
".",
"_ExportEvent",
"(",
"output_module",
",",
"event",
",",
"deduplicate_events",
"=",
"deduplicate_events",
")",
"self",
".",
"_number_of_consumed_events",
"+=",
"1",
"# pylint: disable=singleton-comparison",
"if",
"(",
"filter_match",
"==",
"True",
"and",
"filter_limit",
"and",
"filter_limit",
"==",
"self",
".",
"_number_of_consumed_events",
")",
":",
"break",
"self",
".",
"_FlushExportBuffer",
"(",
"output_module",
")"
] | 37.631068 | 21.223301 |
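
The time-slicer branch above is easiest to see in isolation: filtered-out
events are parked in a ring buffer as leading context, and after a filter
hit the same number of trailing events is passed through. A standalone
sketch of that idea (illustrative names, not plaso's actual API):

from collections import deque

def slice_events(events, matches, size):
    # Keep the last `size` filtered-out events as leading context; after
    # a match, also pass through up to `size` trailing events.
    buffer, forward = deque(maxlen=size), 0
    for event in events:
        if matches(event):
            yield from buffer   # flush leading context
            buffer.clear()
            yield event
            forward = size
        elif forward:
            yield event         # trailing context
            forward -= 1
        else:
            buffer.append(event)

print(list(slice_events(range(10), lambda e: e == 5, 2)))   # [3, 4, 5, 6, 7]
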
def parse_fields(self, response, fields_dict, net_start=None,
net_end=None, dt_format=None, field_list=None):
"""
The function for parsing whois fields from a data input.
Args:
response (:obj:`str`): The response from the whois/rwhois server.
fields_dict (:obj:`dict`): The mapping of fields to regex search
values (required).
net_start (:obj:`int`): The starting point of the network (if
parsing multiple networks). Defaults to None.
net_end (:obj:`int`): The ending point of the network (if parsing
multiple networks). Defaults to None.
dt_format (:obj:`str`): The format of datetime fields if known.
Defaults to None.
field_list (:obj:`list` of :obj:`str`): If provided, fields to
parse. Defaults to:
::
['name', 'handle', 'description', 'country', 'state',
'city', 'address', 'postal_code', 'emails', 'created',
'updated']
Returns:
dict: A dictionary of fields provided in fields_dict, mapping to
the results of the regex searches.
"""
ret = {}
if not field_list:
field_list = ['name', 'handle', 'description', 'country', 'state',
'city', 'address', 'postal_code', 'emails',
'created', 'updated']
generate = ((field, pattern) for (field, pattern) in
fields_dict.items() if field in field_list)
for field, pattern in generate:
pattern = re.compile(
str(pattern),
re.DOTALL
)
if net_start is not None:
match = pattern.finditer(response, net_end, net_start)
elif net_end is not None:
match = pattern.finditer(response, net_end)
else:
match = pattern.finditer(response)
values = []
sub_section_end = None
for m in match:
if sub_section_end:
if field not in (
                    'emails',
) and (sub_section_end != (m.start() - 1)):
break
try:
values.append(m.group('val').strip())
except IndexError:
pass
sub_section_end = m.end()
if len(values) > 0:
value = None
try:
if field == 'country':
value = values[0].upper()
elif field in ['created', 'updated'] and dt_format:
value = datetime.strptime(
values[0],
str(dt_format)).isoformat('T')
elif field in ['emails']:
value = list(unique_everseen(values))
else:
values = unique_everseen(values)
value = '\n'.join(values).strip()
except ValueError as e:
log.debug('Whois field parsing failed for {0}: {1}'.format(
field, e))
pass
ret[field] = value
return ret
|
[
"def",
"parse_fields",
"(",
"self",
",",
"response",
",",
"fields_dict",
",",
"net_start",
"=",
"None",
",",
"net_end",
"=",
"None",
",",
"dt_format",
"=",
"None",
",",
"field_list",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"not",
"field_list",
":",
"field_list",
"=",
"[",
"'name'",
",",
"'handle'",
",",
"'description'",
",",
"'country'",
",",
"'state'",
",",
"'city'",
",",
"'address'",
",",
"'postal_code'",
",",
"'emails'",
",",
"'created'",
",",
"'updated'",
"]",
"generate",
"=",
"(",
"(",
"field",
",",
"pattern",
")",
"for",
"(",
"field",
",",
"pattern",
")",
"in",
"fields_dict",
".",
"items",
"(",
")",
"if",
"field",
"in",
"field_list",
")",
"for",
"field",
",",
"pattern",
"in",
"generate",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"str",
"(",
"pattern",
")",
",",
"re",
".",
"DOTALL",
")",
"if",
"net_start",
"is",
"not",
"None",
":",
"match",
"=",
"pattern",
".",
"finditer",
"(",
"response",
",",
"net_end",
",",
"net_start",
")",
"elif",
"net_end",
"is",
"not",
"None",
":",
"match",
"=",
"pattern",
".",
"finditer",
"(",
"response",
",",
"net_end",
")",
"else",
":",
"match",
"=",
"pattern",
".",
"finditer",
"(",
"response",
")",
"values",
"=",
"[",
"]",
"sub_section_end",
"=",
"None",
"for",
"m",
"in",
"match",
":",
"if",
"sub_section_end",
":",
"if",
"field",
"not",
"in",
"(",
"'emails'",
")",
"and",
"(",
"sub_section_end",
"!=",
"(",
"m",
".",
"start",
"(",
")",
"-",
"1",
")",
")",
":",
"break",
"try",
":",
"values",
".",
"append",
"(",
"m",
".",
"group",
"(",
"'val'",
")",
".",
"strip",
"(",
")",
")",
"except",
"IndexError",
":",
"pass",
"sub_section_end",
"=",
"m",
".",
"end",
"(",
")",
"if",
"len",
"(",
"values",
")",
">",
"0",
":",
"value",
"=",
"None",
"try",
":",
"if",
"field",
"==",
"'country'",
":",
"value",
"=",
"values",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"elif",
"field",
"in",
"[",
"'created'",
",",
"'updated'",
"]",
"and",
"dt_format",
":",
"value",
"=",
"datetime",
".",
"strptime",
"(",
"values",
"[",
"0",
"]",
",",
"str",
"(",
"dt_format",
")",
")",
".",
"isoformat",
"(",
"'T'",
")",
"elif",
"field",
"in",
"[",
"'emails'",
"]",
":",
"value",
"=",
"list",
"(",
"unique_everseen",
"(",
"values",
")",
")",
"else",
":",
"values",
"=",
"unique_everseen",
"(",
"values",
")",
"value",
"=",
"'\\n'",
".",
"join",
"(",
"values",
")",
".",
"strip",
"(",
")",
"except",
"ValueError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"'Whois field parsing failed for {0}: {1}'",
".",
"format",
"(",
"field",
",",
"e",
")",
")",
"pass",
"ret",
"[",
"field",
"]",
"=",
"value",
"return",
"ret"
] | 29.087719 | 24.736842 |
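
A hypothetical call, assuming `parser` is an instance of the class this
method belongs to; each regex in `fields_dict` must capture its value in
a named group called `val`:

fields_dict = {
    'name': r'OrgName:\s*(?P<val>.+?)\n',
    'country': r'Country:\s*(?P<val>\w+)\n',
}
response = 'OrgName: Example Net\nCountry: us\n'
result = parser.parse_fields(response, fields_dict)
# -> {'name': 'Example Net', 'country': 'US'}
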
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
|
[
"def",
"_build_row",
"(",
"padded_cells",
",",
"colwidths",
",",
"colaligns",
",",
"rowfmt",
")",
":",
"if",
"not",
"rowfmt",
":",
"return",
"None",
"if",
"hasattr",
"(",
"rowfmt",
",",
"\"__call__\"",
")",
":",
"return",
"rowfmt",
"(",
"padded_cells",
",",
"colwidths",
",",
"colaligns",
")",
"else",
":",
"return",
"_build_simple_row",
"(",
"padded_cells",
",",
"rowfmt",
")"
] | 38.75 | 18.5 |
def _build_late_dispatcher(func_name):
"""Return a function that calls method 'func_name' on objects.
This is useful for building late-bound dynamic dispatch.
Arguments:
func_name: The name of the instance method that should be called.
Returns:
A function that takes an 'obj' parameter, followed by *args and
returns the result of calling the instance method with the same
name as the contents of 'func_name' on the 'obj' object with the
arguments from *args.
"""
def _late_dynamic_dispatcher(obj, *args):
method = getattr(obj, func_name, None)
if not callable(method):
raise NotImplementedError(
"Instance method %r is not implemented by %r." % (
func_name, obj))
return method(*args)
return _late_dynamic_dispatcher
|
[
"def",
"_build_late_dispatcher",
"(",
"func_name",
")",
":",
"def",
"_late_dynamic_dispatcher",
"(",
"obj",
",",
"*",
"args",
")",
":",
"method",
"=",
"getattr",
"(",
"obj",
",",
"func_name",
",",
"None",
")",
"if",
"not",
"callable",
"(",
"method",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Instance method %r is not implemented by %r.\"",
"%",
"(",
"func_name",
",",
"obj",
")",
")",
"return",
"method",
"(",
"*",
"args",
")",
"return",
"_late_dynamic_dispatcher"
] | 37.958333 | 20.25 |
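
A short illustration of the late binding (the `Point` class is made up
for the example):

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def scaled(self, factor):
        return Point(self.x * factor, self.y * factor)

scaled = _build_late_dispatcher('scaled')
p = scaled(Point(1, 2), 3)   # resolves Point.scaled at call time
print(p.x, p.y)              # 3 6
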
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
"""Expand a string or list containing construction variable
substitutions.
This is the work-horse function for substitutions in file names
and the like. The companion scons_subst_list() function (below)
handles separating command lines into lists of arguments, so see
that function if that's what you're looking for.
"""
if isinstance(strSubst, str) and strSubst.find('$') < 0:
return strSubst
class StringSubber(object):
"""A class to construct the results of a scons_subst() call.
This binds a specific construction environment, mode, target and
source with two methods (substitute() and expand()) that handle
the expansion.
"""
def __init__(self, env, mode, conv, gvars):
self.env = env
self.mode = mode
self.conv = conv
self.gvars = gvars
def expand(self, s, lvars):
"""Expand a single "token" as necessary, returning an
appropriate string containing the expansion.
This handles expanding different types of things (strings,
lists, callables) appropriately. It calls the wrapper
substitute() method to re-expand things as necessary, so that
the results of expansions of side-by-side strings still get
re-evaluated separately, not smushed together.
"""
if is_String(s):
try:
s0, s1 = s[:2]
except (IndexError, ValueError):
return s
if s0 != '$':
return s
if s1 == '$':
# In this case keep the double $'s which we'll later
# swap for a single dollar sign as we need to retain
# this information to properly avoid matching "$("" when
# the actual text was "$$("" (or "$)"" when "$$)"" )
return '$$'
elif s1 in '()':
return s
else:
key = s[1:]
if key[0] == '{' or '.' in key:
if key[0] == '{':
key = key[1:-1]
try:
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
except Exception as e:
if e.__class__ in AllowableExceptions:
return ''
raise_exception(e, lvars['TARGETS'], s)
else:
if key in lvars:
s = lvars[key]
elif key in self.gvars:
s = self.gvars[key]
elif not NameError in AllowableExceptions:
raise_exception(NameError(key), lvars['TARGETS'], s)
else:
return ''
# Before re-expanding the result, handle
# recursive expansion by copying the local
# variable dictionary and overwriting a null
# string for the value of the variable name
# we just expanded.
#
# This could potentially be optimized by only
# copying lvars when s contains more expansions,
# but lvars is usually supposed to be pretty
# small, and deeply nested variable expansions
# are probably more the exception than the norm,
# so it should be tolerable for now.
lv = lvars.copy()
var = key.split('.')[0]
lv[var] = ''
return self.substitute(s, lv)
elif is_Sequence(s):
def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
return conv(substitute(l, lvars))
return list(map(func, s))
elif callable(s):
try:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
env=self.env,
for_signature=(self.mode != SUBST_CMD))
except TypeError:
# This probably indicates that it's a callable
# object that doesn't match our calling arguments
# (like an Action).
if self.mode == SUBST_RAW:
return s
s = self.conv(s)
return self.substitute(s, lvars)
elif s is None:
return ''
else:
return s
def substitute(self, args, lvars):
"""Substitute expansions in an argument or list of arguments.
This serves as a wrapper for splitting up a string into
separate tokens.
"""
if is_String(args) and not isinstance(args, CmdStringHolder):
args = str(args) # In case it's a UserString.
try:
def sub_match(match):
return self.conv(self.expand(match.group(1), lvars))
result = _dollar_exps.sub(sub_match, args)
except TypeError:
# If the internal conversion routine doesn't return
# strings (it could be overridden to return Nodes, for
# example), then the 1.5.2 re module will throw this
# exception. Back off to a slower, general-purpose
# algorithm that works for all data types.
args = _separate_args.findall(args)
result = []
for a in args:
result.append(self.conv(self.expand(a, lvars)))
if len(result) == 1:
result = result[0]
else:
result = ''.join(map(str, result))
return result
else:
return self.expand(args, lvars)
if conv is None:
conv = _strconv[mode]
# Doing this every time is a bit of a waste, since the Executor
# has typically already populated the OverrideEnvironment with
# $TARGET/$SOURCE variables. We're keeping this (for now), though,
# because it supports existing behavior that allows us to call
# an Action directly with an arbitrary target+source pair, which
# we use in Tool/tex.py to handle calling $BIBTEX when necessary.
# If we dropped that behavior (or found another way to cover it),
# we could get rid of this call completely and just rely on the
# Executor setting the variables.
if 'TARGET' not in lvars:
d = subst_dict(target, source)
if d:
lvars = lvars.copy()
lvars.update(d)
# We're (most likely) going to eval() things. If Python doesn't
# find a __builtins__ value in the global dictionary used for eval(),
# it copies the current global values for you. Avoid this by
# setting it explicitly and then deleting, so we don't pollute the
# construction environment Dictionary(ies) that are typically used
# for expansion.
gvars['__builtins__'] = __builtins__
ss = StringSubber(env, mode, conv, gvars)
result = ss.substitute(strSubst, lvars)
try:
del gvars['__builtins__']
except KeyError:
pass
res = result
if is_String(result):
# Remove $(-$) pairs and any stuff in between,
# if that's appropriate.
remove = _regex_remove[mode]
if remove:
if mode == SUBST_SIG:
result = _list_remove[mode](remove.split(result))
if result is None:
raise SCons.Errors.UserError("Unbalanced $(/$) in: " + res)
result = ' '.join(result)
else:
result = remove.sub('', result)
if mode != SUBST_RAW:
# Compress strings of white space characters into
# a single space.
result = _space_sep.sub(' ', result).strip()
# Now replace escaped $'s currently "$$"
# This is needed because we now retain $$ instead of
            # replacing them during substitution to avoid
# improperly trying to escape "$$(" as being "$("
result = result.replace('$$','$')
elif is_Sequence(result):
remove = _list_remove[mode]
if remove:
result = remove(result)
if result is None:
raise SCons.Errors.UserError("Unbalanced $(/$) in: " + str(res))
return result
|
[
"def",
"scons_subst",
"(",
"strSubst",
",",
"env",
",",
"mode",
"=",
"SUBST_RAW",
",",
"target",
"=",
"None",
",",
"source",
"=",
"None",
",",
"gvars",
"=",
"{",
"}",
",",
"lvars",
"=",
"{",
"}",
",",
"conv",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"strSubst",
",",
"str",
")",
"and",
"strSubst",
".",
"find",
"(",
"'$'",
")",
"<",
"0",
":",
"return",
"strSubst",
"class",
"StringSubber",
"(",
"object",
")",
":",
"\"\"\"A class to construct the results of a scons_subst() call.\n\n This binds a specific construction environment, mode, target and\n source with two methods (substitute() and expand()) that handle\n the expansion.\n \"\"\"",
"def",
"__init__",
"(",
"self",
",",
"env",
",",
"mode",
",",
"conv",
",",
"gvars",
")",
":",
"self",
".",
"env",
"=",
"env",
"self",
".",
"mode",
"=",
"mode",
"self",
".",
"conv",
"=",
"conv",
"self",
".",
"gvars",
"=",
"gvars",
"def",
"expand",
"(",
"self",
",",
"s",
",",
"lvars",
")",
":",
"\"\"\"Expand a single \"token\" as necessary, returning an\n appropriate string containing the expansion.\n\n This handles expanding different types of things (strings,\n lists, callables) appropriately. It calls the wrapper\n substitute() method to re-expand things as necessary, so that\n the results of expansions of side-by-side strings still get\n re-evaluated separately, not smushed together.\n \"\"\"",
"if",
"is_String",
"(",
"s",
")",
":",
"try",
":",
"s0",
",",
"s1",
"=",
"s",
"[",
":",
"2",
"]",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"return",
"s",
"if",
"s0",
"!=",
"'$'",
":",
"return",
"s",
"if",
"s1",
"==",
"'$'",
":",
"# In this case keep the double $'s which we'll later",
"# swap for a single dollar sign as we need to retain",
"# this information to properly avoid matching \"$(\"\" when",
"# the actual text was \"$$(\"\" (or \"$)\"\" when \"$$)\"\" )",
"return",
"'$$'",
"elif",
"s1",
"in",
"'()'",
":",
"return",
"s",
"else",
":",
"key",
"=",
"s",
"[",
"1",
":",
"]",
"if",
"key",
"[",
"0",
"]",
"==",
"'{'",
"or",
"'.'",
"in",
"key",
":",
"if",
"key",
"[",
"0",
"]",
"==",
"'{'",
":",
"key",
"=",
"key",
"[",
"1",
":",
"-",
"1",
"]",
"try",
":",
"s",
"=",
"eval",
"(",
"key",
",",
"self",
".",
"gvars",
",",
"lvars",
")",
"except",
"KeyboardInterrupt",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"if",
"e",
".",
"__class__",
"in",
"AllowableExceptions",
":",
"return",
"''",
"raise_exception",
"(",
"e",
",",
"lvars",
"[",
"'TARGETS'",
"]",
",",
"s",
")",
"else",
":",
"if",
"key",
"in",
"lvars",
":",
"s",
"=",
"lvars",
"[",
"key",
"]",
"elif",
"key",
"in",
"self",
".",
"gvars",
":",
"s",
"=",
"self",
".",
"gvars",
"[",
"key",
"]",
"elif",
"not",
"NameError",
"in",
"AllowableExceptions",
":",
"raise_exception",
"(",
"NameError",
"(",
"key",
")",
",",
"lvars",
"[",
"'TARGETS'",
"]",
",",
"s",
")",
"else",
":",
"return",
"''",
"# Before re-expanding the result, handle",
"# recursive expansion by copying the local",
"# variable dictionary and overwriting a null",
"# string for the value of the variable name",
"# we just expanded.",
"#",
"# This could potentially be optimized by only",
"# copying lvars when s contains more expansions,",
"# but lvars is usually supposed to be pretty",
"# small, and deeply nested variable expansions",
"# are probably more the exception than the norm,",
"# so it should be tolerable for now.",
"lv",
"=",
"lvars",
".",
"copy",
"(",
")",
"var",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"lv",
"[",
"var",
"]",
"=",
"''",
"return",
"self",
".",
"substitute",
"(",
"s",
",",
"lv",
")",
"elif",
"is_Sequence",
"(",
"s",
")",
":",
"def",
"func",
"(",
"l",
",",
"conv",
"=",
"self",
".",
"conv",
",",
"substitute",
"=",
"self",
".",
"substitute",
",",
"lvars",
"=",
"lvars",
")",
":",
"return",
"conv",
"(",
"substitute",
"(",
"l",
",",
"lvars",
")",
")",
"return",
"list",
"(",
"map",
"(",
"func",
",",
"s",
")",
")",
"elif",
"callable",
"(",
"s",
")",
":",
"try",
":",
"s",
"=",
"s",
"(",
"target",
"=",
"lvars",
"[",
"'TARGETS'",
"]",
",",
"source",
"=",
"lvars",
"[",
"'SOURCES'",
"]",
",",
"env",
"=",
"self",
".",
"env",
",",
"for_signature",
"=",
"(",
"self",
".",
"mode",
"!=",
"SUBST_CMD",
")",
")",
"except",
"TypeError",
":",
"# This probably indicates that it's a callable",
"# object that doesn't match our calling arguments",
"# (like an Action).",
"if",
"self",
".",
"mode",
"==",
"SUBST_RAW",
":",
"return",
"s",
"s",
"=",
"self",
".",
"conv",
"(",
"s",
")",
"return",
"self",
".",
"substitute",
"(",
"s",
",",
"lvars",
")",
"elif",
"s",
"is",
"None",
":",
"return",
"''",
"else",
":",
"return",
"s",
"def",
"substitute",
"(",
"self",
",",
"args",
",",
"lvars",
")",
":",
"\"\"\"Substitute expansions in an argument or list of arguments.\n\n This serves as a wrapper for splitting up a string into\n separate tokens.\n \"\"\"",
"if",
"is_String",
"(",
"args",
")",
"and",
"not",
"isinstance",
"(",
"args",
",",
"CmdStringHolder",
")",
":",
"args",
"=",
"str",
"(",
"args",
")",
"# In case it's a UserString.",
"try",
":",
"def",
"sub_match",
"(",
"match",
")",
":",
"return",
"self",
".",
"conv",
"(",
"self",
".",
"expand",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"lvars",
")",
")",
"result",
"=",
"_dollar_exps",
".",
"sub",
"(",
"sub_match",
",",
"args",
")",
"except",
"TypeError",
":",
"# If the internal conversion routine doesn't return",
"# strings (it could be overridden to return Nodes, for",
"# example), then the 1.5.2 re module will throw this",
"# exception. Back off to a slower, general-purpose",
"# algorithm that works for all data types.",
"args",
"=",
"_separate_args",
".",
"findall",
"(",
"args",
")",
"result",
"=",
"[",
"]",
"for",
"a",
"in",
"args",
":",
"result",
".",
"append",
"(",
"self",
".",
"conv",
"(",
"self",
".",
"expand",
"(",
"a",
",",
"lvars",
")",
")",
")",
"if",
"len",
"(",
"result",
")",
"==",
"1",
":",
"result",
"=",
"result",
"[",
"0",
"]",
"else",
":",
"result",
"=",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"result",
")",
")",
"return",
"result",
"else",
":",
"return",
"self",
".",
"expand",
"(",
"args",
",",
"lvars",
")",
"if",
"conv",
"is",
"None",
":",
"conv",
"=",
"_strconv",
"[",
"mode",
"]",
"# Doing this every time is a bit of a waste, since the Executor",
"# has typically already populated the OverrideEnvironment with",
"# $TARGET/$SOURCE variables. We're keeping this (for now), though,",
"# because it supports existing behavior that allows us to call",
"# an Action directly with an arbitrary target+source pair, which",
"# we use in Tool/tex.py to handle calling $BIBTEX when necessary.",
"# If we dropped that behavior (or found another way to cover it),",
"# we could get rid of this call completely and just rely on the",
"# Executor setting the variables.",
"if",
"'TARGET'",
"not",
"in",
"lvars",
":",
"d",
"=",
"subst_dict",
"(",
"target",
",",
"source",
")",
"if",
"d",
":",
"lvars",
"=",
"lvars",
".",
"copy",
"(",
")",
"lvars",
".",
"update",
"(",
"d",
")",
"# We're (most likely) going to eval() things. If Python doesn't",
"# find a __builtins__ value in the global dictionary used for eval(),",
"# it copies the current global values for you. Avoid this by",
"# setting it explicitly and then deleting, so we don't pollute the",
"# construction environment Dictionary(ies) that are typically used",
"# for expansion.",
"gvars",
"[",
"'__builtins__'",
"]",
"=",
"__builtins__",
"ss",
"=",
"StringSubber",
"(",
"env",
",",
"mode",
",",
"conv",
",",
"gvars",
")",
"result",
"=",
"ss",
".",
"substitute",
"(",
"strSubst",
",",
"lvars",
")",
"try",
":",
"del",
"gvars",
"[",
"'__builtins__'",
"]",
"except",
"KeyError",
":",
"pass",
"res",
"=",
"result",
"if",
"is_String",
"(",
"result",
")",
":",
"# Remove $(-$) pairs and any stuff in between,",
"# if that's appropriate.",
"remove",
"=",
"_regex_remove",
"[",
"mode",
"]",
"if",
"remove",
":",
"if",
"mode",
"==",
"SUBST_SIG",
":",
"result",
"=",
"_list_remove",
"[",
"mode",
"]",
"(",
"remove",
".",
"split",
"(",
"result",
")",
")",
"if",
"result",
"is",
"None",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"Unbalanced $(/$) in: \"",
"+",
"res",
")",
"result",
"=",
"' '",
".",
"join",
"(",
"result",
")",
"else",
":",
"result",
"=",
"remove",
".",
"sub",
"(",
"''",
",",
"result",
")",
"if",
"mode",
"!=",
"SUBST_RAW",
":",
"# Compress strings of white space characters into",
"# a single space.",
"result",
"=",
"_space_sep",
".",
"sub",
"(",
"' '",
",",
"result",
")",
".",
"strip",
"(",
")",
"# Now replace escaped $'s currently \"$$\"",
"# This is needed because we now retain $$ instead of",
"# replacing them during substition to avoid",
"# improperly trying to escape \"$$(\" as being \"$(\"",
"result",
"=",
"result",
".",
"replace",
"(",
"'$$'",
",",
"'$'",
")",
"elif",
"is_Sequence",
"(",
"result",
")",
":",
"remove",
"=",
"_list_remove",
"[",
"mode",
"]",
"if",
"remove",
":",
"result",
"=",
"remove",
"(",
"result",
")",
"if",
"result",
"is",
"None",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"Unbalanced $(/$) in: \"",
"+",
"str",
"(",
"res",
")",
")",
"return",
"result"
] | 42.415459 | 18.072464 |
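
Stripped of SCons's modes, escaping, and callable handling, the core of
the expansion is a regex substitution of $NAME over an environment
mapping. A minimal sketch (not SCons's actual API):

import re

def simple_subst(s, env):
    # Expand $NAME from a plain dict; unknown names become ''.
    return re.sub(r'\$(\w+)', lambda m: str(env.get(m.group(1), '')), s)

print(simple_subst('$CC -o $TARGET $SOURCES',
                   {'CC': 'gcc', 'TARGET': 'app', 'SOURCES': 'main.c'}))
# gcc -o app main.c
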
def powernodes_containing(self, name, directly=False) -> iter:
"""Yield all power nodes containing (power) node of given *name*.
If *directly* is True, will only yield the direct parent of given name.
"""
if directly:
yield from (node for node in self.all_in(name)
if name in self.inclusions[node])
else:
# This algorithm is very bad. Inverting the inclusion dict could
# be far better.
@functools.lru_cache(maxsize=self.node_number(count_pnode=True))
def contains_target(node, target):
succs = self.inclusions[node]
if target in succs:
return True
else:
return any(contains_target(succ, target) for succ in succs)
# populate the cache
for root in self.roots:
contains_target(root, name)
# output all that contains target at some level
yield from (node for node in self.inclusions.keys()
if contains_target(node, name))
|
[
"def",
"powernodes_containing",
"(",
"self",
",",
"name",
",",
"directly",
"=",
"False",
")",
"->",
"iter",
":",
"if",
"directly",
":",
"yield",
"from",
"(",
"node",
"for",
"node",
"in",
"self",
".",
"all_in",
"(",
"name",
")",
"if",
"name",
"in",
"self",
".",
"inclusions",
"[",
"node",
"]",
")",
"else",
":",
"# This algorithm is very bad. Inverting the inclusion dict could",
"# be far better.",
"@",
"functools",
".",
"lru_cache",
"(",
"maxsize",
"=",
"self",
".",
"node_number",
"(",
"count_pnode",
"=",
"True",
")",
")",
"def",
"contains_target",
"(",
"node",
",",
"target",
")",
":",
"succs",
"=",
"self",
".",
"inclusions",
"[",
"node",
"]",
"if",
"target",
"in",
"succs",
":",
"return",
"True",
"else",
":",
"return",
"any",
"(",
"contains_target",
"(",
"succ",
",",
"target",
")",
"for",
"succ",
"in",
"succs",
")",
"# populate the cache",
"for",
"root",
"in",
"self",
".",
"roots",
":",
"contains_target",
"(",
"root",
",",
"name",
")",
"# output all that contains target at some level",
"yield",
"from",
"(",
"node",
"for",
"node",
"in",
"self",
".",
"inclusions",
".",
"keys",
"(",
")",
"if",
"contains_target",
"(",
"node",
",",
"name",
")",
")"
] | 43.92 | 18.48 |
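
The same recursion works on a plain inclusion mapping; a standalone
sketch with made-up data:

import functools

inclusions = {'p1': {'p2', 'a'}, 'p2': {'b'}, 'a': set(), 'b': set()}

@functools.lru_cache(maxsize=None)
def contains(node, target):
    succs = inclusions[node]
    return target in succs or any(contains(s, target) for s in succs)

print(sorted(n for n in inclusions if contains(n, 'b')))   # ['p1', 'p2']
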
def calculate_single_terms(self):
"""Apply all methods stored in the hidden attribute
`PART_ODE_METHODS`.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> k(0.25)
>>> states.s = 1.0
>>> model.calculate_single_terms()
>>> fluxes.q
q(0.25)
"""
self.numvars.nmb_calls = self.numvars.nmb_calls+1
for method in self.PART_ODE_METHODS:
method(self)
|
[
"def",
"calculate_single_terms",
"(",
"self",
")",
":",
"self",
".",
"numvars",
".",
"nmb_calls",
"=",
"self",
".",
"numvars",
".",
"nmb_calls",
"+",
"1",
"for",
"method",
"in",
"self",
".",
"PART_ODE_METHODS",
":",
"method",
"(",
"self",
")"
] | 30 | 13.2 |
async def stop(self, **kwargs):
"""Stop pairing process."""
if not self._pin_code:
raise Exception('no pin given') # TODO: new exception
self.service.device_credentials = \
await self.pairing_procedure.finish_pairing(self._pin_code)
|
[
"async",
"def",
"stop",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"_pin_code",
":",
"raise",
"Exception",
"(",
"'no pin given'",
")",
"# TODO: new exception",
"self",
".",
"service",
".",
"device_credentials",
"=",
"await",
"self",
".",
"pairing_procedure",
".",
"finish_pairing",
"(",
"self",
".",
"_pin_code",
")"
] | 39.428571 | 17 |
def get_cassandra_connection(alias=None, name=None):
"""
:return: cassandra connection matching alias or name or just first found.
"""
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection
|
[
"def",
"get_cassandra_connection",
"(",
"alias",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"for",
"_alias",
",",
"connection",
"in",
"get_cassandra_connections",
"(",
")",
":",
"if",
"alias",
"is",
"not",
"None",
":",
"if",
"alias",
"==",
"_alias",
":",
"return",
"connection",
"elif",
"name",
"is",
"not",
"None",
":",
"if",
"name",
"==",
"connection",
".",
"settings_dict",
"[",
"'NAME'",
"]",
":",
"return",
"connection",
"else",
":",
"return",
"connection"
] | 32.5 | 14.642857 |
def _get_ansi_code(color=None, style=None):
"""return ansi escape code corresponding to color and style
:type color: str or None
:param color:
the color name (see `ANSI_COLORS` for available values)
or the color number when 256 colors are available
:type style: str or None
:param style:
style string (see `ANSI_COLORS` for available values). To get
      several style effects at the same time, use a comma as separator.
    :raise KeyError: if a nonexistent color or style identifier is given
:rtype: str
:return: the built escape code
"""
ansi_code = []
if style:
style_attrs = utils._splitstrip(style)
for effect in style_attrs:
ansi_code.append(ANSI_STYLES[effect])
if color:
if color.isdigit():
ansi_code.extend(["38", "5"])
ansi_code.append(color)
else:
ansi_code.append(ANSI_COLORS[color])
if ansi_code:
return ANSI_PREFIX + ";".join(ansi_code) + ANSI_END
return ""
|
[
"def",
"_get_ansi_code",
"(",
"color",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"ansi_code",
"=",
"[",
"]",
"if",
"style",
":",
"style_attrs",
"=",
"utils",
".",
"_splitstrip",
"(",
"style",
")",
"for",
"effect",
"in",
"style_attrs",
":",
"ansi_code",
".",
"append",
"(",
"ANSI_STYLES",
"[",
"effect",
"]",
")",
"if",
"color",
":",
"if",
"color",
".",
"isdigit",
"(",
")",
":",
"ansi_code",
".",
"extend",
"(",
"[",
"\"38\"",
",",
"\"5\"",
"]",
")",
"ansi_code",
".",
"append",
"(",
"color",
")",
"else",
":",
"ansi_code",
".",
"append",
"(",
"ANSI_COLORS",
"[",
"color",
"]",
")",
"if",
"ansi_code",
":",
"return",
"ANSI_PREFIX",
"+",
"\";\"",
".",
"join",
"(",
"ansi_code",
")",
"+",
"ANSI_END",
"return",
"\"\""
] | 31.34375 | 19.03125 |
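
Assuming the usual tables (the entries below are illustrative, mirroring
standard ANSI codes with ANSI_PREFIX = '\x1b[' and ANSI_END = 'm'), the
function builds escape sequences like this:

# hypothetical table entries: ANSI_COLORS = {'red': '31', ...}
#                             ANSI_STYLES = {'bold': '1', ...}
code = _get_ansi_code(color='red', style='bold')   # -> '\x1b[1;31m'
print(code + 'error' + '\x1b[0m')                  # 'error' in bold red
code256 = _get_ansi_code(color='208')              # -> '\x1b[38;5;208m'
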
def match(self, *command_tokens, **command_env):
""" :meth:`.WCommandProto.match` implementation
"""
mutated_command_tokens = self.mutate_command_tokens(*command_tokens)
if mutated_command_tokens is None:
return False
return self.selector().select(*mutated_command_tokens, **command_env) is not None
|
[
"def",
"match",
"(",
"self",
",",
"*",
"command_tokens",
",",
"*",
"*",
"command_env",
")",
":",
"mutated_command_tokens",
"=",
"self",
".",
"mutate_command_tokens",
"(",
"*",
"command_tokens",
")",
"if",
"mutated_command_tokens",
"is",
"None",
":",
"return",
"False",
"return",
"self",
".",
"selector",
"(",
")",
".",
"select",
"(",
"*",
"mutated_command_tokens",
",",
"*",
"*",
"command_env",
")",
"is",
"not",
"None"
] | 43.714286 | 15.714286 |
def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):
"""Whether this MTF dimension may be assigned to this mesh dimension.
Args:
mtf_dimension_name: string, the name of a Mesh TensorFlow dimension.
mesh_dimension_name: string, the name of a mesh dimension.
Returns:
A boolean indicating whether the assignment is valid.
"""
return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and
(self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %
self._mesh_dimension_name_to_size[mesh_dimension_name] == 0))
|
[
"def",
"is_valid_assignment",
"(",
"self",
",",
"mtf_dimension_name",
",",
"mesh_dimension_name",
")",
":",
"return",
"(",
"(",
"mtf_dimension_name",
"in",
"self",
".",
"_splittable_mtf_dimension_names",
")",
"and",
"(",
"self",
".",
"_mtf_dimension_name_to_size_gcd",
"[",
"mtf_dimension_name",
"]",
"%",
"self",
".",
"_mesh_dimension_name_to_size",
"[",
"mesh_dimension_name",
"]",
"==",
"0",
")",
")"
] | 45.384615 | 26.769231 |
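
The divisibility check is easiest to see with numbers. Suppose a
splittable MTF dimension has size 32 in one layout and 48 in another, so
its recorded GCD is 16; a mesh dimension of size 8 is then a valid
target, while size 5 is not (all values hypothetical):

import math

gcd_of_sizes = math.gcd(32, 48)   # 16, standing in for the precomputed GCD
print(gcd_of_sizes % 8 == 0)      # True  -> valid assignment
print(gcd_of_sizes % 5 == 0)      # False -> invalid assignment
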
def classic_administrators(self):
"""Instance depends on the API version:
* 2015-06-01: :class:`ClassicAdministratorsOperations<azure.mgmt.authorization.v2015_06_01.operations.ClassicAdministratorsOperations>`
"""
api_version = self._get_api_version('classic_administrators')
if api_version == '2015-06-01':
from .v2015_06_01.operations import ClassicAdministratorsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
|
[
"def",
"classic_administrators",
"(",
"self",
")",
":",
"api_version",
"=",
"self",
".",
"_get_api_version",
"(",
"'classic_administrators'",
")",
"if",
"api_version",
"==",
"'2015-06-01'",
":",
"from",
".",
"v2015_06_01",
".",
"operations",
"import",
"ClassicAdministratorsOperations",
"as",
"OperationClass",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"APIVersion {} is not available\"",
".",
"format",
"(",
"api_version",
")",
")",
"return",
"OperationClass",
"(",
"self",
".",
"_client",
",",
"self",
".",
"config",
",",
"Serializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
",",
"Deserializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
")"
] | 62.909091 | 38.545455 |
def describe_addresses(self, *addresses):
"""
List the elastic IPs allocated in this account.
@param addresses: if specified, the addresses to get information about.
@return: a C{list} of (address, instance_id). If the elastic IP is not
associated currently, C{instance_id} will be C{None}.
"""
address_set = {}
for pos, address in enumerate(addresses):
address_set["PublicIp.%d" % (pos + 1)] = address
query = self.query_factory(
action="DescribeAddresses", creds=self.creds,
endpoint=self.endpoint, other_params=address_set)
d = query.submit()
return d.addCallback(self.parser.describe_addresses)
|
[
"def",
"describe_addresses",
"(",
"self",
",",
"*",
"addresses",
")",
":",
"address_set",
"=",
"{",
"}",
"for",
"pos",
",",
"address",
"in",
"enumerate",
"(",
"addresses",
")",
":",
"address_set",
"[",
"\"PublicIp.%d\"",
"%",
"(",
"pos",
"+",
"1",
")",
"]",
"=",
"address",
"query",
"=",
"self",
".",
"query_factory",
"(",
"action",
"=",
"\"DescribeAddresses\"",
",",
"creds",
"=",
"self",
".",
"creds",
",",
"endpoint",
"=",
"self",
".",
"endpoint",
",",
"other_params",
"=",
"address_set",
")",
"d",
"=",
"query",
".",
"submit",
"(",
")",
"return",
"d",
".",
"addCallback",
"(",
"self",
".",
"parser",
".",
"describe_addresses",
")"
] | 41.882353 | 18.823529 |
def create_dash_stream(self, localStreamNames, targetFolder, **kwargs):
"""
Create Dynamic Adaptive Streaming over HTTP (DASH) out of an existing
H.264/AAC stream. DASH was developed by the Moving Picture Experts
Group (MPEG) to establish a standard for HTTP adaptive-bitrate
streaming that would be accepted by multiple vendors and facilitate
interoperability.
:param localStreamNames: The stream(s) that will be used as the
input. This is a comma-delimited list of active stream names
(local stream names).
:type localStreamNames: str
:param targetFolder: The folder where all the manifest and fragment
files will be stored. This folder must be accessible by the DASH
clients. It is usually in the web-root of the server.
:type targetFolder: str
:param bandwidths: The corresponding bandwidths for each stream listed
in `localStreamNames`. Again, this can be a comma-delimited list.
:type bandwidths: int or str
:param groupName: The name assigned to the DASH stream or group. If
the `localStreamNames` parameter contains only one entry and
`groupName` is not specified, `groupName` will have the value of
the input stream name.
:type groupName: str
:param playlistType: Either `appending` or `rolling`.
:type playlistType: str
:param playlistLength: The number of fragments before the server
starts to overwrite the older fragments. Used only when
`playlistType` is `rolling`. Ignored otherwise.
:type playlistLength: int
:param manifestName: The manifest file name.
:type manifestName: str
:param chunkLength: The length (in seconds) of fragments to be made.
:type chunkLength: int
:param chunkOnIDR: If true, chunking is performed ONLY on IDR.
Otherwise, chunking is performed whenever chunk length is
achieved.
:type chunkOnIDR: int
:param keepAlive: If true, the EMS will attempt to reconnect to the
stream source if the connection is severed.
:type keepAlive: int
:param overwriteDestination: If true, it will allow overwrite of
destination files.
:type overwriteDestination: int
:param staleRetentionCount: How many old files are kept besides the
ones present in the current version of the playlist. Only
applicable for rolling playlists.
:type staleRetentionCount: int
:param cleanupDestination: If true, all manifest and fragment files in
the target folder will be removed before DASH creation is started.
:type cleanupDestination: int
:param dynamicProfile: Set this parameter to 1 (default) for a live
DASH, otherwise set it to 0 for a VOD.
:type dynamicProfile: int
:link: http://docs.evostream.com/ems_api_definition/createdashstream
"""
return self.protocol.execute('createdashstream',
localStreamNames=localStreamNames,
targetFolder=targetFolder, **kwargs)
|
[
"def",
"create_dash_stream",
"(",
"self",
",",
"localStreamNames",
",",
"targetFolder",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"protocol",
".",
"execute",
"(",
"'createdashstream'",
",",
"localStreamNames",
"=",
"localStreamNames",
",",
"targetFolder",
"=",
"targetFolder",
",",
"*",
"*",
"kwargs",
")"
] | 43.863014 | 25.424658 |
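
A hypothetical call through an EMS API client instance (`api` and the
stream/folder names are made up), mirroring the documented parameters:

api.create_dash_stream(
    localStreamNames='testpullstream',
    targetFolder='/var/www/html/dash',
    bandwidths=512,
    groupName='dashgroup',
    playlistType='rolling',
    chunkLength=4,
    chunkOnIDR=1,
)
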
def gene_features(self):
"""
    Return a list of the features of this gene object.
This would include exons, introns, utrs, etc.
"""
nm, strand = self.gene_name, self.strand
feats = [(self.chrom, self.start, self.end, nm, strand, 'gene')]
for feat in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
fname = feat[:-1] if feat[-1] == 's' else feat
res = getattr(self, feat)
if res is None or all(r is None for r in res): continue
if not isinstance(res, list): res = [res]
feats.extend((self.chrom, s, e, nm, strand, fname) for s, e in res)
tss = self.tss(down=1)
if tss is not None:
feats.append((self.chrom, tss[0], tss[1], nm, strand, 'tss'))
prom = self.promoter()
feats.append((self.chrom, prom[0], prom[1], nm, strand, 'promoter'))
return sorted(feats, key=itemgetter(1))
|
[
"def",
"gene_features",
"(",
"self",
")",
":",
"nm",
",",
"strand",
"=",
"self",
".",
"gene_name",
",",
"self",
".",
"strand",
"feats",
"=",
"[",
"(",
"self",
".",
"chrom",
",",
"self",
".",
"start",
",",
"self",
".",
"end",
",",
"nm",
",",
"strand",
",",
"'gene'",
")",
"]",
"for",
"feat",
"in",
"(",
"'introns'",
",",
"'exons'",
",",
"'utr5'",
",",
"'utr3'",
",",
"'cdss'",
")",
":",
"fname",
"=",
"feat",
"[",
":",
"-",
"1",
"]",
"if",
"feat",
"[",
"-",
"1",
"]",
"==",
"'s'",
"else",
"feat",
"res",
"=",
"getattr",
"(",
"self",
",",
"feat",
")",
"if",
"res",
"is",
"None",
"or",
"all",
"(",
"r",
"is",
"None",
"for",
"r",
"in",
"res",
")",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"res",
",",
"list",
")",
":",
"res",
"=",
"[",
"res",
"]",
"feats",
".",
"extend",
"(",
"(",
"self",
".",
"chrom",
",",
"s",
",",
"e",
",",
"nm",
",",
"strand",
",",
"fname",
")",
"for",
"s",
",",
"e",
"in",
"res",
")",
"tss",
"=",
"self",
".",
"tss",
"(",
"down",
"=",
"1",
")",
"if",
"tss",
"is",
"not",
"None",
":",
"feats",
".",
"append",
"(",
"(",
"self",
".",
"chrom",
",",
"tss",
"[",
"0",
"]",
",",
"tss",
"[",
"1",
"]",
",",
"nm",
",",
"strand",
",",
"'tss'",
")",
")",
"prom",
"=",
"self",
".",
"promoter",
"(",
")",
"feats",
".",
"append",
"(",
"(",
"self",
".",
"chrom",
",",
"prom",
"[",
"0",
"]",
",",
"prom",
"[",
"1",
"]",
",",
"nm",
",",
"strand",
",",
"'promoter'",
")",
")",
"return",
"sorted",
"(",
"feats",
",",
"key",
"=",
"itemgetter",
"(",
"1",
")",
")"
] | 44.761905 | 19.714286 |
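
Each feature comes back as a (chrom, start, end, name, strand, type)
tuple sorted by start position; a hypothetical iteration, with `gene`
being a loaded transcript record:

for chrom, start, end, name, strand, ftype in gene.gene_features():
    print(chrom, start, end, name, strand, ftype)
# e.g. chr1 1000 9000 GENE1 + gene
#      chr1 1000 3000 GENE1 + promoter
#      ...
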
def content_type_snapshots(self, space_id, environment_id, content_type_id):
"""
Provides access to content type snapshot management methods.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots
:return: :class:`SnapshotsProxy <contentful_management.snapshots_proxy.SnapshotsProxy>` object.
:rtype: contentful.snapshots_proxy.SnapshotsProxy
Usage:
>>> content_type_snapshots_proxy = client.content_type_snapshots('cfexampleapi', 'master', 'cat')
<SnapshotsProxy[content_types] space_id="cfexampleapi" environment_id="master" parent_resource_id="cat">
"""
return SnapshotsProxy(self, space_id, environment_id, content_type_id, 'content_types')
|
[
"def",
"content_type_snapshots",
"(",
"self",
",",
"space_id",
",",
"environment_id",
",",
"content_type_id",
")",
":",
"return",
"SnapshotsProxy",
"(",
"self",
",",
"space_id",
",",
"environment_id",
",",
"content_type_id",
",",
"'content_types'",
")"
] | 48.8125 | 40.6875 |
def get_item_sh(self, item, roles=None, date_field=None):
"""
    Add sorting hat enrichment fields for different roles.
If there are no roles, just add the author fields.
"""
eitem_sh = {} # Item enriched
author_field = self.get_field_author()
if not roles:
roles = [author_field]
if not date_field:
item_date = str_to_datetime(item[self.get_field_date()])
else:
item_date = str_to_datetime(item[date_field])
users_data = self.get_users_data(item)
for rol in roles:
if rol in users_data:
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol))
if not eitem_sh[rol + '_org_name']:
eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_name']:
eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_user_name']:
eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE
# Add the author field common in all data sources
rol_author = 'author'
if author_field in users_data and author_field != rol_author:
identity = self.get_sh_identity(item, author_field)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol_author))
if not eitem_sh['author_org_name']:
eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_name']:
eitem_sh['author_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_user_name']:
eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE
return eitem_sh
|
[
"def",
"get_item_sh",
"(",
"self",
",",
"item",
",",
"roles",
"=",
"None",
",",
"date_field",
"=",
"None",
")",
":",
"eitem_sh",
"=",
"{",
"}",
"# Item enriched",
"author_field",
"=",
"self",
".",
"get_field_author",
"(",
")",
"if",
"not",
"roles",
":",
"roles",
"=",
"[",
"author_field",
"]",
"if",
"not",
"date_field",
":",
"item_date",
"=",
"str_to_datetime",
"(",
"item",
"[",
"self",
".",
"get_field_date",
"(",
")",
"]",
")",
"else",
":",
"item_date",
"=",
"str_to_datetime",
"(",
"item",
"[",
"date_field",
"]",
")",
"users_data",
"=",
"self",
".",
"get_users_data",
"(",
"item",
")",
"for",
"rol",
"in",
"roles",
":",
"if",
"rol",
"in",
"users_data",
":",
"identity",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
",",
"rol",
")",
"eitem_sh",
".",
"update",
"(",
"self",
".",
"get_item_sh_fields",
"(",
"identity",
",",
"item_date",
",",
"rol",
"=",
"rol",
")",
")",
"if",
"not",
"eitem_sh",
"[",
"rol",
"+",
"'_org_name'",
"]",
":",
"eitem_sh",
"[",
"rol",
"+",
"'_org_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"rol",
"+",
"'_name'",
"]",
":",
"eitem_sh",
"[",
"rol",
"+",
"'_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"rol",
"+",
"'_user_name'",
"]",
":",
"eitem_sh",
"[",
"rol",
"+",
"'_user_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"# Add the author field common in all data sources",
"rol_author",
"=",
"'author'",
"if",
"author_field",
"in",
"users_data",
"and",
"author_field",
"!=",
"rol_author",
":",
"identity",
"=",
"self",
".",
"get_sh_identity",
"(",
"item",
",",
"author_field",
")",
"eitem_sh",
".",
"update",
"(",
"self",
".",
"get_item_sh_fields",
"(",
"identity",
",",
"item_date",
",",
"rol",
"=",
"rol_author",
")",
")",
"if",
"not",
"eitem_sh",
"[",
"'author_org_name'",
"]",
":",
"eitem_sh",
"[",
"'author_org_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"'author_name'",
"]",
":",
"eitem_sh",
"[",
"'author_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"if",
"not",
"eitem_sh",
"[",
"'author_user_name'",
"]",
":",
"eitem_sh",
"[",
"'author_user_name'",
"]",
"=",
"SH_UNKNOWN_VALUE",
"return",
"eitem_sh"
] | 33.557692 | 23.826923 |
def _operators_replace(self, string: str) -> str:
"""
    Searches for the first unary or binary operator (via self.op_regex,
    which has only one group that contains the operator),
    then replaces it (or escapes it if brackets do not match).
Everything until:
* space ' '
* begin/end of the string
* bracket from outer scope (like '{a/b}': term1=a term2=b)
is considered a term (contents of matching brackets '{}' are
ignored).
    Parameters
----------
string: str
string to replace
"""
# noinspection PyShadowingNames
def replace(string: str, start: int, end: int, substring: str) -> str:
return string[0:start] + substring + string[end:len(string)]
# noinspection PyShadowingNames
def sub_pat(pat: Callable[[list], str] or str, terms: list) -> str:
if isinstance(pat, str):
return pat.format(*terms)
else:
return pat(terms)
count = 0
def check():
nonlocal count
count += 1
if count > self.max_while:
raise RuntimeError('Presumably while loop is stuck')
# noinspection PyShadowingNames
def null_replace(match) -> str:
regex_terms = [gr for gr in match.groups() if gr is not None]
op = regex_terms[0]
terms = regex_terms[1:]
return sub_pat(self.null_ops.ops[op]['pat'], terms)
string = self.null_ops.regex.sub(null_replace, string)
for ops, loc in [(self.pref_un_ops, 'r'), (self.postf_un_ops, 'l'),
(self.bin_centr_ops, 'lr')]:
count = 0
match = ops.regex.search(string)
while match:
check()
regex_terms = [gr for gr in match.groups() if gr is not None]
op = regex_terms[0]
loc_map = self._local_map(match, loc)
lmatch, rmatch = None, None
if loc == 'l' or loc == 'lr':
for m in ops.ops[op]['pref'].finditer(string):
if m.end() <= match.start() and loc_map[m.end() - 1] == 0:
lmatch = m
if lmatch is None:
string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op))
match = ops.regex.search(string)
continue
else:
term1 = string[lmatch.end():match.start()]
if loc == 'r' or loc == 'lr':
for m in ops.ops[op]['postf'].finditer(string):
if m.start() >= match.end() and loc_map[m.start()] == 0:
rmatch = m
break
if rmatch is None:
string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op))
match = ops.regex.search(string)
continue
else:
term2 = string[match.end():rmatch.start()]
if loc == 'l':
# noinspection PyUnboundLocalVariable
terms = list(lmatch.groups()) + [term1] + regex_terms[1:]
start, end = lmatch.start(), match.end()
elif loc == 'r':
# noinspection PyUnboundLocalVariable
terms = regex_terms[1:] + [term2] + list(rmatch.groups())
start, end = match.start(), rmatch.end()
elif loc == 'lr':
terms = list(lmatch.groups()) + [term1] + regex_terms[1:] + [term2] + list(rmatch.groups())
start, end = lmatch.start(), rmatch.end()
else: # this never happen
terms = regex_terms[1:]
start, end = match.start(), match.end()
string = replace(string, start, end, sub_pat(ops.ops[op]['pat'], terms))
match = ops.regex.search(string)
return string
|
[
"def",
"_operators_replace",
"(",
"self",
",",
"string",
":",
"str",
")",
"->",
"str",
":",
"# noinspection PyShadowingNames",
"def",
"replace",
"(",
"string",
":",
"str",
",",
"start",
":",
"int",
",",
"end",
":",
"int",
",",
"substring",
":",
"str",
")",
"->",
"str",
":",
"return",
"string",
"[",
"0",
":",
"start",
"]",
"+",
"substring",
"+",
"string",
"[",
"end",
":",
"len",
"(",
"string",
")",
"]",
"# noinspection PyShadowingNames",
"def",
"sub_pat",
"(",
"pat",
":",
"Callable",
"[",
"[",
"list",
"]",
",",
"str",
"]",
"or",
"str",
",",
"terms",
":",
"list",
")",
"->",
"str",
":",
"if",
"isinstance",
"(",
"pat",
",",
"str",
")",
":",
"return",
"pat",
".",
"format",
"(",
"*",
"terms",
")",
"else",
":",
"return",
"pat",
"(",
"terms",
")",
"count",
"=",
"0",
"def",
"check",
"(",
")",
":",
"nonlocal",
"count",
"count",
"+=",
"1",
"if",
"count",
">",
"self",
".",
"max_while",
":",
"raise",
"RuntimeError",
"(",
"'Presumably while loop is stuck'",
")",
"# noinspection PyShadowingNames",
"def",
"null_replace",
"(",
"match",
")",
"->",
"str",
":",
"regex_terms",
"=",
"[",
"gr",
"for",
"gr",
"in",
"match",
".",
"groups",
"(",
")",
"if",
"gr",
"is",
"not",
"None",
"]",
"op",
"=",
"regex_terms",
"[",
"0",
"]",
"terms",
"=",
"regex_terms",
"[",
"1",
":",
"]",
"return",
"sub_pat",
"(",
"self",
".",
"null_ops",
".",
"ops",
"[",
"op",
"]",
"[",
"'pat'",
"]",
",",
"terms",
")",
"string",
"=",
"self",
".",
"null_ops",
".",
"regex",
".",
"sub",
"(",
"null_replace",
",",
"string",
")",
"for",
"ops",
",",
"loc",
"in",
"[",
"(",
"self",
".",
"pref_un_ops",
",",
"'r'",
")",
",",
"(",
"self",
".",
"postf_un_ops",
",",
"'l'",
")",
",",
"(",
"self",
".",
"bin_centr_ops",
",",
"'lr'",
")",
"]",
":",
"count",
"=",
"0",
"match",
"=",
"ops",
".",
"regex",
".",
"search",
"(",
"string",
")",
"while",
"match",
":",
"check",
"(",
")",
"regex_terms",
"=",
"[",
"gr",
"for",
"gr",
"in",
"match",
".",
"groups",
"(",
")",
"if",
"gr",
"is",
"not",
"None",
"]",
"op",
"=",
"regex_terms",
"[",
"0",
"]",
"loc_map",
"=",
"self",
".",
"_local_map",
"(",
"match",
",",
"loc",
")",
"lmatch",
",",
"rmatch",
"=",
"None",
",",
"None",
"if",
"loc",
"==",
"'l'",
"or",
"loc",
"==",
"'lr'",
":",
"for",
"m",
"in",
"ops",
".",
"ops",
"[",
"op",
"]",
"[",
"'pref'",
"]",
".",
"finditer",
"(",
"string",
")",
":",
"if",
"m",
".",
"end",
"(",
")",
"<=",
"match",
".",
"start",
"(",
")",
"and",
"loc_map",
"[",
"m",
".",
"end",
"(",
")",
"-",
"1",
"]",
"==",
"0",
":",
"lmatch",
"=",
"m",
"if",
"lmatch",
"is",
"None",
":",
"string",
"=",
"replace",
"(",
"string",
",",
"match",
".",
"start",
"(",
")",
",",
"match",
".",
"end",
"(",
")",
",",
"match",
".",
"group",
"(",
"0",
")",
".",
"replace",
"(",
"op",
",",
"'\\\\'",
"+",
"op",
")",
")",
"match",
"=",
"ops",
".",
"regex",
".",
"search",
"(",
"string",
")",
"continue",
"else",
":",
"term1",
"=",
"string",
"[",
"lmatch",
".",
"end",
"(",
")",
":",
"match",
".",
"start",
"(",
")",
"]",
"if",
"loc",
"==",
"'r'",
"or",
"loc",
"==",
"'lr'",
":",
"for",
"m",
"in",
"ops",
".",
"ops",
"[",
"op",
"]",
"[",
"'postf'",
"]",
".",
"finditer",
"(",
"string",
")",
":",
"if",
"m",
".",
"start",
"(",
")",
">=",
"match",
".",
"end",
"(",
")",
"and",
"loc_map",
"[",
"m",
".",
"start",
"(",
")",
"]",
"==",
"0",
":",
"rmatch",
"=",
"m",
"break",
"if",
"rmatch",
"is",
"None",
":",
"string",
"=",
"replace",
"(",
"string",
",",
"match",
".",
"start",
"(",
")",
",",
"match",
".",
"end",
"(",
")",
",",
"match",
".",
"group",
"(",
"0",
")",
".",
"replace",
"(",
"op",
",",
"'\\\\'",
"+",
"op",
")",
")",
"match",
"=",
"ops",
".",
"regex",
".",
"search",
"(",
"string",
")",
"continue",
"else",
":",
"term2",
"=",
"string",
"[",
"match",
".",
"end",
"(",
")",
":",
"rmatch",
".",
"start",
"(",
")",
"]",
"if",
"loc",
"==",
"'l'",
":",
"# noinspection PyUnboundLocalVariable",
"terms",
"=",
"list",
"(",
"lmatch",
".",
"groups",
"(",
")",
")",
"+",
"[",
"term1",
"]",
"+",
"regex_terms",
"[",
"1",
":",
"]",
"start",
",",
"end",
"=",
"lmatch",
".",
"start",
"(",
")",
",",
"match",
".",
"end",
"(",
")",
"elif",
"loc",
"==",
"'r'",
":",
"# noinspection PyUnboundLocalVariable",
"terms",
"=",
"regex_terms",
"[",
"1",
":",
"]",
"+",
"[",
"term2",
"]",
"+",
"list",
"(",
"rmatch",
".",
"groups",
"(",
")",
")",
"start",
",",
"end",
"=",
"match",
".",
"start",
"(",
")",
",",
"rmatch",
".",
"end",
"(",
")",
"elif",
"loc",
"==",
"'lr'",
":",
"terms",
"=",
"list",
"(",
"lmatch",
".",
"groups",
"(",
")",
")",
"+",
"[",
"term1",
"]",
"+",
"regex_terms",
"[",
"1",
":",
"]",
"+",
"[",
"term2",
"]",
"+",
"list",
"(",
"rmatch",
".",
"groups",
"(",
")",
")",
"start",
",",
"end",
"=",
"lmatch",
".",
"start",
"(",
")",
",",
"rmatch",
".",
"end",
"(",
")",
"else",
":",
"# this never happen",
"terms",
"=",
"regex_terms",
"[",
"1",
":",
"]",
"start",
",",
"end",
"=",
"match",
".",
"start",
"(",
")",
",",
"match",
".",
"end",
"(",
")",
"string",
"=",
"replace",
"(",
"string",
",",
"start",
",",
"end",
",",
"sub_pat",
"(",
"ops",
".",
"ops",
"[",
"op",
"]",
"[",
"'pat'",
"]",
",",
"terms",
")",
")",
"match",
"=",
"ops",
".",
"regex",
".",
"search",
"(",
"string",
")",
"return",
"string"
] | 43.2 | 20.421053 |
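
In the simplest setting, the binary-centered case reduces to finding a
term on each side of the operator and substituting both into a pattern.
A toy sketch that ignores bracket tracking and escaping:

import re

def frac_replace(s):
    # Rewrite a/b as \frac{a}{b}; "terms" here are just non-space runs.
    return re.sub(r'(\S+)/(\S+)',
                  lambda m: r'\frac{%s}{%s}' % (m.group(1), m.group(2)), s)

print(frac_replace('x/y + 1'))   # \frac{x}{y} + 1
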
def returner(ret):
'''
Log outcome to sentry. The returner tries to identify errors and report
them as such. All other messages will be reported at info level.
    Failed states will be appended as a separate list for convenience.
'''
try:
_connect_sentry(_get_message(ret), ret)
except Exception as err:
log.error('Can\'t run connect_sentry: %s', err, exc_info=True)
|
[
"def",
"returner",
"(",
"ret",
")",
":",
"try",
":",
"_connect_sentry",
"(",
"_get_message",
"(",
"ret",
")",
",",
"ret",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'Can\\'t run connect_sentry: %s'",
",",
"err",
",",
"exc_info",
"=",
"True",
")"
] | 36 | 27.272727 |
def extension(filename):
'''Properly extract the extension from filename'''
filename = os.path.basename(filename)
extension = None
while '.' in filename:
filename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
extension = ext if not extension else ext + '.' + extension
return extension
|
[
"def",
"extension",
"(",
"filename",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"extension",
"=",
"None",
"while",
"'.'",
"in",
"filename",
":",
"filename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"ext",
".",
"startswith",
"(",
"'.'",
")",
":",
"ext",
"=",
"ext",
"[",
"1",
":",
"]",
"extension",
"=",
"ext",
"if",
"not",
"extension",
"else",
"ext",
"+",
"'.'",
"+",
"extension",
"return",
"extension"
] | 29.833333 | 18.833333 |
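
Because the loop keeps splitting, compound extensions survive intact:

print(extension('archive.tar.gz'))    # 'tar.gz'
print(extension('/tmp/report.pdf'))   # 'pdf'
print(extension('README'))            # None
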
def ismounted(device):
"""
Check if partition is mounted
Example::
from burlap.disk import ismounted
if ismounted('/dev/sda1'):
print ("disk sda1 is mounted")
"""
# Check filesystem
with settings(hide('running', 'stdout')):
res = run_as_root('mount')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
# Check swap
with settings(hide('running', 'stdout')):
res = run_as_root('swapon -s')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
return False
|
[
"def",
"ismounted",
"(",
"device",
")",
":",
"# Check filesystem",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"res",
"=",
"run_as_root",
"(",
"'mount'",
")",
"for",
"line",
"in",
"res",
".",
"splitlines",
"(",
")",
":",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"if",
"fields",
"[",
"0",
"]",
"==",
"device",
":",
"return",
"True",
"# Check swap",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
")",
":",
"res",
"=",
"run_as_root",
"(",
"'swapon -s'",
")",
"for",
"line",
"in",
"res",
".",
"splitlines",
"(",
")",
":",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"if",
"fields",
"[",
"0",
"]",
"==",
"device",
":",
"return",
"True",
"return",
"False"
] | 23.071429 | 15.428571 |
def _tffunc(*argtypes):
'''Helper that transforms TF-graph generating function into a regular one.
See `_resize` function below.
'''
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
|
[
"def",
"_tffunc",
"(",
"*",
"argtypes",
")",
":",
"placeholders",
"=",
"list",
"(",
"map",
"(",
"tf",
".",
"placeholder",
",",
"argtypes",
")",
")",
"def",
"wrap",
"(",
"f",
")",
":",
"out",
"=",
"f",
"(",
"*",
"placeholders",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"return",
"out",
".",
"eval",
"(",
"dict",
"(",
"zip",
"(",
"placeholders",
",",
"args",
")",
")",
",",
"session",
"=",
"kw",
".",
"get",
"(",
"'session'",
")",
")",
"return",
"wrapper",
"return",
"wrap"
] | 39.636364 | 20.181818 |
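
A usage sketch under TF1-style graph mode (where `tf.placeholder` and
`Tensor.eval` exist):

import tensorflow as tf   # TF1 graph-mode API assumed

@_tffunc(tf.float32)
def double(x):
    return x * 2.0

with tf.Session() as sess:
    print(double([1.0, 2.0], session=sess))   # [2. 4.]
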
def parse_input():
"""Parses command line input."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-c', '--config', type=str,
help="Specify a configuration file")
return parser.parse_args()
|
[
"def",
"parse_input",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
",",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--config'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Specify a configuration file\"",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] | 40.125 | 12.375 |
async def send_script(self, conn_id, data):
"""Send a a script to a device.
See :meth:`AbstractDeviceAdapter.send_script`.
"""
progress_callback = functools.partial(_on_progress, self, 'script', conn_id)
resp = await self._execute(self._adapter.send_script_sync, conn_id, data, progress_callback)
        _raise_error(conn_id, 'send_script', resp)
|
[
"async",
"def",
"send_script",
"(",
"self",
",",
"conn_id",
",",
"data",
")",
":",
"progress_callback",
"=",
"functools",
".",
"partial",
"(",
"_on_progress",
",",
"self",
",",
"'script'",
",",
"conn_id",
")",
"resp",
"=",
"await",
"self",
".",
"_execute",
"(",
"self",
".",
"_adapter",
".",
"send_script_sync",
",",
"conn_id",
",",
"data",
",",
"progress_callback",
")",
"_raise_error",
"(",
"conn_id",
",",
"'send_rpc'",
",",
"resp",
")"
] | 37.8 | 24.8 |
def linear_rref(A, b, Matrix=None, S=None):
""" Transform a linear system to reduced row-echelon form
Transforms both the matrix and right-hand side of a linear
system of equations to reduced row echelon form
Parameters
----------
A : Matrix-like
Iterable of rows.
b : iterable
Returns
-------
A', b' - transformed versions
"""
if Matrix is None:
from sympy import Matrix
if S is None:
from sympy import S
mat_rows = [_map2l(S, list(row) + [v]) for row, v in zip(A, b)]
aug = Matrix(mat_rows)
raug, pivot = aug.rref()
nindep = len(pivot)
return raug[:nindep, :-1], raug[:nindep, -1]
|
[
"def",
"linear_rref",
"(",
"A",
",",
"b",
",",
"Matrix",
"=",
"None",
",",
"S",
"=",
"None",
")",
":",
"if",
"Matrix",
"is",
"None",
":",
"from",
"sympy",
"import",
"Matrix",
"if",
"S",
"is",
"None",
":",
"from",
"sympy",
"import",
"S",
"mat_rows",
"=",
"[",
"_map2l",
"(",
"S",
",",
"list",
"(",
"row",
")",
"+",
"[",
"v",
"]",
")",
"for",
"row",
",",
"v",
"in",
"zip",
"(",
"A",
",",
"b",
")",
"]",
"aug",
"=",
"Matrix",
"(",
"mat_rows",
")",
"raug",
",",
"pivot",
"=",
"aug",
".",
"rref",
"(",
")",
"nindep",
"=",
"len",
"(",
"pivot",
")",
"return",
"raug",
"[",
":",
"nindep",
",",
":",
"-",
"1",
"]",
",",
"raug",
"[",
":",
"nindep",
",",
"-",
"1",
"]"
] | 25.269231 | 19.730769 |
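A small worked example, assuming `_map2l(S, row)` (not shown here) simply applies sympy's S to every entry of the row. Row two is twice row one, so only two independent rows survive:

from sympy import Matrix, S  # linear_rref imports these lazily anyway

A = [[1, 2], [2, 4], [0, 1]]
b = [3, 6, 1]
Ar, br = linear_rref(A, b)
print(Ar)  # Matrix([[1, 0], [0, 1]])
print(br)  # Matrix([[1], [1]])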
def filtered(self, feature_filter, x=None):
# type: (Callable, Any) -> Tuple[FeatureNames, List[int]]
""" Return feature names filtered by a regular expression
``feature_re``, and indices of filtered elements.
"""
indices = []
filtered_feature_names = []
indexed_names = None # type: Optional[Iterable[Tuple[int, Any]]]
if isinstance(self.feature_names, (np.ndarray, list)):
indexed_names = enumerate(self.feature_names)
elif isinstance(self.feature_names, dict):
indexed_names = six.iteritems(self.feature_names)
elif self.feature_names is None:
indexed_names = []
assert indexed_names is not None
if x is not None:
if sp.issparse(x) and len(x.shape) == 2:
assert x.shape[0] == 1
flt = lambda nm, i: feature_filter(nm, x[0, i])
else:
# FIXME: mypy warns about x[i] because it thinks x can be None
flt = lambda nm, i: feature_filter(nm, x[i]) # type: ignore
else:
flt = lambda nm, i: feature_filter(nm)
for idx, name in indexed_names:
if any(flt(nm, idx) for nm in _all_feature_names(name)):
indices.append(idx)
filtered_feature_names.append(name)
if self.has_bias and flt(self.bias_name, self.bias_idx):
assert self.bias_idx is not None # for mypy
bias_name = self.bias_name
indices.append(self.bias_idx)
else:
bias_name = None
return (
FeatureNames(
filtered_feature_names,
bias_name=bias_name,
unkn_template=self.unkn_template,
),
indices)
|
[
"def",
"filtered",
"(",
"self",
",",
"feature_filter",
",",
"x",
"=",
"None",
")",
":",
"# type: (Callable, Any) -> Tuple[FeatureNames, List[int]]",
"indices",
"=",
"[",
"]",
"filtered_feature_names",
"=",
"[",
"]",
"indexed_names",
"=",
"None",
"# type: Optional[Iterable[Tuple[int, Any]]]",
"if",
"isinstance",
"(",
"self",
".",
"feature_names",
",",
"(",
"np",
".",
"ndarray",
",",
"list",
")",
")",
":",
"indexed_names",
"=",
"enumerate",
"(",
"self",
".",
"feature_names",
")",
"elif",
"isinstance",
"(",
"self",
".",
"feature_names",
",",
"dict",
")",
":",
"indexed_names",
"=",
"six",
".",
"iteritems",
"(",
"self",
".",
"feature_names",
")",
"elif",
"self",
".",
"feature_names",
"is",
"None",
":",
"indexed_names",
"=",
"[",
"]",
"assert",
"indexed_names",
"is",
"not",
"None",
"if",
"x",
"is",
"not",
"None",
":",
"if",
"sp",
".",
"issparse",
"(",
"x",
")",
"and",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"2",
":",
"assert",
"x",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
"flt",
"=",
"lambda",
"nm",
",",
"i",
":",
"feature_filter",
"(",
"nm",
",",
"x",
"[",
"0",
",",
"i",
"]",
")",
"else",
":",
"# FIXME: mypy warns about x[i] because it thinks x can be None",
"flt",
"=",
"lambda",
"nm",
",",
"i",
":",
"feature_filter",
"(",
"nm",
",",
"x",
"[",
"i",
"]",
")",
"# type: ignore",
"else",
":",
"flt",
"=",
"lambda",
"nm",
",",
"i",
":",
"feature_filter",
"(",
"nm",
")",
"for",
"idx",
",",
"name",
"in",
"indexed_names",
":",
"if",
"any",
"(",
"flt",
"(",
"nm",
",",
"idx",
")",
"for",
"nm",
"in",
"_all_feature_names",
"(",
"name",
")",
")",
":",
"indices",
".",
"append",
"(",
"idx",
")",
"filtered_feature_names",
".",
"append",
"(",
"name",
")",
"if",
"self",
".",
"has_bias",
"and",
"flt",
"(",
"self",
".",
"bias_name",
",",
"self",
".",
"bias_idx",
")",
":",
"assert",
"self",
".",
"bias_idx",
"is",
"not",
"None",
"# for mypy",
"bias_name",
"=",
"self",
".",
"bias_name",
"indices",
".",
"append",
"(",
"self",
".",
"bias_idx",
")",
"else",
":",
"bias_name",
"=",
"None",
"return",
"(",
"FeatureNames",
"(",
"filtered_feature_names",
",",
"bias_name",
"=",
"bias_name",
",",
"unkn_template",
"=",
"self",
".",
"unkn_template",
",",
")",
",",
"indices",
")"
] | 40.790698 | 15.697674 |
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
parent_id = request.REQUEST.get('parent_id', None)
if parent_id:
return FolderForm
else:
folder_form = super(FolderAdmin, self).get_form(
request, obj=None, **kwargs)
def folder_form_clean(form_obj):
cleaned_data = form_obj.cleaned_data
folders_with_same_name = Folder.objects.filter(
parent=form_obj.instance.parent,
name=cleaned_data['name'])
if form_obj.instance.pk:
folders_with_same_name = folders_with_same_name.exclude(
pk=form_obj.instance.pk)
if folders_with_same_name.exists():
raise ValidationError(
'Folder with this name already exists.')
return cleaned_data
# attach clean to the default form rather than defining a new form
# class
folder_form.clean = folder_form_clean
return folder_form
|
[
"def",
"get_form",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"parent_id",
"=",
"request",
".",
"REQUEST",
".",
"get",
"(",
"'parent_id'",
",",
"None",
")",
"if",
"parent_id",
":",
"return",
"FolderForm",
"else",
":",
"folder_form",
"=",
"super",
"(",
"FolderAdmin",
",",
"self",
")",
".",
"get_form",
"(",
"request",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"def",
"folder_form_clean",
"(",
"form_obj",
")",
":",
"cleaned_data",
"=",
"form_obj",
".",
"cleaned_data",
"folders_with_same_name",
"=",
"Folder",
".",
"objects",
".",
"filter",
"(",
"parent",
"=",
"form_obj",
".",
"instance",
".",
"parent",
",",
"name",
"=",
"cleaned_data",
"[",
"'name'",
"]",
")",
"if",
"form_obj",
".",
"instance",
".",
"pk",
":",
"folders_with_same_name",
"=",
"folders_with_same_name",
".",
"exclude",
"(",
"pk",
"=",
"form_obj",
".",
"instance",
".",
"pk",
")",
"if",
"folders_with_same_name",
".",
"exists",
"(",
")",
":",
"raise",
"ValidationError",
"(",
"'Folder with this name already exists.'",
")",
"return",
"cleaned_data",
"# attach clean to the default form rather than defining a new form",
"# class",
"folder_form",
".",
"clean",
"=",
"folder_form_clean",
"return",
"folder_form"
] | 41.103448 | 15.517241 |
def getDatabaseFileSize(self):
""" Return the file size of the database as a pretty string. """
if DISABLE_PERSISTENT_CACHING:
return "?"
size = os.path.getsize(self.__db_filepath)
if size > 1000000000:
size = "%0.3fGB" % (size / 1000000000)
elif size > 1000000:
size = "%0.2fMB" % (size / 1000000)
elif size > 1000:
size = "%uKB" % (size // 1000)
else:
size = "%uB" % (size)
return size
|
[
"def",
"getDatabaseFileSize",
"(",
"self",
")",
":",
"if",
"DISABLE_PERSISTENT_CACHING",
":",
"return",
"\"?\"",
"size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"__db_filepath",
")",
"if",
"size",
">",
"1000000000",
":",
"size",
"=",
"\"%0.3fGB\"",
"%",
"(",
"size",
"/",
"1000000000",
")",
"elif",
"size",
">",
"1000000",
":",
"size",
"=",
"\"%0.2fMB\"",
"%",
"(",
"size",
"/",
"1000000",
")",
"elif",
"size",
">",
"1000",
":",
"size",
"=",
"\"%uKB\"",
"%",
"(",
"size",
"//",
"1000",
")",
"else",
":",
"size",
"=",
"\"%uB\"",
"%",
"(",
"size",
")",
"return",
"size"
] | 31.142857 | 12.428571 |
def AddAdapter(self, device_name, system_name):
    '''Convenience method to add a Bluetooth adapter.
    You have to specify a device name which must be a valid part of an object
    path, e.g. "hci0", and an arbitrary system name (pretty hostname).
Returns the new object path.
'''
path = '/org/bluez/' + device_name
adapter_properties = {
'UUIDs': dbus.Array([
'00001000-0000-1000-8000-00805f9b34fb',
'00001001-0000-1000-8000-00805f9b34fb',
'0000112d-0000-1000-8000-00805f9b34fb',
'00001112-0000-1000-8000-00805f9b34fb',
'0000111f-0000-1000-8000-00805f9b34fb',
'0000111e-0000-1000-8000-00805f9b34fb',
'0000110c-0000-1000-8000-00805f9b34fb',
'0000110e-0000-1000-8000-00805f9b34fb',
'0000110a-0000-1000-8000-00805f9b34fb',
'0000110b-0000-1000-8000-00805f9b34fb',
], variant_level=1),
'Discoverable': dbus.Boolean(False, variant_level=1),
'Discovering': dbus.Boolean(False, variant_level=1),
'Pairable': dbus.Boolean(True, variant_level=1),
'Powered': dbus.Boolean(True, variant_level=1),
'Address': dbus.String('00:01:02:03:04:05', variant_level=1),
'Alias': dbus.String(system_name, variant_level=1),
'Name': dbus.String(system_name, variant_level=1),
# Reference:
# http://bluetooth-pentest.narod.ru/software/
# bluetooth_class_of_device-service_generator.html
'Class': dbus.UInt32(268, variant_level=1), # Computer, Laptop
}
self.AddObject(path,
ADAPTER_IFACE,
# Properties
adapter_properties,
# Methods
[
('GetProperties', '', 'a{sv}', 'ret = self.GetAll("org.bluez.Adapter")'),
('SetProperty', 'sv', '', 'self.Set("org.bluez.Adapter", args[0], args[1]); '
'self.EmitSignal("org.bluez.Adapter", "PropertyChanged",'
' "sv", [args[0], args[1]])'),
])
manager = mockobject.objects['/']
manager.props[MANAGER_IFACE]['Adapters'] \
= [dbus.ObjectPath(path, variant_level=1)]
manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',
'oa{sa{sv}}', [
dbus.ObjectPath(path, variant_level=1),
{ADAPTER_IFACE: adapter_properties},
])
manager.EmitSignal(MANAGER_IFACE, 'AdapterAdded',
'o', [dbus.ObjectPath(path, variant_level=1)])
manager.EmitSignal(MANAGER_IFACE, 'DefaultAdapterChanged',
'o', [dbus.ObjectPath(path, variant_level=1)])
manager.EmitSignal(MANAGER_IFACE, 'PropertyChanged', 'sv', [
"Adapters",
dbus.Array([dbus.ObjectPath(path, variant_level=1), ], variant_level=1),
])
return path
|
[
"def",
"AddAdapter",
"(",
"self",
",",
"device_name",
",",
"system_name",
")",
":",
"path",
"=",
"'/org/bluez/'",
"+",
"device_name",
"adapter_properties",
"=",
"{",
"'UUIDs'",
":",
"dbus",
".",
"Array",
"(",
"[",
"'00001000-0000-1000-8000-00805f9b34fb'",
",",
"'00001001-0000-1000-8000-00805f9b34fb'",
",",
"'0000112d-0000-1000-8000-00805f9b34fb'",
",",
"'00001112-0000-1000-8000-00805f9b34fb'",
",",
"'0000111f-0000-1000-8000-00805f9b34fb'",
",",
"'0000111e-0000-1000-8000-00805f9b34fb'",
",",
"'0000110c-0000-1000-8000-00805f9b34fb'",
",",
"'0000110e-0000-1000-8000-00805f9b34fb'",
",",
"'0000110a-0000-1000-8000-00805f9b34fb'",
",",
"'0000110b-0000-1000-8000-00805f9b34fb'",
",",
"]",
",",
"variant_level",
"=",
"1",
")",
",",
"'Discoverable'",
":",
"dbus",
".",
"Boolean",
"(",
"False",
",",
"variant_level",
"=",
"1",
")",
",",
"'Discovering'",
":",
"dbus",
".",
"Boolean",
"(",
"False",
",",
"variant_level",
"=",
"1",
")",
",",
"'Pairable'",
":",
"dbus",
".",
"Boolean",
"(",
"True",
",",
"variant_level",
"=",
"1",
")",
",",
"'Powered'",
":",
"dbus",
".",
"Boolean",
"(",
"True",
",",
"variant_level",
"=",
"1",
")",
",",
"'Address'",
":",
"dbus",
".",
"String",
"(",
"'00:01:02:03:04:05'",
",",
"variant_level",
"=",
"1",
")",
",",
"'Alias'",
":",
"dbus",
".",
"String",
"(",
"system_name",
",",
"variant_level",
"=",
"1",
")",
",",
"'Name'",
":",
"dbus",
".",
"String",
"(",
"system_name",
",",
"variant_level",
"=",
"1",
")",
",",
"# Reference:",
"# http://bluetooth-pentest.narod.ru/software/",
"# bluetooth_class_of_device-service_generator.html",
"'Class'",
":",
"dbus",
".",
"UInt32",
"(",
"268",
",",
"variant_level",
"=",
"1",
")",
",",
"# Computer, Laptop",
"}",
"self",
".",
"AddObject",
"(",
"path",
",",
"ADAPTER_IFACE",
",",
"# Properties",
"adapter_properties",
",",
"# Methods",
"[",
"(",
"'GetProperties'",
",",
"''",
",",
"'a{sv}'",
",",
"'ret = self.GetAll(\"org.bluez.Adapter\")'",
")",
",",
"(",
"'SetProperty'",
",",
"'sv'",
",",
"''",
",",
"'self.Set(\"org.bluez.Adapter\", args[0], args[1]); '",
"'self.EmitSignal(\"org.bluez.Adapter\", \"PropertyChanged\",'",
"' \"sv\", [args[0], args[1]])'",
")",
",",
"]",
")",
"manager",
"=",
"mockobject",
".",
"objects",
"[",
"'/'",
"]",
"manager",
".",
"props",
"[",
"MANAGER_IFACE",
"]",
"[",
"'Adapters'",
"]",
"=",
"[",
"dbus",
".",
"ObjectPath",
"(",
"path",
",",
"variant_level",
"=",
"1",
")",
"]",
"manager",
".",
"EmitSignal",
"(",
"OBJECT_MANAGER_IFACE",
",",
"'InterfacesAdded'",
",",
"'oa{sa{sv}}'",
",",
"[",
"dbus",
".",
"ObjectPath",
"(",
"path",
",",
"variant_level",
"=",
"1",
")",
",",
"{",
"ADAPTER_IFACE",
":",
"adapter_properties",
"}",
",",
"]",
")",
"manager",
".",
"EmitSignal",
"(",
"MANAGER_IFACE",
",",
"'AdapterAdded'",
",",
"'o'",
",",
"[",
"dbus",
".",
"ObjectPath",
"(",
"path",
",",
"variant_level",
"=",
"1",
")",
"]",
")",
"manager",
".",
"EmitSignal",
"(",
"MANAGER_IFACE",
",",
"'DefaultAdapterChanged'",
",",
"'o'",
",",
"[",
"dbus",
".",
"ObjectPath",
"(",
"path",
",",
"variant_level",
"=",
"1",
")",
"]",
")",
"manager",
".",
"EmitSignal",
"(",
"MANAGER_IFACE",
",",
"'PropertyChanged'",
",",
"'sv'",
",",
"[",
"\"Adapters\"",
",",
"dbus",
".",
"Array",
"(",
"[",
"dbus",
".",
"ObjectPath",
"(",
"path",
",",
"variant_level",
"=",
"1",
")",
",",
"]",
",",
"variant_level",
"=",
"1",
")",
",",
"]",
")",
"return",
"path"
] | 44.590909 | 21.560606 |
def data(place):
"""get forecast data."""
lat, lon = place
url = "https://api.forecast.io/forecast/%s/%s,%s?solar" % (APIKEY, lat,
lon)
w_data = json.loads(urllib2.urlopen(url).read())
return w_data
|
[
"def",
"data",
"(",
"place",
")",
":",
"lat",
",",
"lon",
"=",
"place",
"url",
"=",
"\"https://api.forecast.io/forecast/%s/%s,%s?solar\"",
"%",
"(",
"APIKEY",
",",
"lat",
",",
"lon",
")",
"w_data",
"=",
"json",
".",
"loads",
"(",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
".",
"read",
"(",
")",
")",
"return",
"w_data"
] | 39.285714 | 20.142857 |
def format_spec_to_regex(field_name, format_spec):
"""Make an attempt at converting a format spec to a regular expression."""
# NOTE: remove escaped backslashes so regex matches
regex_match = fmt_spec_regex.match(format_spec.replace('\\', ''))
if regex_match is None:
raise ValueError("Invalid format specification: '{}'".format(format_spec))
regex_dict = regex_match.groupdict()
fill = regex_dict['fill']
ftype = regex_dict['type']
width = regex_dict['width']
align = regex_dict['align']
# NOTE: does not properly handle `=` alignment
if fill is None:
if width is not None and width[0] == '0':
fill = '0'
elif ftype in ['s', 'd']:
fill = ' '
char_type = spec_regexes[ftype]
if ftype == 's' and align and align.endswith('='):
raise ValueError("Invalid format specification: '{}'".format(format_spec))
final_regex = char_type
if ftype in allow_multiple and (not width or width == '0'):
final_regex += r'*'
elif width and width != '0':
if not fill:
# we know we have exactly this many characters
final_regex += r'{{{}}}'.format(int(width))
elif fill:
# we don't know how many fill characters we have compared to
# field characters so just match all characters and sort it out
# later during type conversion.
final_regex = r'.{{{}}}'.format(int(width))
elif ftype in allow_multiple:
final_regex += r'*'
return r'(?P<{}>{})'.format(field_name, final_regex)
|
[
"def",
"format_spec_to_regex",
"(",
"field_name",
",",
"format_spec",
")",
":",
"# NOTE: remove escaped backslashes so regex matches",
"regex_match",
"=",
"fmt_spec_regex",
".",
"match",
"(",
"format_spec",
".",
"replace",
"(",
"'\\\\'",
",",
"''",
")",
")",
"if",
"regex_match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid format specification: '{}'\"",
".",
"format",
"(",
"format_spec",
")",
")",
"regex_dict",
"=",
"regex_match",
".",
"groupdict",
"(",
")",
"fill",
"=",
"regex_dict",
"[",
"'fill'",
"]",
"ftype",
"=",
"regex_dict",
"[",
"'type'",
"]",
"width",
"=",
"regex_dict",
"[",
"'width'",
"]",
"align",
"=",
"regex_dict",
"[",
"'align'",
"]",
"# NOTE: does not properly handle `=` alignment",
"if",
"fill",
"is",
"None",
":",
"if",
"width",
"is",
"not",
"None",
"and",
"width",
"[",
"0",
"]",
"==",
"'0'",
":",
"fill",
"=",
"'0'",
"elif",
"ftype",
"in",
"[",
"'s'",
",",
"'d'",
"]",
":",
"fill",
"=",
"' '",
"char_type",
"=",
"spec_regexes",
"[",
"ftype",
"]",
"if",
"ftype",
"==",
"'s'",
"and",
"align",
"and",
"align",
".",
"endswith",
"(",
"'='",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid format specification: '{}'\"",
".",
"format",
"(",
"format_spec",
")",
")",
"final_regex",
"=",
"char_type",
"if",
"ftype",
"in",
"allow_multiple",
"and",
"(",
"not",
"width",
"or",
"width",
"==",
"'0'",
")",
":",
"final_regex",
"+=",
"r'*'",
"elif",
"width",
"and",
"width",
"!=",
"'0'",
":",
"if",
"not",
"fill",
":",
"# we know we have exactly this many characters",
"final_regex",
"+=",
"r'{{{}}}'",
".",
"format",
"(",
"int",
"(",
"width",
")",
")",
"elif",
"fill",
":",
"# we don't know how many fill characters we have compared to",
"# field characters so just match all characters and sort it out",
"# later during type conversion.",
"final_regex",
"=",
"r'.{{{}}}'",
".",
"format",
"(",
"int",
"(",
"width",
")",
")",
"elif",
"ftype",
"in",
"allow_multiple",
":",
"final_regex",
"+=",
"r'*'",
"return",
"r'(?P<{}>{})'",
".",
"format",
"(",
"field_name",
",",
"final_regex",
")"
] | 45.783784 | 16.594595 |
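A hedged illustration of the conversion. fmt_spec_regex, spec_regexes, and allow_multiple are module globals not shown here; the expected outputs assume fmt_spec_regex parses '4d' as width '4' and type 'd' with no explicit fill, that spec_regexes['d'] is r'\d', and that 'd' is in allow_multiple:

import re

pat = format_spec_to_regex('year', '4d')
print(pat)  # (?P<year>.{4}) -- a width plus implied fill matches any 4 chars
print(re.match(pat, '2019').group('year'))  # '2019'

print(format_spec_to_regex('count', 'd'))  # (?P<count>\d*), under the assumptions above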
def requirements_for_changes(self, changes):
"""
Parse changes for requirements
:param list changes:
"""
requirements = []
reqs_set = set()
if isinstance(changes, str):
changes = changes.split('\n')
if not changes or changes[0].startswith('-'):
return requirements
for line in changes:
line = line.strip(' -+*')
if not line:
continue
match = IS_REQUIREMENTS_RE2.search(line) # or IS_REQUIREMENTS_RE.match(line)
if match:
for match in REQUIREMENTS_RE.findall(match.group(1)):
if match[1]:
version = '==' + match[2] if match[1].startswith(' to ') else match[1]
req_str = match[0] + version
else:
req_str = match[0]
if req_str not in reqs_set:
reqs_set.add(req_str)
try:
requirements.append(pkg_resources.Requirement.parse(req_str))
except Exception as e:
log.warn('Could not parse requirement "%s" from changes: %s', req_str, e)
return requirements
|
[
"def",
"requirements_for_changes",
"(",
"self",
",",
"changes",
")",
":",
"requirements",
"=",
"[",
"]",
"reqs_set",
"=",
"set",
"(",
")",
"if",
"isinstance",
"(",
"changes",
",",
"str",
")",
":",
"changes",
"=",
"changes",
".",
"split",
"(",
"'\\n'",
")",
"if",
"not",
"changes",
"or",
"changes",
"[",
"0",
"]",
".",
"startswith",
"(",
"'-'",
")",
":",
"return",
"requirements",
"for",
"line",
"in",
"changes",
":",
"line",
"=",
"line",
".",
"strip",
"(",
"' -+*'",
")",
"if",
"not",
"line",
":",
"continue",
"match",
"=",
"IS_REQUIREMENTS_RE2",
".",
"search",
"(",
"line",
")",
"# or IS_REQUIREMENTS_RE.match(line)",
"if",
"match",
":",
"for",
"match",
"in",
"REQUIREMENTS_RE",
".",
"findall",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
":",
"if",
"match",
"[",
"1",
"]",
":",
"version",
"=",
"'=='",
"+",
"match",
"[",
"2",
"]",
"if",
"match",
"[",
"1",
"]",
".",
"startswith",
"(",
"' to '",
")",
"else",
"match",
"[",
"1",
"]",
"req_str",
"=",
"match",
"[",
"0",
"]",
"+",
"version",
"else",
":",
"req_str",
"=",
"match",
"[",
"0",
"]",
"if",
"req_str",
"not",
"in",
"reqs_set",
":",
"reqs_set",
".",
"add",
"(",
"req_str",
")",
"try",
":",
"requirements",
".",
"append",
"(",
"pkg_resources",
".",
"Requirement",
".",
"parse",
"(",
"req_str",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"warn",
"(",
"'Could not parse requirement \"%s\" from changes: %s'",
",",
"req_str",
",",
"e",
")",
"return",
"requirements"
] | 33.236842 | 20.657895 |
def select_regex_in(pl,regex):
'''
regex = re.compile("^x.*x$")
pl = ['bcd','xabcxx','xx','y']
    select_regex_in(pl, regex)
'''
def cond_func(ele,index,regex):
if(type(ele)==type([])):
cond = regex_in(ele,regex)
else:
m = regex.search(ele)
if(m == None):
cond = False
else:
cond = True
return(cond)
arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[regex])
return(arr)
|
[
"def",
"select_regex_in",
"(",
"pl",
",",
"regex",
")",
":",
"def",
"cond_func",
"(",
"ele",
",",
"index",
",",
"regex",
")",
":",
"if",
"(",
"type",
"(",
"ele",
")",
"==",
"type",
"(",
"[",
"]",
")",
")",
":",
"cond",
"=",
"regex_in",
"(",
"ele",
",",
"regex",
")",
"else",
":",
"m",
"=",
"regex",
".",
"search",
"(",
"ele",
")",
"if",
"(",
"m",
"==",
"None",
")",
":",
"cond",
"=",
"False",
"else",
":",
"cond",
"=",
"True",
"return",
"(",
"cond",
")",
"arr",
"=",
"cond_select_values_all2",
"(",
"pl",
",",
"cond_func",
"=",
"cond_func",
",",
"cond_func_args",
"=",
"[",
"regex",
"]",
")",
"return",
"(",
"arr",
")"
] | 28.722222 | 15.944444 |
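The helpers regex_in and cond_select_values_all2 are not shown here, so this is only an equivalent flat-list sketch of the selection the docstring describes:

import re

regex = re.compile('^x.*x$')
pl = ['bcd', 'xabcxx', 'xx', 'y']
# Keep the elements whose text the compiled regex matches anywhere.
print([p for p in pl if regex.search(p)])  # ['xabcxx', 'xx']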
def add_cli_to_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``upload_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.option('-u', '--update', is_flag=True)
@click.pass_obj
def upload(manager: BELNamespaceManagerMixin, update):
"""Upload names/identifiers to terminology store."""
namespace = manager.upload_bel_namespace(update=update)
click.echo(f'uploaded [{namespace.id}] {namespace.keyword}')
return main
|
[
"def",
"add_cli_to_bel_namespace",
"(",
"main",
":",
"click",
".",
"Group",
")",
"->",
"click",
".",
"Group",
":",
"# noqa: D202",
"@",
"main",
".",
"command",
"(",
")",
"@",
"click",
".",
"option",
"(",
"'-u'",
",",
"'--update'",
",",
"is_flag",
"=",
"True",
")",
"@",
"click",
".",
"pass_obj",
"def",
"upload",
"(",
"manager",
":",
"BELNamespaceManagerMixin",
",",
"update",
")",
":",
"\"\"\"Upload names/identifiers to terminology store.\"\"\"",
"namespace",
"=",
"manager",
".",
"upload_bel_namespace",
"(",
"update",
"=",
"update",
")",
"click",
".",
"echo",
"(",
"f'uploaded [{namespace.id}] {namespace.keyword}'",
")",
"return",
"main"
] | 42.25 | 21.833333 |
def h3(data, *args, **kwargs):
"""Facade function to create 3D histograms.
Parameters
----------
data : array_like or list[array_like] or tuple[array_like]
Can be a single array (with three columns) or three different arrays
(for each component)
Returns
-------
physt.histogram_nd.HistogramND
"""
import numpy as np
if data is not None and isinstance(data, (list, tuple)) and not np.isscalar(data[0]):
if "axis_names" not in kwargs:
kwargs["axis_names"] = [(column.name if hasattr(column, "name") else None) for column in data]
data = np.concatenate([item[:, np.newaxis] for item in data], axis=1)
else:
kwargs["dim"] = 3
return histogramdd(data, *args, **kwargs)
|
[
"def",
"h3",
"(",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"numpy",
"as",
"np",
"if",
"data",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"data",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"not",
"np",
".",
"isscalar",
"(",
"data",
"[",
"0",
"]",
")",
":",
"if",
"\"axis_names\"",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"\"axis_names\"",
"]",
"=",
"[",
"(",
"column",
".",
"name",
"if",
"hasattr",
"(",
"column",
",",
"\"name\"",
")",
"else",
"None",
")",
"for",
"column",
"in",
"data",
"]",
"data",
"=",
"np",
".",
"concatenate",
"(",
"[",
"item",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"for",
"item",
"in",
"data",
"]",
",",
"axis",
"=",
"1",
")",
"else",
":",
"kwargs",
"[",
"\"dim\"",
"]",
"=",
"3",
"return",
"histogramdd",
"(",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 34.045455 | 24.318182 |
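A minimal sketch assuming the physt package is installed (histogramdd is its n-dimensional facade) and that it accepts a plain bin count:

import numpy as np

x = np.random.normal(size=1000)
y = np.random.normal(size=1000)
z = np.random.normal(size=1000)

h_a = h3((x, y, z), 10)                    # three separate component arrays
h_b = h3(np.stack([x, y, z], axis=1), 10)  # one (N, 3) array works too
# Both calls should yield physt HistogramND objects over the same data.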
def seek(self, offset, from_what=os.SEEK_SET):
'''
:param offset: Position in the file to seek to
:type offset: integer
Seeks to *offset* bytes from the beginning of the file. This is a no-op if the file is open for writing.
The position is computed from adding *offset* to a reference point; the reference point is selected by the
*from_what* argument. A *from_what* value of 0 measures from the beginning of the file, 1 uses the current file
position, and 2 uses the end of the file as the reference point. *from_what* can be omitted and defaults to 0,
using the beginning of the file as the reference point.
'''
if from_what == os.SEEK_SET:
reference_pos = 0
elif from_what == os.SEEK_CUR:
reference_pos = self._pos
elif from_what == os.SEEK_END:
if self._file_length == None:
desc = self.describe()
self._file_length = int(desc["size"])
reference_pos = self._file_length
else:
raise DXFileError("Invalid value supplied for from_what")
orig_pos = self._pos
self._pos = reference_pos + offset
in_buf = False
orig_buf_pos = self._read_buf.tell()
if offset < orig_pos:
if orig_buf_pos > orig_pos - offset:
# offset is less than original position but within the buffer
in_buf = True
else:
buf_len = dxpy.utils.string_buffer_length(self._read_buf)
if buf_len - orig_buf_pos > offset - orig_pos:
# offset is greater than original position but within the buffer
in_buf = True
if in_buf:
# offset is within the buffer (at least one byte following
# the offset can be read directly out of the buffer)
self._read_buf.seek(orig_buf_pos - orig_pos + offset)
elif offset == orig_pos:
# This seek is a no-op (the cursor is just past the end of
# the read buffer and coincides with the desired seek
# position). We don't have the data ready, but the request
# for the data starting here is already in flight.
#
# Detecting this case helps to optimize for sequential read
# access patterns.
pass
else:
# offset is outside the buffer-- reset buffer and queues.
# This is the failsafe behavior
self._read_buf = BytesIO()
# TODO: if the offset is within the next response(s), don't throw out the queues
self._request_iterator, self._response_iterator = None, None
|
[
"def",
"seek",
"(",
"self",
",",
"offset",
",",
"from_what",
"=",
"os",
".",
"SEEK_SET",
")",
":",
"if",
"from_what",
"==",
"os",
".",
"SEEK_SET",
":",
"reference_pos",
"=",
"0",
"elif",
"from_what",
"==",
"os",
".",
"SEEK_CUR",
":",
"reference_pos",
"=",
"self",
".",
"_pos",
"elif",
"from_what",
"==",
"os",
".",
"SEEK_END",
":",
"if",
"self",
".",
"_file_length",
"==",
"None",
":",
"desc",
"=",
"self",
".",
"describe",
"(",
")",
"self",
".",
"_file_length",
"=",
"int",
"(",
"desc",
"[",
"\"size\"",
"]",
")",
"reference_pos",
"=",
"self",
".",
"_file_length",
"else",
":",
"raise",
"DXFileError",
"(",
"\"Invalid value supplied for from_what\"",
")",
"orig_pos",
"=",
"self",
".",
"_pos",
"self",
".",
"_pos",
"=",
"reference_pos",
"+",
"offset",
"in_buf",
"=",
"False",
"orig_buf_pos",
"=",
"self",
".",
"_read_buf",
".",
"tell",
"(",
")",
"if",
"offset",
"<",
"orig_pos",
":",
"if",
"orig_buf_pos",
">",
"orig_pos",
"-",
"offset",
":",
"# offset is less than original position but within the buffer",
"in_buf",
"=",
"True",
"else",
":",
"buf_len",
"=",
"dxpy",
".",
"utils",
".",
"string_buffer_length",
"(",
"self",
".",
"_read_buf",
")",
"if",
"buf_len",
"-",
"orig_buf_pos",
">",
"offset",
"-",
"orig_pos",
":",
"# offset is greater than original position but within the buffer",
"in_buf",
"=",
"True",
"if",
"in_buf",
":",
"# offset is within the buffer (at least one byte following",
"# the offset can be read directly out of the buffer)",
"self",
".",
"_read_buf",
".",
"seek",
"(",
"orig_buf_pos",
"-",
"orig_pos",
"+",
"offset",
")",
"elif",
"offset",
"==",
"orig_pos",
":",
"# This seek is a no-op (the cursor is just past the end of",
"# the read buffer and coincides with the desired seek",
"# position). We don't have the data ready, but the request",
"# for the data starting here is already in flight.",
"#",
"# Detecting this case helps to optimize for sequential read",
"# access patterns.",
"pass",
"else",
":",
"# offset is outside the buffer-- reset buffer and queues.",
"# This is the failsafe behavior",
"self",
".",
"_read_buf",
"=",
"BytesIO",
"(",
")",
"# TODO: if the offset is within the next response(s), don't throw out the queues",
"self",
".",
"_request_iterator",
",",
"self",
".",
"_response_iterator",
"=",
"None",
",",
"None"
] | 45.793103 | 24.068966 |
def _append_string(self, value, _file): # pylint: disable=no-self-use
"""Call this function to write string contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
_text = str(value).replace('"', '\\"')
_labs = ' "{text}"'.format(text=_text)
_file.write(_labs)
|
[
"def",
"_append_string",
"(",
"self",
",",
"value",
",",
"_file",
")",
":",
"# pylint: disable=no-self-use",
"_text",
"=",
"str",
"(",
"value",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"_labs",
"=",
"' \"{text}\"'",
".",
"format",
"(",
"text",
"=",
"_text",
")",
"_file",
".",
"write",
"(",
"_labs",
")"
] | 33.545455 | 14.454545 |
def LoadFromXml(self, node):
""" Method updates the object from the xml. """
import os
self.classId = node.localName
metaClassId = UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId)
if metaClassId:
self.classId = metaClassId
if node.hasAttribute(NamingPropertyId.DN):
self.dn = node.getAttribute(NamingPropertyId.DN)
if self.dn:
self.rn = os.path.basename(self.dn)
# Write the attribute and value to dictionary properties, as it is .
self.WriteToAttributes(node)
# Run the LoadFromXml for each childNode recursively and populate child list too.
if (node.hasChildNodes()):
# childList = node._get_childNodes()
# childCount = childList._get_length()
childList = node.childNodes
childCount = len(childList)
for i in range(childCount):
childNode = childList.item(i)
if (childNode.nodeType != Node.ELEMENT_NODE):
continue
c = _GenericMO()
self.child.append(c)
c.LoadFromXml(childNode)
|
[
"def",
"LoadFromXml",
"(",
"self",
",",
"node",
")",
":",
"import",
"os",
"self",
".",
"classId",
"=",
"node",
".",
"localName",
"metaClassId",
"=",
"UcsUtils",
".",
"FindClassIdInMoMetaIgnoreCase",
"(",
"self",
".",
"classId",
")",
"if",
"metaClassId",
":",
"self",
".",
"classId",
"=",
"metaClassId",
"if",
"node",
".",
"hasAttribute",
"(",
"NamingPropertyId",
".",
"DN",
")",
":",
"self",
".",
"dn",
"=",
"node",
".",
"getAttribute",
"(",
"NamingPropertyId",
".",
"DN",
")",
"if",
"self",
".",
"dn",
":",
"self",
".",
"rn",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"dn",
")",
"# Write the attribute and value to dictionary properties, as it is .",
"self",
".",
"WriteToAttributes",
"(",
"node",
")",
"# Run the LoadFromXml for each childNode recursively and populate child list too.",
"if",
"(",
"node",
".",
"hasChildNodes",
"(",
")",
")",
":",
"# childList = node._get_childNodes()",
"# childCount = childList._get_length()",
"childList",
"=",
"node",
".",
"childNodes",
"childCount",
"=",
"len",
"(",
"childList",
")",
"for",
"i",
"in",
"range",
"(",
"childCount",
")",
":",
"childNode",
"=",
"childList",
".",
"item",
"(",
"i",
")",
"if",
"(",
"childNode",
".",
"nodeType",
"!=",
"Node",
".",
"ELEMENT_NODE",
")",
":",
"continue",
"c",
"=",
"_GenericMO",
"(",
")",
"self",
".",
"child",
".",
"append",
"(",
"c",
")",
"c",
".",
"LoadFromXml",
"(",
"childNode",
")"
] | 28.96875 | 19.1875 |
def _check_flag_masks(self, ds, name):
'''
Check a variable's flag_masks attribute for compliance under CF
- flag_masks exists as an array
- flag_masks is the same dtype as the variable
- variable's dtype can support bit-field
- flag_masks is the same length as flag_meanings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
'''
variable = ds.variables[name]
flag_masks = variable.flag_masks
flag_meanings = getattr(ds, 'flag_meanings', None)
valid_masks = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_masks.assert_true(isinstance(flag_masks, np.ndarray),
"{}'s flag_masks must be an array of values not {}".format(name, type(flag_masks)))
if not isinstance(flag_masks, np.ndarray):
return valid_masks.to_result()
valid_masks.assert_true(variable.dtype.type == flag_masks.dtype.type,
"flag_masks ({}) mustbe the same data type as {} ({})"
"".format(flag_masks.dtype.type, name, variable.dtype.type))
type_ok = (np.issubdtype(variable.dtype, np.integer) or
np.issubdtype(variable.dtype, 'S') or
np.issubdtype(variable.dtype, 'b'))
valid_masks.assert_true(type_ok, "{}'s data type must be capable of bit-field expression".format(name))
if isinstance(flag_meanings, basestring):
flag_meanings = flag_meanings.split()
valid_masks.assert_true(len(flag_meanings) == len(flag_masks),
"{} flag_meanings and flag_masks should have the same number ".format(name)+\
"of elements.")
return valid_masks.to_result()
|
[
"def",
"_check_flag_masks",
"(",
"self",
",",
"ds",
",",
"name",
")",
":",
"variable",
"=",
"ds",
".",
"variables",
"[",
"name",
"]",
"flag_masks",
"=",
"variable",
".",
"flag_masks",
"flag_meanings",
"=",
"getattr",
"(",
"ds",
",",
"'flag_meanings'",
",",
"None",
")",
"valid_masks",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"HIGH",
",",
"self",
".",
"section_titles",
"[",
"'3.5'",
"]",
")",
"valid_masks",
".",
"assert_true",
"(",
"isinstance",
"(",
"flag_masks",
",",
"np",
".",
"ndarray",
")",
",",
"\"{}'s flag_masks must be an array of values not {}\"",
".",
"format",
"(",
"name",
",",
"type",
"(",
"flag_masks",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"flag_masks",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"valid_masks",
".",
"to_result",
"(",
")",
"valid_masks",
".",
"assert_true",
"(",
"variable",
".",
"dtype",
".",
"type",
"==",
"flag_masks",
".",
"dtype",
".",
"type",
",",
"\"flag_masks ({}) mustbe the same data type as {} ({})\"",
"\"\"",
".",
"format",
"(",
"flag_masks",
".",
"dtype",
".",
"type",
",",
"name",
",",
"variable",
".",
"dtype",
".",
"type",
")",
")",
"type_ok",
"=",
"(",
"np",
".",
"issubdtype",
"(",
"variable",
".",
"dtype",
",",
"np",
".",
"integer",
")",
"or",
"np",
".",
"issubdtype",
"(",
"variable",
".",
"dtype",
",",
"'S'",
")",
"or",
"np",
".",
"issubdtype",
"(",
"variable",
".",
"dtype",
",",
"'b'",
")",
")",
"valid_masks",
".",
"assert_true",
"(",
"type_ok",
",",
"\"{}'s data type must be capable of bit-field expression\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"isinstance",
"(",
"flag_meanings",
",",
"basestring",
")",
":",
"flag_meanings",
"=",
"flag_meanings",
".",
"split",
"(",
")",
"valid_masks",
".",
"assert_true",
"(",
"len",
"(",
"flag_meanings",
")",
"==",
"len",
"(",
"flag_masks",
")",
",",
"\"{} flag_meanings and flag_masks should have the same number \"",
".",
"format",
"(",
"name",
")",
"+",
"\"of elements.\"",
")",
"return",
"valid_masks",
".",
"to_result",
"(",
")"
] | 43.348837 | 26.976744 |
def SendKey(self, key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Make control have focus first and type a key.
`self.SetFocus` may not work for some controls, you may need to click it to make it have focus.
key: int, a key code value in class Keys.
waitTime: float.
"""
self.SetFocus()
SendKey(key, waitTime)
|
[
"def",
"SendKey",
"(",
"self",
",",
"key",
":",
"int",
",",
"waitTime",
":",
"float",
"=",
"OPERATION_WAIT_TIME",
")",
"->",
"None",
":",
"self",
".",
"SetFocus",
"(",
")",
"SendKey",
"(",
"key",
",",
"waitTime",
")"
] | 42.111111 | 18.111111 |
def create(self, request, desc, files, public=False):
"""Creates a gist
Arguments:
request: an initial request object
desc: the gist description
files: a list of files to add to the gist
public: a flag to indicate whether the gist is public or not
Returns:
The URL to the newly created gist.
"""
request.data = json.dumps({
"description": desc,
"public": public,
"files": files,
})
return self.send(request).json()['html_url']
|
[
"def",
"create",
"(",
"self",
",",
"request",
",",
"desc",
",",
"files",
",",
"public",
"=",
"False",
")",
":",
"request",
".",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"description\"",
":",
"desc",
",",
"\"public\"",
":",
"public",
",",
"\"files\"",
":",
"files",
",",
"}",
")",
"return",
"self",
".",
"send",
"(",
"request",
")",
".",
"json",
"(",
")",
"[",
"'html_url'",
"]"
] | 31 | 15.736842 |
def prefixedDec(nstr, schema):
"""
!~~prefixedDec
corresponding strings in documents must begin with the
    associated string in the schema, and the remainder of the
    string must be a decimal integer.
"""
if not nstr.startswith(schema):
return False
postfix = nstr[len(schema):]
try:
int(postfix)
except ValueError:
return False
return True
|
[
"def",
"prefixedDec",
"(",
"nstr",
",",
"schema",
")",
":",
"if",
"not",
"nstr",
".",
"startswith",
"(",
"schema",
")",
":",
"return",
"False",
"postfix",
"=",
"nstr",
"[",
"len",
"(",
"schema",
")",
":",
"]",
"try",
":",
"int",
"(",
"postfix",
")",
"except",
"ValueError",
":",
"return",
"False",
"return",
"True"
] | 26.866667 | 15.666667 |
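Quick examples of the check (nstr is the document string, schema the required prefix):

print(prefixedDec('user42', 'user'))   # True
print(prefixedDec('user4x', 'user'))   # False -- postfix is not decimal
print(prefixedDec('admin7', 'user'))   # False -- wrong prefix
print(prefixedDec('user', 'user'))     # False -- int('') raises ValueError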
def get_method_by_idx(self, idx):
"""
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
"""
if self.__cached_methods_idx == None:
self.__cached_methods_idx = {}
for i in self.classes.class_def:
for j in i.get_methods():
self.__cached_methods_idx[j.get_method_idx()] = j
try:
return self.__cached_methods_idx[idx]
except KeyError:
return None
|
[
"def",
"get_method_by_idx",
"(",
"self",
",",
"idx",
")",
":",
"if",
"self",
".",
"__cached_methods_idx",
"==",
"None",
":",
"self",
".",
"__cached_methods_idx",
"=",
"{",
"}",
"for",
"i",
"in",
"self",
".",
"classes",
".",
"class_def",
":",
"for",
"j",
"in",
"i",
".",
"get_methods",
"(",
")",
":",
"self",
".",
"__cached_methods_idx",
"[",
"j",
".",
"get_method_idx",
"(",
")",
"]",
"=",
"j",
"try",
":",
"return",
"self",
".",
"__cached_methods_idx",
"[",
"idx",
"]",
"except",
"KeyError",
":",
"return",
"None"
] | 32.388889 | 13.833333 |
def put(
self, item: _T, timeout: Union[float, datetime.timedelta] = None
) -> "Future[None]":
"""Put an item into the queue, perhaps waiting until there is room.
Returns a Future, which raises `tornado.util.TimeoutError` after a
timeout.
``timeout`` may be a number denoting a time (on the same
scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
"""
future = Future() # type: Future[None]
try:
self.put_nowait(item)
except QueueFull:
self._putters.append((item, future))
_set_timeout(future, timeout)
else:
future.set_result(None)
return future
|
[
"def",
"put",
"(",
"self",
",",
"item",
":",
"_T",
",",
"timeout",
":",
"Union",
"[",
"float",
",",
"datetime",
".",
"timedelta",
"]",
"=",
"None",
")",
"->",
"\"Future[None]\"",
":",
"future",
"=",
"Future",
"(",
")",
"# type: Future[None]",
"try",
":",
"self",
".",
"put_nowait",
"(",
"item",
")",
"except",
"QueueFull",
":",
"self",
".",
"_putters",
".",
"append",
"(",
"(",
"item",
",",
"future",
")",
")",
"_set_timeout",
"(",
"future",
",",
"timeout",
")",
"else",
":",
"future",
".",
"set_result",
"(",
"None",
")",
"return",
"future"
] | 35.454545 | 19.909091 |
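A hedged usage sketch; Queue here is tornado.queues.Queue, producer is an illustrative coroutine, and a Tornado IOLoop must drive it:

import datetime
from tornado import ioloop
from tornado.queues import Queue
from tornado.util import TimeoutError

async def producer():
    q = Queue(maxsize=1)
    await q.put('first')   # room available, resolves immediately
    try:
        # Queue is full now, so this waits; give up after one second.
        await q.put('second', timeout=datetime.timedelta(seconds=1))
    except TimeoutError:
        print('queue stayed full')

ioloop.IOLoop.current().run_sync(producer)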
def available_input_formats():
"""
Return all available input formats.
Returns
-------
formats : list
all available input formats
"""
input_formats = []
for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
logger.debug("driver found: %s", v)
driver_ = v.load()
if hasattr(driver_, "METADATA") and (driver_.METADATA["mode"] in ["r", "rw"]):
input_formats.append(driver_.METADATA["driver_name"])
return input_formats
|
[
"def",
"available_input_formats",
"(",
")",
":",
"input_formats",
"=",
"[",
"]",
"for",
"v",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"DRIVERS_ENTRY_POINT",
")",
":",
"logger",
".",
"debug",
"(",
"\"driver found: %s\"",
",",
"v",
")",
"driver_",
"=",
"v",
".",
"load",
"(",
")",
"if",
"hasattr",
"(",
"driver_",
",",
"\"METADATA\"",
")",
"and",
"(",
"driver_",
".",
"METADATA",
"[",
"\"mode\"",
"]",
"in",
"[",
"\"r\"",
",",
"\"rw\"",
"]",
")",
":",
"input_formats",
".",
"append",
"(",
"driver_",
".",
"METADATA",
"[",
"\"driver_name\"",
"]",
")",
"return",
"input_formats"
] | 30.625 | 17.75 |
def get_series_by_name(self, name):
"""Returns the first :py:class:`.Series` of a given name, or ``None``.
:param str name: The name to search by."""
if not isinstance(name, str):
raise TypeError(
"Can only search series by str name, not '%s'" % str(name)
)
for series in self.all_series():
if series.name() == name:
return series
|
[
"def",
"get_series_by_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Can only search series by str name, not '%s'\"",
"%",
"str",
"(",
"name",
")",
")",
"for",
"series",
"in",
"self",
".",
"all_series",
"(",
")",
":",
"if",
"series",
".",
"name",
"(",
")",
"==",
"name",
":",
"return",
"series"
] | 34.833333 | 14.333333 |
def add_device(self, device, container):
"""Add a device to a group. Wraps JSSObject.add_object_to_path.
Args:
            device: A JSSObject to add (as list data) to this object.
            container: Element or a string path argument to find()
"""
# There is a size tag which the JSS manages for us, so we can
# ignore it.
if self.findtext("is_smart") == "false":
self.add_object_to_path(device, container)
else:
# Technically this isn't true. It will strangely accept
# them, and they even show up as members of the group!
raise ValueError("Devices may not be added to smart groups.")
|
[
"def",
"add_device",
"(",
"self",
",",
"device",
",",
"container",
")",
":",
"# There is a size tag which the JSS manages for us, so we can",
"# ignore it.",
"if",
"self",
".",
"findtext",
"(",
"\"is_smart\"",
")",
"==",
"\"false\"",
":",
"self",
".",
"add_object_to_path",
"(",
"device",
",",
"container",
")",
"else",
":",
"# Technically this isn't true. It will strangely accept",
"# them, and they even show up as members of the group!",
"raise",
"ValueError",
"(",
"\"Devices may not be added to smart groups.\"",
")"
] | 45.333333 | 20.4 |
def set_iprouting(self, value=None, default=False, disable=False):
"""Configures the state of global ip routing
EosVersion:
4.13.7M
Args:
value(bool): True if ip routing should be enabled or False if
ip routing should be disabled
default (bool): Controls the use of the default keyword
disable (bool): Controls the use of the no keyword
Returns:
bool: True if the commands completed successfully otherwise False
"""
if value is False:
disable = True
cmd = self.command_builder('ip routing', value=value, default=default,
disable=disable)
return self.configure(cmd)
|
[
"def",
"set_iprouting",
"(",
"self",
",",
"value",
"=",
"None",
",",
"default",
"=",
"False",
",",
"disable",
"=",
"False",
")",
":",
"if",
"value",
"is",
"False",
":",
"disable",
"=",
"True",
"cmd",
"=",
"self",
".",
"command_builder",
"(",
"'ip routing'",
",",
"value",
"=",
"value",
",",
"default",
"=",
"default",
",",
"disable",
"=",
"disable",
")",
"return",
"self",
".",
"configure",
"(",
"cmd",
")"
] | 36.75 | 22.3 |
def get(self, user_name: str) -> User:
"""
Gets the User Resource.
"""
user = current_user()
if user.is_admin or user.name == user_name:
return self._get_or_abort(user_name)
else:
abort(403)
|
[
"def",
"get",
"(",
"self",
",",
"user_name",
":",
"str",
")",
"->",
"User",
":",
"user",
"=",
"current_user",
"(",
")",
"if",
"user",
".",
"is_admin",
"or",
"user",
".",
"name",
"==",
"user_name",
":",
"return",
"self",
".",
"_get_or_abort",
"(",
"user_name",
")",
"else",
":",
"abort",
"(",
"403",
")"
] | 28.222222 | 9.555556 |
def search_accounts(self, **kwargs):
"""
Return a list of up to 5 matching account domains. Partial matches on
name and domain are supported.
:calls: `GET /api/v1/accounts/search \
<https://canvas.instructure.com/doc/api/account_domain_lookups.html#method.account_domain_lookups.search>`_
:rtype: dict
"""
response = self.__requester.request(
'GET',
'accounts/search',
_kwargs=combine_kwargs(**kwargs)
)
return response.json()
|
[
"def",
"search_accounts",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"self",
".",
"__requester",
".",
"request",
"(",
"'GET'",
",",
"'accounts/search'",
",",
"_kwargs",
"=",
"combine_kwargs",
"(",
"*",
"*",
"kwargs",
")",
")",
"return",
"response",
".",
"json",
"(",
")"
] | 33.0625 | 19.0625 |
def check_version(cls):
"""Checks server version against minimum required version."""
super(SimpleCpnrDriver, cls).check_version()
model.configure_pnr()
cls.recover_networks()
ver = model.get_version()
if ver < cls.MIN_VERSION:
LOG.warning("CPNR version does not meet minimum requirements, "
"expected: %(ever)f, actual: %(rver)f",
{'ever': cls.MIN_VERSION, 'rver': ver})
return ver
|
[
"def",
"check_version",
"(",
"cls",
")",
":",
"super",
"(",
"SimpleCpnrDriver",
",",
"cls",
")",
".",
"check_version",
"(",
")",
"model",
".",
"configure_pnr",
"(",
")",
"cls",
".",
"recover_networks",
"(",
")",
"ver",
"=",
"model",
".",
"get_version",
"(",
")",
"if",
"ver",
"<",
"cls",
".",
"MIN_VERSION",
":",
"LOG",
".",
"warning",
"(",
"\"CPNR version does not meet minimum requirements, \"",
"\"expected: %(ever)f, actual: %(rver)f\"",
",",
"{",
"'ever'",
":",
"cls",
".",
"MIN_VERSION",
",",
"'rver'",
":",
"ver",
"}",
")",
"return",
"ver"
] | 44.363636 | 15.181818 |
def as_csv(self):
"""Return a CSV representation as a string"""
from io import StringIO
s = StringIO()
w = csv.writer(s)
for row in self.rows:
w.writerow(row)
return s.getvalue()
|
[
"def",
"as_csv",
"(",
"self",
")",
":",
"from",
"io",
"import",
"StringIO",
"s",
"=",
"StringIO",
"(",
")",
"w",
"=",
"csv",
".",
"writer",
"(",
"s",
")",
"for",
"row",
"in",
"self",
".",
"rows",
":",
"w",
".",
"writerow",
"(",
"row",
")",
"return",
"s",
".",
"getvalue",
"(",
")"
] | 21 | 20.181818 |
def get_count(self,name):
""" get the latest counter for a certain parameter type.
Parameters
----------
name : str
the parameter type
Returns
-------
count : int
the latest count for a parameter type
Note
----
calling this function increments the counter for the passed
parameter type
"""
if name not in self.mlt_counter:
self.mlt_counter[name] = 1
c = 0
else:
c = self.mlt_counter[name]
self.mlt_counter[name] += 1
#print(name,c)
return c
|
[
"def",
"get_count",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"mlt_counter",
":",
"self",
".",
"mlt_counter",
"[",
"name",
"]",
"=",
"1",
"c",
"=",
"0",
"else",
":",
"c",
"=",
"self",
".",
"mlt_counter",
"[",
"name",
"]",
"self",
".",
"mlt_counter",
"[",
"name",
"]",
"+=",
"1",
"#print(name,c)",
"return",
"c"
] | 23.037037 | 19.444444 |
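A minimal sketch of the counting behaviour; Holder is a hypothetical stand-in for whatever object owns the mlt_counter dict:

class Holder:
    def __init__(self):
        self.mlt_counter = {}

Holder.get_count = get_count  # attach the function shown above as a method

h = Holder()
print(h.get_count('hk'))   # 0 -- first call returns 0 and starts the counter
print(h.get_count('hk'))   # 1
print(h.get_count('vka'))  # 0 -- each parameter type counts independently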
def from_stream(cls, stream_rdr, offset):
"""
Return an |_IfdEntry| subclass instance containing the tag and value
of the tag parsed from *stream_rdr* at *offset*. Note this method is
common to all subclasses. Override the ``_parse_value()`` method to
provide distinctive behavior based on field type.
"""
tag_code = stream_rdr.read_short(offset, 0)
value_count = stream_rdr.read_long(offset, 4)
value_offset = stream_rdr.read_long(offset, 8)
value = cls._parse_value(
stream_rdr, offset, value_count, value_offset
)
return cls(tag_code, value)
|
[
"def",
"from_stream",
"(",
"cls",
",",
"stream_rdr",
",",
"offset",
")",
":",
"tag_code",
"=",
"stream_rdr",
".",
"read_short",
"(",
"offset",
",",
"0",
")",
"value_count",
"=",
"stream_rdr",
".",
"read_long",
"(",
"offset",
",",
"4",
")",
"value_offset",
"=",
"stream_rdr",
".",
"read_long",
"(",
"offset",
",",
"8",
")",
"value",
"=",
"cls",
".",
"_parse_value",
"(",
"stream_rdr",
",",
"offset",
",",
"value_count",
",",
"value_offset",
")",
"return",
"cls",
"(",
"tag_code",
",",
"value",
")"
] | 45.642857 | 15.928571 |
def load(self, elem):
"""
Converts the inputted string tag to Python.
:param elem | <xml.etree.ElementTree>
:return <str>
"""
self.testTag(elem, 'str')
return elem.text if elem.text is not None else ''
|
[
"def",
"load",
"(",
"self",
",",
"elem",
")",
":",
"self",
".",
"testTag",
"(",
"elem",
",",
"'str'",
")",
"return",
"elem",
".",
"text",
"if",
"elem",
".",
"text",
"is",
"not",
"None",
"else",
"''"
] | 27.5 | 14.3 |
def _get_images_dir():
'''
Extract the images dir from the configuration. First attempts to
find legacy virt.images, then tries virt:images.
'''
img_dir = __salt__['config.option']('virt.images')
if img_dir:
salt.utils.versions.warn_until(
'Sodium',
'\'virt.images\' has been deprecated in favor of '
'\'virt:images\'. \'virt.images\' will stop '
'being used in {version}.')
else:
img_dir = __salt__['config.get']('virt:images')
log.debug('Image directory from config option `virt:images`'
' is %s', img_dir)
return img_dir
|
[
"def",
"_get_images_dir",
"(",
")",
":",
"img_dir",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"'virt.images'",
")",
"if",
"img_dir",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Sodium'",
",",
"'\\'virt.images\\' has been deprecated in favor of '",
"'\\'virt:images\\'. \\'virt.images\\' will stop '",
"'being used in {version}.'",
")",
"else",
":",
"img_dir",
"=",
"__salt__",
"[",
"'config.get'",
"]",
"(",
"'virt:images'",
")",
"log",
".",
"debug",
"(",
"'Image directory from config option `virt:images`'",
"' is %s'",
",",
"img_dir",
")",
"return",
"img_dir"
] | 34.5 | 20.166667 |
def add_batch(self, table, keys, attributes_to_get=None):
"""
Add a Batch to this BatchList.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object in which the items are contained.
:type keys: list
:param keys: A list of scalar or tuple values. Each element in the
list represents one Item to retrieve. If the schema for the
table has both a HashKey and a RangeKey, each element in the
list should be a tuple consisting of (hash_key, range_key). If
the schema for the table contains only a HashKey, each element
in the list should be a scalar value of the appropriate type
for the table schema.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
"""
self.append(Batch(table, keys, attributes_to_get))
|
[
"def",
"add_batch",
"(",
"self",
",",
"table",
",",
"keys",
",",
"attributes_to_get",
"=",
"None",
")",
":",
"self",
".",
"append",
"(",
"Batch",
"(",
"table",
",",
"keys",
",",
"attributes_to_get",
")",
")"
] | 47.5 | 22.136364 |
def make_confidence_report(filepath, train_start=TRAIN_START,
train_end=TRAIN_END,
test_start=TEST_START, test_end=TEST_END,
batch_size=BATCH_SIZE, which_set=WHICH_SET,
mc_batch_size=MC_BATCH_SIZE,
report_path=REPORT_PATH,
base_eps_iter=BASE_EPS_ITER,
nb_iter=NB_ITER, save_advx=SAVE_ADVX):
"""
Load a saved model, gather its predictions, and save a confidence report.
This function works by running a single MaxConfidence attack on each example.
This provides a reasonable estimate of the true failure rate quickly, so
long as the model does not suffer from gradient masking.
However, this estimate is mostly intended for development work and not
for publication. A more accurate estimate may be obtained by running
make_confidence_report_bundled.py instead.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param batch_size: size of evaluation batches
:param which_set: 'train' or 'test'
:param mc_batch_size: batch size for MaxConfidence attack
:param base_eps_iter: step size if the data were in [0,1]
(Step size will be rescaled proportional to the actual data range)
:param nb_iter: Number of iterations of PGD to run per class
:param save_advx: bool. If True, saves the adversarial examples to disk.
On by default, but can be turned off to save memory, etc.
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
sess = tf.Session()
if report_path is None:
assert filepath.endswith('.joblib')
report_path = filepath[:-len('.joblib')] + "_report.joblib"
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
center = dataset.kwargs['center']
max_val = dataset.kwargs['max_val']
value_range = max_val * (1. + center)
min_value = 0. - center * max_val
if 'CIFAR' in str(factory.cls):
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
elif 'MNIST' in str(factory.cls):
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
else:
raise NotImplementedError(str(factory.cls))
mc_params = {'eps': base_eps * value_range,
'eps_iter': base_eps_iter * value_range,
'nb_iter': nb_iter,
'clip_min': min_value,
'clip_max': max_val}
x_data, y_data = dataset.get_set(which_set)
report = ConfidenceReport()
semantic = Semantic(model, center, max_val, sess)
mc = MaxConfidence(model, sess=sess)
jobs = [('clean', None, None, None, False),
('Semantic', semantic, None, None, False),
('mc', mc, mc_params, mc_batch_size, True)]
for job in jobs:
name, attack, attack_params, job_batch_size, save_this_job = job
if job_batch_size is None:
job_batch_size = batch_size
t1 = time.time()
if save_advx and save_this_job:
# If we want to save the adversarial examples to the filesystem, we need
# to fetch all of them. Otherwise they're just computed one batch at a
# time and discarded
# The path to save to
assert report_path.endswith('.joblib')
advx_path = report_path[:-len('.joblib')] + '_advx_' + name + '.npy'
# Fetch the adversarial examples
x_data = run_attack(sess, model, x_data, y_data, attack, attack_params,
batch_size=job_batch_size, devices=devices)
# Turn off the attack so `correctness_and_confidence` won't run it a
# second time.
attack = None
attack_params = None
# Save the adversarial examples
np.save(advx_path, x_data)
# Run correctness and confidence evaluation on adversarial examples
packed = correctness_and_confidence(sess, model, x_data, y_data,
batch_size=job_batch_size,
devices=devices,
attack=attack,
attack_params=attack_params)
t2 = time.time()
print("Evaluation took", t2 - t1, "seconds")
correctness, confidence = packed
report[name] = ConfidenceReportEntry(correctness=correctness,
confidence=confidence)
print_stats(correctness, confidence, name)
save(report_path, report)
|
[
"def",
"make_confidence_report",
"(",
"filepath",
",",
"train_start",
"=",
"TRAIN_START",
",",
"train_end",
"=",
"TRAIN_END",
",",
"test_start",
"=",
"TEST_START",
",",
"test_end",
"=",
"TEST_END",
",",
"batch_size",
"=",
"BATCH_SIZE",
",",
"which_set",
"=",
"WHICH_SET",
",",
"mc_batch_size",
"=",
"MC_BATCH_SIZE",
",",
"report_path",
"=",
"REPORT_PATH",
",",
"base_eps_iter",
"=",
"BASE_EPS_ITER",
",",
"nb_iter",
"=",
"NB_ITER",
",",
"save_advx",
"=",
"SAVE_ADVX",
")",
":",
"# Set TF random seed to improve reproducibility",
"tf",
".",
"set_random_seed",
"(",
"1234",
")",
"# Set logging level to see debug information",
"set_log_level",
"(",
"logging",
".",
"INFO",
")",
"# Create TF session",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"if",
"report_path",
"is",
"None",
":",
"assert",
"filepath",
".",
"endswith",
"(",
"'.joblib'",
")",
"report_path",
"=",
"filepath",
"[",
":",
"-",
"len",
"(",
"'.joblib'",
")",
"]",
"+",
"\"_report.joblib\"",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"model",
"=",
"load",
"(",
"filepath",
")",
"assert",
"len",
"(",
"model",
".",
"get_params",
"(",
")",
")",
">",
"0",
"factory",
"=",
"model",
".",
"dataset_factory",
"factory",
".",
"kwargs",
"[",
"'train_start'",
"]",
"=",
"train_start",
"factory",
".",
"kwargs",
"[",
"'train_end'",
"]",
"=",
"train_end",
"factory",
".",
"kwargs",
"[",
"'test_start'",
"]",
"=",
"test_start",
"factory",
".",
"kwargs",
"[",
"'test_end'",
"]",
"=",
"test_end",
"dataset",
"=",
"factory",
"(",
")",
"center",
"=",
"dataset",
".",
"kwargs",
"[",
"'center'",
"]",
"max_val",
"=",
"dataset",
".",
"kwargs",
"[",
"'max_val'",
"]",
"value_range",
"=",
"max_val",
"*",
"(",
"1.",
"+",
"center",
")",
"min_value",
"=",
"0.",
"-",
"center",
"*",
"max_val",
"if",
"'CIFAR'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"base_eps",
"=",
"8.",
"/",
"255.",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
"2.",
"/",
"255.",
"elif",
"'MNIST'",
"in",
"str",
"(",
"factory",
".",
"cls",
")",
":",
"base_eps",
"=",
".3",
"if",
"base_eps_iter",
"is",
"None",
":",
"base_eps_iter",
"=",
".1",
"else",
":",
"raise",
"NotImplementedError",
"(",
"str",
"(",
"factory",
".",
"cls",
")",
")",
"mc_params",
"=",
"{",
"'eps'",
":",
"base_eps",
"*",
"value_range",
",",
"'eps_iter'",
":",
"base_eps_iter",
"*",
"value_range",
",",
"'nb_iter'",
":",
"nb_iter",
",",
"'clip_min'",
":",
"min_value",
",",
"'clip_max'",
":",
"max_val",
"}",
"x_data",
",",
"y_data",
"=",
"dataset",
".",
"get_set",
"(",
"which_set",
")",
"report",
"=",
"ConfidenceReport",
"(",
")",
"semantic",
"=",
"Semantic",
"(",
"model",
",",
"center",
",",
"max_val",
",",
"sess",
")",
"mc",
"=",
"MaxConfidence",
"(",
"model",
",",
"sess",
"=",
"sess",
")",
"jobs",
"=",
"[",
"(",
"'clean'",
",",
"None",
",",
"None",
",",
"None",
",",
"False",
")",
",",
"(",
"'Semantic'",
",",
"semantic",
",",
"None",
",",
"None",
",",
"False",
")",
",",
"(",
"'mc'",
",",
"mc",
",",
"mc_params",
",",
"mc_batch_size",
",",
"True",
")",
"]",
"for",
"job",
"in",
"jobs",
":",
"name",
",",
"attack",
",",
"attack_params",
",",
"job_batch_size",
",",
"save_this_job",
"=",
"job",
"if",
"job_batch_size",
"is",
"None",
":",
"job_batch_size",
"=",
"batch_size",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"if",
"save_advx",
"and",
"save_this_job",
":",
"# If we want to save the adversarial examples to the filesystem, we need",
"# to fetch all of them. Otherwise they're just computed one batch at a",
"# time and discarded",
"# The path to save to",
"assert",
"report_path",
".",
"endswith",
"(",
"'.joblib'",
")",
"advx_path",
"=",
"report_path",
"[",
":",
"-",
"len",
"(",
"'.joblib'",
")",
"]",
"+",
"'_advx_'",
"+",
"name",
"+",
"'.npy'",
"# Fetch the adversarial examples",
"x_data",
"=",
"run_attack",
"(",
"sess",
",",
"model",
",",
"x_data",
",",
"y_data",
",",
"attack",
",",
"attack_params",
",",
"batch_size",
"=",
"job_batch_size",
",",
"devices",
"=",
"devices",
")",
"# Turn off the attack so `correctness_and_confidence` won't run it a",
"# second time.",
"attack",
"=",
"None",
"attack_params",
"=",
"None",
"# Save the adversarial examples",
"np",
".",
"save",
"(",
"advx_path",
",",
"x_data",
")",
"# Run correctness and confidence evaluation on adversarial examples",
"packed",
"=",
"correctness_and_confidence",
"(",
"sess",
",",
"model",
",",
"x_data",
",",
"y_data",
",",
"batch_size",
"=",
"job_batch_size",
",",
"devices",
"=",
"devices",
",",
"attack",
"=",
"attack",
",",
"attack_params",
"=",
"attack_params",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"\"Evaluation took\"",
",",
"t2",
"-",
"t1",
",",
"\"seconds\"",
")",
"correctness",
",",
"confidence",
"=",
"packed",
"report",
"[",
"name",
"]",
"=",
"ConfidenceReportEntry",
"(",
"correctness",
"=",
"correctness",
",",
"confidence",
"=",
"confidence",
")",
"print_stats",
"(",
"correctness",
",",
"confidence",
",",
"name",
")",
"save",
"(",
"report_path",
",",
"report",
")"
] | 37.007576 | 19.810606 |
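The `mc_params` arithmetic in the row above is easy to sanity-check by hand. A minimal sketch, assuming a CIFAR-style dataset that reports `center=True` and `max_val=1.0` (those values, and `nb_iter=10`, are illustrative assumptions, not read from the actual dataset factory):

```python
# Illustrative numbers only: center, max_val and nb_iter are assumed,
# not taken from the real dataset factory used in the row above.
center, max_val = True, 1.0
value_range = max_val * (1. + center)   # 2.0 (bool True counts as 1)
min_value = 0. - center * max_val       # -1.0
base_eps, base_eps_iter = 8. / 255., 2. / 255.  # the CIFAR branch defaults
mc_params = {'eps': base_eps * value_range,            # ~0.0627
             'eps_iter': base_eps_iter * value_range,  # ~0.0157
             'nb_iter': 10,                            # assumed value
             'clip_min': min_value,
             'clip_max': max_val}
print(mc_params)
```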
def addLayerNode(self, layerName, bias = None, weights = {}):
"""
        Adds a new node to the end of a layer and puts in its new weights.
Weights will be random, unless specified.
bias = the new node's bias weight
weights = dict of {connectedLayerName: [weights], ...}
Example:
>>> net = Network() # doctest: +ELLIPSIS
Conx using seed: ...
>>> net.addLayers(2, 5, 1)
>>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]})
"""
self.changeLayerSize(layerName, self[layerName].size + 1)
if bias != None:
self[layerName].weight[-1] = bias
for name in list(weights.keys()):
for c in self.connections:
if c.fromLayer.name == name and c.toLayer.name == layerName:
for i in range(self[name].size):
self[name, layerName].weight[i][-1] = weights[name][i]
elif c.toLayer.name == name and c.fromLayer.name == layerName:
for j in range(self[name].size):
self[layerName, name].weight[-1][j] = weights[name][j]
|
[
"def",
"addLayerNode",
"(",
"self",
",",
"layerName",
",",
"bias",
"=",
"None",
",",
"weights",
"=",
"{",
"}",
")",
":",
"self",
".",
"changeLayerSize",
"(",
"layerName",
",",
"self",
"[",
"layerName",
"]",
".",
"size",
"+",
"1",
")",
"if",
"bias",
"!=",
"None",
":",
"self",
"[",
"layerName",
"]",
".",
"weight",
"[",
"-",
"1",
"]",
"=",
"bias",
"for",
"name",
"in",
"list",
"(",
"weights",
".",
"keys",
"(",
")",
")",
":",
"for",
"c",
"in",
"self",
".",
"connections",
":",
"if",
"c",
".",
"fromLayer",
".",
"name",
"==",
"name",
"and",
"c",
".",
"toLayer",
".",
"name",
"==",
"layerName",
":",
"for",
"i",
"in",
"range",
"(",
"self",
"[",
"name",
"]",
".",
"size",
")",
":",
"self",
"[",
"name",
",",
"layerName",
"]",
".",
"weight",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
"=",
"weights",
"[",
"name",
"]",
"[",
"i",
"]",
"elif",
"c",
".",
"toLayer",
".",
"name",
"==",
"name",
"and",
"c",
".",
"fromLayer",
".",
"name",
"==",
"layerName",
":",
"for",
"j",
"in",
"range",
"(",
"self",
"[",
"name",
"]",
".",
"size",
")",
":",
"self",
"[",
"layerName",
",",
"name",
"]",
".",
"weight",
"[",
"-",
"1",
"]",
"[",
"j",
"]",
"=",
"weights",
"[",
"name",
"]",
"[",
"j",
"]"
] | 46.76 | 20.28 |
def as_uni_field(field):
"""
Renders a form field like a django-uni-form field::
{% load uni_form_tags %}
{{ form.field|as_uni_field }}
"""
template = get_template('uni_form/field.html')
c = Context({'field':field})
return template.render(c)
|
[
"def",
"as_uni_field",
"(",
"field",
")",
":",
"template",
"=",
"get_template",
"(",
"'uni_form/field.html'",
")",
"c",
"=",
"Context",
"(",
"{",
"'field'",
":",
"field",
"}",
")",
"return",
"template",
".",
"render",
"(",
"c",
")"
] | 27.3 | 11.1 |
def fallback(cache):
"""
Caches content retrieved by the client, thus allowing the cached
content to be used later if the live content cannot be retrieved.
"""
log_filter = ThrottlingFilter(cache=cache)
logger.filters = []
logger.addFilter(log_filter)
def get_cache_response(cache_key):
content = cache.get(cache_key)
if content:
response = CacheResponse()
response.__setstate__({
'status_code': 200,
'_content': content,
})
return response
def get_cache_control(etag_cache_key):
etag = cache.get(etag_cache_key)
if etag:
return ETagCacheControl(etag)
def closure(func):
@wraps(func)
def wrapper(client, url, params={}, *args, **kwargs):
cache_key = canonicalize_url(url + '?' + urlencode(params))
etag_cache_key = 'etag-' + cache_key
try:
remote_response = func(
client,
url=url,
params=params,
cache_control=get_cache_control(etag_cache_key),
*args,
**kwargs,
)
except RequestException:
# Failed to create the request e.g., the remote server is down,
# perhaps a timeout occurred, or even connection closed by
# remote, etc.
response = get_cache_response(cache_key)
if response:
logger.error(MESSAGE_CACHE_HIT, extra={'url': url})
else:
raise
else:
log_context = {
'status_code': remote_response.status_code, 'url': url
}
if remote_response.status_code == 404:
logger.error(MESSAGE_NOT_FOUND, extra=log_context)
return LiveResponse.from_response(remote_response)
elif remote_response.status_code == 304:
response = get_cache_response(cache_key)
elif not remote_response.ok:
# Successfully requested the content, but the response is
# not OK (e.g., 500, 403, etc)
response = get_cache_response(cache_key)
if response:
logger.error(MESSAGE_CACHE_HIT, extra=log_context)
else:
logger.exception(MESSAGE_CACHE_MISS, extra=log_context)
response = FailureResponse.from_response(
remote_response
)
else:
cache.set_many({
cache_key: remote_response.content,
etag_cache_key: remote_response.headers.get('ETag'),
}, settings.DIRECTORY_CLIENT_CORE_CACHE_EXPIRE_SECONDS)
response = LiveResponse.from_response(remote_response)
return response
return wrapper
return closure
|
[
"def",
"fallback",
"(",
"cache",
")",
":",
"log_filter",
"=",
"ThrottlingFilter",
"(",
"cache",
"=",
"cache",
")",
"logger",
".",
"filters",
"=",
"[",
"]",
"logger",
".",
"addFilter",
"(",
"log_filter",
")",
"def",
"get_cache_response",
"(",
"cache_key",
")",
":",
"content",
"=",
"cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"content",
":",
"response",
"=",
"CacheResponse",
"(",
")",
"response",
".",
"__setstate__",
"(",
"{",
"'status_code'",
":",
"200",
",",
"'_content'",
":",
"content",
",",
"}",
")",
"return",
"response",
"def",
"get_cache_control",
"(",
"etag_cache_key",
")",
":",
"etag",
"=",
"cache",
".",
"get",
"(",
"etag_cache_key",
")",
"if",
"etag",
":",
"return",
"ETagCacheControl",
"(",
"etag",
")",
"def",
"closure",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"client",
",",
"url",
",",
"params",
"=",
"{",
"}",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cache_key",
"=",
"canonicalize_url",
"(",
"url",
"+",
"'?'",
"+",
"urlencode",
"(",
"params",
")",
")",
"etag_cache_key",
"=",
"'etag-'",
"+",
"cache_key",
"try",
":",
"remote_response",
"=",
"func",
"(",
"client",
",",
"url",
"=",
"url",
",",
"params",
"=",
"params",
",",
"cache_control",
"=",
"get_cache_control",
"(",
"etag_cache_key",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
",",
")",
"except",
"RequestException",
":",
"# Failed to create the request e.g., the remote server is down,",
"# perhaps a timeout occurred, or even connection closed by",
"# remote, etc.",
"response",
"=",
"get_cache_response",
"(",
"cache_key",
")",
"if",
"response",
":",
"logger",
".",
"error",
"(",
"MESSAGE_CACHE_HIT",
",",
"extra",
"=",
"{",
"'url'",
":",
"url",
"}",
")",
"else",
":",
"raise",
"else",
":",
"log_context",
"=",
"{",
"'status_code'",
":",
"remote_response",
".",
"status_code",
",",
"'url'",
":",
"url",
"}",
"if",
"remote_response",
".",
"status_code",
"==",
"404",
":",
"logger",
".",
"error",
"(",
"MESSAGE_NOT_FOUND",
",",
"extra",
"=",
"log_context",
")",
"return",
"LiveResponse",
".",
"from_response",
"(",
"remote_response",
")",
"elif",
"remote_response",
".",
"status_code",
"==",
"304",
":",
"response",
"=",
"get_cache_response",
"(",
"cache_key",
")",
"elif",
"not",
"remote_response",
".",
"ok",
":",
"# Successfully requested the content, but the response is",
"# not OK (e.g., 500, 403, etc)",
"response",
"=",
"get_cache_response",
"(",
"cache_key",
")",
"if",
"response",
":",
"logger",
".",
"error",
"(",
"MESSAGE_CACHE_HIT",
",",
"extra",
"=",
"log_context",
")",
"else",
":",
"logger",
".",
"exception",
"(",
"MESSAGE_CACHE_MISS",
",",
"extra",
"=",
"log_context",
")",
"response",
"=",
"FailureResponse",
".",
"from_response",
"(",
"remote_response",
")",
"else",
":",
"cache",
".",
"set_many",
"(",
"{",
"cache_key",
":",
"remote_response",
".",
"content",
",",
"etag_cache_key",
":",
"remote_response",
".",
"headers",
".",
"get",
"(",
"'ETag'",
")",
",",
"}",
",",
"settings",
".",
"DIRECTORY_CLIENT_CORE_CACHE_EXPIRE_SECONDS",
")",
"response",
"=",
"LiveResponse",
".",
"from_response",
"(",
"remote_response",
")",
"return",
"response",
"return",
"wrapper",
"return",
"closure"
] | 39.076923 | 18.384615 |
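The `CacheResponse.__setstate__` trick in the row above rebuilds a response object from raw cached bytes. A minimal sketch of the same mechanism on a plain `requests.Response` (the library's `CacheResponse` presumably subclasses it; this demonstrates only the pickling hook, not the library itself):

```python
import requests

# Rebuild a Response from cached state, as the decorator above does
# with its CacheResponse subclass.
response = requests.Response()
response.__setstate__({'status_code': 200, '_content': b'cached body'})
print(response.ok, response.text)  # True cached body
```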
def get_model(self):
'''
        Return an `object` wrapping the model as a function approximator;
        its `cnn` attribute is of type
        `pydbm.cnn.pydbm.cnn.convolutional_neural_network.ConvolutionalNeuralNetwork`.
'''
class Model(object):
def __init__(self, cnn):
self.cnn = cnn
return Model(self.__cnn)
|
[
"def",
"get_model",
"(",
"self",
")",
":",
"class",
"Model",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"cnn",
")",
":",
"self",
".",
"cnn",
"=",
"cnn",
"return",
"Model",
"(",
"self",
".",
"__cnn",
")"
] | 31.363636 | 19.363636 |
def plot_vxx(self, colorbar=True, cb_orientation='vertical',
cb_label=None, ax=None, show=True, fname=None, **kwargs):
"""
Plot the Vxx component of the tensor.
Usage
-----
x.plot_vxx([tick_interval, xlabel, ylabel, ax, colorbar,
cb_orientation, cb_label, show, fname])
Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = False
If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$V_{xx}$'
            Text label for the colorbar.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
kwargs : optional
            Keyword arguments that will be sent to the SHGrid.plot()
and plt.imshow() methods.
"""
if cb_label is None:
cb_label = self._vxx_label
if ax is None:
fig, axes = self.vxx.plot(colorbar=colorbar,
cb_orientation=cb_orientation,
cb_label=cb_label, show=False, **kwargs)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
else:
self.vxx.plot(colorbar=colorbar, cb_orientation=cb_orientation,
cb_label=cb_label, ax=ax, **kwargs)
|
[
"def",
"plot_vxx",
"(",
"self",
",",
"colorbar",
"=",
"True",
",",
"cb_orientation",
"=",
"'vertical'",
",",
"cb_label",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"show",
"=",
"True",
",",
"fname",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"cb_label",
"is",
"None",
":",
"cb_label",
"=",
"self",
".",
"_vxx_label",
"if",
"ax",
"is",
"None",
":",
"fig",
",",
"axes",
"=",
"self",
".",
"vxx",
".",
"plot",
"(",
"colorbar",
"=",
"colorbar",
",",
"cb_orientation",
"=",
"cb_orientation",
",",
"cb_label",
"=",
"cb_label",
",",
"show",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"if",
"show",
":",
"fig",
".",
"show",
"(",
")",
"if",
"fname",
"is",
"not",
"None",
":",
"fig",
".",
"savefig",
"(",
"fname",
")",
"return",
"fig",
",",
"axes",
"else",
":",
"self",
".",
"vxx",
".",
"plot",
"(",
"colorbar",
"=",
"colorbar",
",",
"cb_orientation",
"=",
"cb_orientation",
",",
"cb_label",
"=",
"cb_label",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")"
] | 40.679245 | 18.830189 |
def data(
self, previous_data=False, prompt=False, console_row=False,
console_row_to_cursor=False, console_row_from_cursor=False
):
""" Return output data. Flags specifies what data to append. If no flags was specified
nul-length string returned
        :param previous_data: If True, previous output is appended
        :param prompt: If True, the console prompt is appended. If console_row or console_row_to_cursor is True, \
        then this value is ignored
        :param console_row: If True, the console prompt and current input are appended.
        :param console_row_to_cursor: If True, the console prompt and current input up to the cursor are appended. \
        If console_row is True, then this value is ignored
        :param console_row_from_cursor: If True, the current input from the cursor is appended. \
        If console_row is True, then this value is ignored
:return: str
"""
result = ''
if previous_data:
result += self.__previous_data
if prompt or console_row or console_row_to_cursor:
result += self.console().prompt()
if console_row or (console_row_from_cursor and console_row_to_cursor):
result += self.console().row()
elif console_row_to_cursor:
result += self.console().row()[:self.cursor()]
elif console_row_from_cursor:
result += self.console().row()[self.cursor():]
return result
|
[
"def",
"data",
"(",
"self",
",",
"previous_data",
"=",
"False",
",",
"prompt",
"=",
"False",
",",
"console_row",
"=",
"False",
",",
"console_row_to_cursor",
"=",
"False",
",",
"console_row_from_cursor",
"=",
"False",
")",
":",
"result",
"=",
"''",
"if",
"previous_data",
":",
"result",
"+=",
"self",
".",
"__previous_data",
"if",
"prompt",
"or",
"console_row",
"or",
"console_row_to_cursor",
":",
"result",
"+=",
"self",
".",
"console",
"(",
")",
".",
"prompt",
"(",
")",
"if",
"console_row",
"or",
"(",
"console_row_from_cursor",
"and",
"console_row_to_cursor",
")",
":",
"result",
"+=",
"self",
".",
"console",
"(",
")",
".",
"row",
"(",
")",
"elif",
"console_row_to_cursor",
":",
"result",
"+=",
"self",
".",
"console",
"(",
")",
".",
"row",
"(",
")",
"[",
":",
"self",
".",
"cursor",
"(",
")",
"]",
"elif",
"console_row_from_cursor",
":",
"result",
"+=",
"self",
".",
"console",
"(",
")",
".",
"row",
"(",
")",
"[",
"self",
".",
"cursor",
"(",
")",
":",
"]",
"return",
"result"
] | 37.029412 | 24.235294 |
def _read_lines(filepath):
"""Read a req file to a list to support nested requirement files."""
with open(filepath, 'rt', encoding='utf8') as fh:
for line in fh:
line = line.strip()
if line.startswith("-r"):
logger.debug("Reading deps from nested requirement file: %s", line)
try:
nested_filename = line.split()[1]
except IndexError:
logger.warning(
"Invalid format to indicate a nested requirements file: '%r'", line)
else:
nested_filepath = os.path.join(
os.path.dirname(filepath), nested_filename)
yield from _read_lines(nested_filepath)
else:
yield line
|
[
"def",
"_read_lines",
"(",
"filepath",
")",
":",
"with",
"open",
"(",
"filepath",
",",
"'rt'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"\"-r\"",
")",
":",
"logger",
".",
"debug",
"(",
"\"Reading deps from nested requirement file: %s\"",
",",
"line",
")",
"try",
":",
"nested_filename",
"=",
"line",
".",
"split",
"(",
")",
"[",
"1",
"]",
"except",
"IndexError",
":",
"logger",
".",
"warning",
"(",
"\"Invalid format to indicate a nested requirements file: '%r'\"",
",",
"line",
")",
"else",
":",
"nested_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filepath",
")",
",",
"nested_filename",
")",
"yield",
"from",
"_read_lines",
"(",
"nested_filepath",
")",
"else",
":",
"yield",
"line"
] | 44.444444 | 17.111111 |
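A runnable sketch of the nested-requirements pattern above, with the logging stripped and made-up file names; the recursion bottoms out when a file contains no further `-r` lines:

```python
import os
import tempfile

def read_lines(filepath):
    """Simplified copy of the generator above, without logging."""
    with open(filepath, 'rt', encoding='utf8') as fh:
        for line in fh:
            line = line.strip()
            if line.startswith("-r"):
                nested = os.path.join(os.path.dirname(filepath), line.split()[1])
                yield from read_lines(nested)
            else:
                yield line

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, "base.txt"), "w", encoding="utf8") as f:
        f.write("requests==2.31.0\n")
    with open(os.path.join(tmp, "requirements.txt"), "w", encoding="utf8") as f:
        f.write("-r base.txt\nflask\n")
    print(list(read_lines(os.path.join(tmp, "requirements.txt"))))
    # ['requests==2.31.0', 'flask']
```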
def predict(self, dataset, output_type='class', missing_value_action='auto'):
"""
A flexible and advanced prediction API.
The target column is provided during
:func:`~turicreate.decision_tree.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional.
Form of the predictions which are one of:
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'margin': Margin associated with the prediction (not applicable
for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate, classify
Examples
--------
>>> m.predict(testdata)
>>> m.predict(testdata, output_type='probability')
>>> m.predict(testdata, output_type='margin')
"""
_check_categorical_option_type('output_type', output_type,
['class', 'margin', 'probability', 'probability_vector'])
return super(_Classifier, self).predict(dataset,
output_type=output_type,
missing_value_action=missing_value_action)
|
[
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"'class'",
",",
"missing_value_action",
"=",
"'auto'",
")",
":",
"_check_categorical_option_type",
"(",
"'output_type'",
",",
"output_type",
",",
"[",
"'class'",
",",
"'margin'",
",",
"'probability'",
",",
"'probability_vector'",
"]",
")",
"return",
"super",
"(",
"_Classifier",
",",
"self",
")",
".",
"predict",
"(",
"dataset",
",",
"output_type",
"=",
"output_type",
",",
"missing_value_action",
"=",
"missing_value_action",
")"
] | 43.290323 | 26.064516 |
def get_enabled_features(self, user_id, attributes=None):
""" Returns the list of features that are enabled for the user.
Args:
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
A list of the keys of the features that are enabled for the user.
"""
enabled_features = []
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_enabled_features'))
return enabled_features
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return enabled_features
if not self._validate_user_inputs(attributes):
return enabled_features
for feature in self.config.feature_key_map.values():
if self.is_feature_enabled(feature.key, user_id, attributes):
enabled_features.append(feature.key)
return enabled_features
|
[
"def",
"get_enabled_features",
"(",
"self",
",",
"user_id",
",",
"attributes",
"=",
"None",
")",
":",
"enabled_features",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"is_valid",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_DATAFILE",
".",
"format",
"(",
"'get_enabled_features'",
")",
")",
"return",
"enabled_features",
"if",
"not",
"isinstance",
"(",
"user_id",
",",
"string_types",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"enums",
".",
"Errors",
".",
"INVALID_INPUT_ERROR",
".",
"format",
"(",
"'user_id'",
")",
")",
"return",
"enabled_features",
"if",
"not",
"self",
".",
"_validate_user_inputs",
"(",
"attributes",
")",
":",
"return",
"enabled_features",
"for",
"feature",
"in",
"self",
".",
"config",
".",
"feature_key_map",
".",
"values",
"(",
")",
":",
"if",
"self",
".",
"is_feature_enabled",
"(",
"feature",
".",
"key",
",",
"user_id",
",",
"attributes",
")",
":",
"enabled_features",
".",
"append",
"(",
"feature",
".",
"key",
")",
"return",
"enabled_features"
] | 31.714286 | 22.5 |
def density_dir(CIJ):
'''
Density is the fraction of present connections to possible connections.
Parameters
----------
CIJ : NxN np.ndarray
directed weighted/binary connection matrix
Returns
-------
kden : float
density
N : int
number of vertices
k : int
number of edges
Notes
-----
Assumes CIJ is directed and has no self-connections.
Weight information is discarded.
'''
n = len(CIJ)
k = np.size(np.where(CIJ.flatten()))
kden = k / (n * n - n)
return kden, n, k
|
[
"def",
"density_dir",
"(",
"CIJ",
")",
":",
"n",
"=",
"len",
"(",
"CIJ",
")",
"k",
"=",
"np",
".",
"size",
"(",
"np",
".",
"where",
"(",
"CIJ",
".",
"flatten",
"(",
")",
")",
")",
"kden",
"=",
"k",
"/",
"(",
"n",
"*",
"n",
"-",
"n",
")",
"return",
"kden",
",",
"n",
",",
"k"
] | 20.333333 | 24.185185 |
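The density formula above is k / (n² − n): present edges over possible directed edges, with self-connections excluded. A quick check on a made-up 3-node cycle:

```python
import numpy as np

# 3-node directed cycle: 3 edges out of 3*3 - 3 = 6 possible.
CIJ = np.array([[0, 1, 0],
                [0, 0, 1],
                [1, 0, 0]])
n = len(CIJ)
k = np.size(np.where(CIJ.flatten()))  # counts nonzero entries
print(k / (n * n - n))  # 0.5
```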
def fillNullValues(col, rows):
'Fill null cells in col with the previous non-null value'
lastval = None
nullfunc = isNullFunc()
n = 0
rowsToFill = list(rows)
for r in Progress(col.sheet.rows, 'filling'): # loop over all rows
try:
val = col.getValue(r)
except Exception as e:
val = e
if nullfunc(val) and r in rowsToFill:
if lastval:
col.setValue(r, lastval)
n += 1
else:
lastval = val
col.recalc()
status("filled %d values" % n)
|
[
"def",
"fillNullValues",
"(",
"col",
",",
"rows",
")",
":",
"lastval",
"=",
"None",
"nullfunc",
"=",
"isNullFunc",
"(",
")",
"n",
"=",
"0",
"rowsToFill",
"=",
"list",
"(",
"rows",
")",
"for",
"r",
"in",
"Progress",
"(",
"col",
".",
"sheet",
".",
"rows",
",",
"'filling'",
")",
":",
"# loop over all rows",
"try",
":",
"val",
"=",
"col",
".",
"getValue",
"(",
"r",
")",
"except",
"Exception",
"as",
"e",
":",
"val",
"=",
"e",
"if",
"nullfunc",
"(",
"val",
")",
"and",
"r",
"in",
"rowsToFill",
":",
"if",
"lastval",
":",
"col",
".",
"setValue",
"(",
"r",
",",
"lastval",
")",
"n",
"+=",
"1",
"else",
":",
"lastval",
"=",
"val",
"col",
".",
"recalc",
"(",
")",
"status",
"(",
"\"filled %d values\"",
"%",
"n",
")"
] | 26.428571 | 19 |
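The row above is a forward fill over selected rows; note that `if lastval:` skips falsy previous values, so a legitimate 0 or empty string is never propagated. For comparison, the same idea in pandas (an analogue, not part of the library above):

```python
import pandas as pd

# pandas' forward fill propagates the last non-null value, including 0.
s = pd.Series([1.0, None, None, 4.0, None])
print(s.ffill().tolist())  # [1.0, 1.0, 1.0, 4.0, 4.0]
```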
def fetch_all(self, api_client, fetchstatuslogger, q, targets):
'''
Make all API calls as defined in metadata.json
:param api_client:
:param fetchstatuslogger:
:param q:
:param targets:
:return:
'''
self.fetchstatuslogger = fetchstatuslogger
if targets != None:
# Ensure targets is a tuple
if type(targets) != list and type(targets) != tuple:
targets = tuple(targets,)
elif type(targets) != tuple:
targets = tuple(targets)
for target in targets:
self._fetch_targets(api_client, q, target)
|
[
"def",
"fetch_all",
"(",
"self",
",",
"api_client",
",",
"fetchstatuslogger",
",",
"q",
",",
"targets",
")",
":",
"self",
".",
"fetchstatuslogger",
"=",
"fetchstatuslogger",
"if",
"targets",
"!=",
"None",
":",
"# Ensure targets is a tuple",
"if",
"type",
"(",
"targets",
")",
"!=",
"list",
"and",
"type",
"(",
"targets",
")",
"!=",
"tuple",
":",
"targets",
"=",
"tuple",
"(",
"targets",
",",
")",
"elif",
"type",
"(",
"targets",
")",
"!=",
"tuple",
":",
"targets",
"=",
"tuple",
"(",
"targets",
")",
"for",
"target",
"in",
"targets",
":",
"self",
".",
"_fetch_targets",
"(",
"api_client",
",",
"q",
",",
"target",
")"
] | 33.631579 | 15.421053 |
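The target-coercion branch above has a subtle consequence worth knowing: anything that is neither a list nor a tuple, including a plain string, is passed through `tuple()`, so a string is exploded into its characters. Run in isolation:

```python
# The same coercion logic as in the row above, on three inputs; note the string.
for targets in (["a", "b"], ("a", "b"), "ab"):
    if type(targets) != list and type(targets) != tuple:
        targets = tuple(targets,)   # tuple("ab") -> ('a', 'b')
    elif type(targets) != tuple:
        targets = tuple(targets)
    print(targets)  # ('a', 'b') in all three cases
```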
def is_excluded(root, excludes):
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
    e.g. an exclude "foo" also accidentally excluding "foobar".
"""
root = os.path.normpath(root)
for exclude in excludes:
if root == exclude:
return True
return False
|
[
"def",
"is_excluded",
"(",
"root",
",",
"excludes",
")",
":",
"root",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"root",
")",
"for",
"exclude",
"in",
"excludes",
":",
"if",
"root",
"==",
"exclude",
":",
"return",
"True",
"return",
"False"
] | 32.727273 | 16.545455 |
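The docstring above is about exact matches after normalization: `os.path.normpath` strips trailing slashes, so `"foo/"` equals `"foo"`, while `"foobar"` never matches an exclude of `"foo"`. Isolated, with made-up paths:

```python
import os

def is_excluded(root, excludes):
    """Same exact-match check as above, condensed into any()."""
    root = os.path.normpath(root)
    return any(root == exclude for exclude in excludes)

print(is_excluded("docs/_build/", ["docs/_build"]))       # True
print(is_excluded("docs/_build_extra", ["docs/_build"]))  # False
```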
def _load_manifest_interpret_source(manifest, source, username=None, password=None, verify_certificate=True, do_inherit=True):
""" Interpret the <source>, and load the results into <manifest> """
try:
if isinstance(source, string_types):
if source.startswith("http"):
# if manifest is a url
_load_manifest_from_url(manifest, source,
verify_certificate=verify_certificate,
username=username, password=password)
else:
_load_manifest_from_file(manifest, source)
if not manifest.has_option('config', 'source'):
manifest.set('config', 'source', str(source))
else:
# assume source is a file pointer
manifest.readfp(source)
if manifest.has_option('config', 'extends') and do_inherit:
parent_manifest = configparser.RawConfigParser()
_load_manifest_interpret_source(parent_manifest,
manifest.get('config', 'extends'),
username=username,
password=password,
verify_certificate=verify_certificate)
for s in parent_manifest.sections():
for k, v in parent_manifest.items(s):
if not manifest.has_option(s, k):
manifest.set(s, k, v)
except configparser.Error:
logger.debug("", exc_info=True)
error_message = sys.exc_info()[1]
raise ManifestException("Unable to parse manifest!: {0}".format(error_message))
|
[
"def",
"_load_manifest_interpret_source",
"(",
"manifest",
",",
"source",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"verify_certificate",
"=",
"True",
",",
"do_inherit",
"=",
"True",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"source",
",",
"string_types",
")",
":",
"if",
"source",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"# if manifest is a url",
"_load_manifest_from_url",
"(",
"manifest",
",",
"source",
",",
"verify_certificate",
"=",
"verify_certificate",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"else",
":",
"_load_manifest_from_file",
"(",
"manifest",
",",
"source",
")",
"if",
"not",
"manifest",
".",
"has_option",
"(",
"'config'",
",",
"'source'",
")",
":",
"manifest",
".",
"set",
"(",
"'config'",
",",
"'source'",
",",
"str",
"(",
"source",
")",
")",
"else",
":",
"# assume source is a file pointer",
"manifest",
".",
"readfp",
"(",
"source",
")",
"if",
"manifest",
".",
"has_option",
"(",
"'config'",
",",
"'extends'",
")",
"and",
"do_inherit",
":",
"parent_manifest",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"_load_manifest_interpret_source",
"(",
"parent_manifest",
",",
"manifest",
".",
"get",
"(",
"'config'",
",",
"'extends'",
")",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"verify_certificate",
"=",
"verify_certificate",
")",
"for",
"s",
"in",
"parent_manifest",
".",
"sections",
"(",
")",
":",
"for",
"k",
",",
"v",
"in",
"parent_manifest",
".",
"items",
"(",
"s",
")",
":",
"if",
"not",
"manifest",
".",
"has_option",
"(",
"s",
",",
"k",
")",
":",
"manifest",
".",
"set",
"(",
"s",
",",
"k",
",",
"v",
")",
"except",
"configparser",
".",
"Error",
":",
"logger",
".",
"debug",
"(",
"\"\"",
",",
"exc_info",
"=",
"True",
")",
"error_message",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"raise",
"ManifestException",
"(",
"\"Unable to parse manifest!: {0}\"",
".",
"format",
"(",
"error_message",
")",
")"
] | 53 | 20.75 |
def _accumulate(sequence, func):
"""
Python2 accumulate implementation taken from
https://docs.python.org/3/library/itertools.html#itertools.accumulate
"""
iterator = iter(sequence)
total = next(iterator)
yield total
for element in iterator:
total = func(total, element)
yield total
|
[
"def",
"_accumulate",
"(",
"sequence",
",",
"func",
")",
":",
"iterator",
"=",
"iter",
"(",
"sequence",
")",
"total",
"=",
"next",
"(",
"iterator",
")",
"yield",
"total",
"for",
"element",
"in",
"iterator",
":",
"total",
"=",
"func",
"(",
"total",
",",
"element",
")",
"yield",
"total"
] | 29.090909 | 12.363636 |
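Restated and exercised (any binary function works, mirroring Python 3's `itertools.accumulate`):

```python
import operator

def accumulate(sequence, func):
    """Same generator as above: yields running results of func."""
    iterator = iter(sequence)
    total = next(iterator)
    yield total
    for element in iterator:
        total = func(total, element)
        yield total

print(list(accumulate([1, 2, 3, 4], operator.add)))  # [1, 3, 6, 10]
print(list(accumulate([1, 2, 3, 4], operator.mul)))  # [1, 2, 6, 24]
```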
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
|
[
"def",
"_smallest_integer_by_dtype",
"(",
"dt",
")",
":",
"if",
"not",
"_is_known_dtype",
"(",
"dt",
")",
":",
"raise",
"TypeError",
"(",
"\"Unrecognized dtype: {}\"",
".",
"format",
"(",
"dt",
".",
"name",
")",
")",
"if",
"_is_known_unsigned_by_dtype",
"(",
"dt",
")",
":",
"return",
"0",
"return",
"-",
"1",
"*",
"_largest_integer_by_dtype",
"(",
"dt",
")"
] | 42 | 10.142857 |
def open_interface_async(self, conn_id, interface, callback, connection_string=None):
"""Asynchronously connect to a device."""
future = self._loop.launch_coroutine(self._adapter.open_interface(conn_id, interface))
future.add_done_callback(lambda x: self._callback_future(conn_id, x, callback))
|
[
"def",
"open_interface_async",
"(",
"self",
",",
"conn_id",
",",
"interface",
",",
"callback",
",",
"connection_string",
"=",
"None",
")",
":",
"future",
"=",
"self",
".",
"_loop",
".",
"launch_coroutine",
"(",
"self",
".",
"_adapter",
".",
"open_interface",
"(",
"conn_id",
",",
"interface",
")",
")",
"future",
".",
"add_done_callback",
"(",
"lambda",
"x",
":",
"self",
".",
"_callback_future",
"(",
"conn_id",
",",
"x",
",",
"callback",
")",
")"
] | 63 | 37.2 |
def session_check_name(session_name):
"""
    Raises an exception if the session name is invalid; modeled after the tmux function.
tmux(1) session names may not be empty, or include periods or colons.
These delimiters are reserved for noting session, window and pane.
Parameters
----------
session_name : str
Name of session.
Raises
------
:exc:`exc.BadSessionName`
Invalid session name.
"""
if not session_name or len(session_name) == 0:
raise exc.BadSessionName("tmux session names may not be empty.")
elif '.' in session_name:
raise exc.BadSessionName(
"tmux session name \"%s\" may not contain periods.", session_name
)
elif ':' in session_name:
raise exc.BadSessionName(
"tmux session name \"%s\" may not contain colons.", session_name
)
|
[
"def",
"session_check_name",
"(",
"session_name",
")",
":",
"if",
"not",
"session_name",
"or",
"len",
"(",
"session_name",
")",
"==",
"0",
":",
"raise",
"exc",
".",
"BadSessionName",
"(",
"\"tmux session names may not be empty.\"",
")",
"elif",
"'.'",
"in",
"session_name",
":",
"raise",
"exc",
".",
"BadSessionName",
"(",
"\"tmux session name \\\"%s\\\" may not contain periods.\"",
",",
"session_name",
")",
"elif",
"':'",
"in",
"session_name",
":",
"raise",
"exc",
".",
"BadSessionName",
"(",
"\"tmux session name \\\"%s\\\" may not contain colons.\"",
",",
"session_name",
")"
] | 30.888889 | 22.148148 |
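A self-contained sketch of the same checks; a local exception class stands in for the library's `exc.BadSessionName`:

```python
class BadSessionName(Exception):
    """Stand-in for the library's exc.BadSessionName."""

def check_name(session_name):
    # Same three rules as above: non-empty, no periods, no colons.
    if not session_name:
        raise BadSessionName("tmux session names may not be empty.")
    if '.' in session_name:
        raise BadSessionName('name "%s" may not contain periods.' % session_name)
    if ':' in session_name:
        raise BadSessionName('name "%s" may not contain colons.' % session_name)

for name in ("dev", "my.session", "host:0"):
    try:
        check_name(name)
        print(name, "-> ok")
    except BadSessionName as e:
        print(name, "->", e)
```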
def compute_layout_properties(
width, height, frame_width, frame_height, explicit_width,
explicit_height, aspect, data_aspect, responsive, size_multiplier,
logger=None):
"""
Utility to compute the aspect, plot width/height and sizing_mode
behavior.
Args:
width (int): Plot width
height (int): Plot height
frame_width (int): Plot frame width
frame_height (int): Plot frame height
explicit_width (list): List of user supplied widths
explicit_height (list): List of user supplied heights
aspect (float): Plot aspect
data_aspect (float): Scaling between x-axis and y-axis ranges
responsive (boolean): Whether the plot should resize responsively
size_multiplier (float): Multiplier for supplied plot dimensions
logger (param.Parameters): Parameters object to issue warnings on
Returns:
Returns two dictionaries one for the aspect and sizing modes,
and another for the plot dimensions.
"""
fixed_width = (explicit_width or frame_width)
fixed_height = (explicit_height or frame_height)
fixed_aspect = aspect or data_aspect
aspect = 1 if aspect == 'square' else aspect
# Plot dimensions
height = None if height is None else int(height*size_multiplier)
width = None if width is None else int(width*size_multiplier)
frame_height = None if frame_height is None else int(frame_height*size_multiplier)
frame_width = None if frame_width is None else int(frame_width*size_multiplier)
actual_width = frame_width or width
actual_height = frame_height or height
if frame_width is not None:
width = None
if frame_height is not None:
height = None
sizing_mode = 'fixed'
if responsive:
if fixed_height and fixed_width:
responsive = False
if logger:
logger.warning("responsive mode could not be enabled "
"because fixed width and height were "
"specified.")
elif fixed_width:
height = None
sizing_mode = 'fixed' if fixed_aspect else 'stretch_height'
elif fixed_height:
width = None
sizing_mode = 'fixed' if fixed_aspect else 'stretch_width'
else:
width, height = None, None
if fixed_aspect:
if responsive == 'width':
sizing_mode = 'scale_width'
elif responsive == 'height':
sizing_mode = 'scale_height'
else:
sizing_mode = 'scale_both'
else:
if responsive == 'width':
sizing_mode = 'stretch_both'
elif responsive == 'height':
sizing_mode = 'stretch_height'
else:
sizing_mode = 'stretch_both'
if fixed_aspect:
aspect_type = 'data_aspect' if data_aspect else 'aspect'
if fixed_width and fixed_height:
if not data_aspect:
aspect = None
if logger:
logger.warning(
"%s value was ignored because absolute width and "
"height values were provided. Either supply "
"explicit frame_width and frame_height to achieve "
"desired aspect OR supply a combination of width "
"or height and an aspect value." % aspect_type)
elif fixed_width and responsive:
height = None
responsive = False
if logger:
logger.warning("responsive mode could not be enabled "
"because fixed width and aspect were "
"specified.")
elif fixed_height and responsive:
width = None
responsive = False
if logger:
logger.warning("responsive mode could not be enabled "
"because fixed height and aspect were "
"specified.")
elif responsive == 'width':
sizing_mode = 'scale_width'
elif responsive == 'height':
sizing_mode = 'scale_height'
if responsive == 'width' and fixed_width:
responsive = False
if logger:
logger.warning("responsive width mode could not be enabled "
"because a fixed width was defined.")
if responsive == 'height' and fixed_height:
responsive = False
if logger:
logger.warning("responsive height mode could not be enabled "
"because a fixed height was defined.")
match_aspect = False
aspect_scale = 1
aspect_ratio = None
if (fixed_width and fixed_height):
pass
elif data_aspect or aspect == 'equal':
match_aspect = True
if fixed_width or not fixed_height:
height = None
if fixed_height or not fixed_width:
width = None
aspect_scale = data_aspect
if aspect == 'equal':
aspect_scale = 1
elif responsive:
aspect_ratio = aspect
elif isnumeric(aspect):
if responsive:
aspect_ratio = aspect
elif fixed_width:
frame_width = actual_width
frame_height = int(actual_width/aspect)
width, height = None, None
else:
frame_width = int(actual_height*aspect)
frame_height = actual_height
width, height = None, None
elif aspect is not None and logger:
logger.warning('aspect value of type %s not recognized, '
'provide a numeric value, \'equal\' or '
'\'square\'.')
return ({'aspect_ratio': aspect_ratio,
'aspect_scale': aspect_scale,
'match_aspect': match_aspect,
'sizing_mode' : sizing_mode},
{'frame_width' : frame_width,
'frame_height': frame_height,
'plot_height' : height,
'plot_width' : width})
|
[
"def",
"compute_layout_properties",
"(",
"width",
",",
"height",
",",
"frame_width",
",",
"frame_height",
",",
"explicit_width",
",",
"explicit_height",
",",
"aspect",
",",
"data_aspect",
",",
"responsive",
",",
"size_multiplier",
",",
"logger",
"=",
"None",
")",
":",
"fixed_width",
"=",
"(",
"explicit_width",
"or",
"frame_width",
")",
"fixed_height",
"=",
"(",
"explicit_height",
"or",
"frame_height",
")",
"fixed_aspect",
"=",
"aspect",
"or",
"data_aspect",
"aspect",
"=",
"1",
"if",
"aspect",
"==",
"'square'",
"else",
"aspect",
"# Plot dimensions",
"height",
"=",
"None",
"if",
"height",
"is",
"None",
"else",
"int",
"(",
"height",
"*",
"size_multiplier",
")",
"width",
"=",
"None",
"if",
"width",
"is",
"None",
"else",
"int",
"(",
"width",
"*",
"size_multiplier",
")",
"frame_height",
"=",
"None",
"if",
"frame_height",
"is",
"None",
"else",
"int",
"(",
"frame_height",
"*",
"size_multiplier",
")",
"frame_width",
"=",
"None",
"if",
"frame_width",
"is",
"None",
"else",
"int",
"(",
"frame_width",
"*",
"size_multiplier",
")",
"actual_width",
"=",
"frame_width",
"or",
"width",
"actual_height",
"=",
"frame_height",
"or",
"height",
"if",
"frame_width",
"is",
"not",
"None",
":",
"width",
"=",
"None",
"if",
"frame_height",
"is",
"not",
"None",
":",
"height",
"=",
"None",
"sizing_mode",
"=",
"'fixed'",
"if",
"responsive",
":",
"if",
"fixed_height",
"and",
"fixed_width",
":",
"responsive",
"=",
"False",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"responsive mode could not be enabled \"",
"\"because fixed width and height were \"",
"\"specified.\"",
")",
"elif",
"fixed_width",
":",
"height",
"=",
"None",
"sizing_mode",
"=",
"'fixed'",
"if",
"fixed_aspect",
"else",
"'stretch_height'",
"elif",
"fixed_height",
":",
"width",
"=",
"None",
"sizing_mode",
"=",
"'fixed'",
"if",
"fixed_aspect",
"else",
"'stretch_width'",
"else",
":",
"width",
",",
"height",
"=",
"None",
",",
"None",
"if",
"fixed_aspect",
":",
"if",
"responsive",
"==",
"'width'",
":",
"sizing_mode",
"=",
"'scale_width'",
"elif",
"responsive",
"==",
"'height'",
":",
"sizing_mode",
"=",
"'scale_height'",
"else",
":",
"sizing_mode",
"=",
"'scale_both'",
"else",
":",
"if",
"responsive",
"==",
"'width'",
":",
"sizing_mode",
"=",
"'stretch_both'",
"elif",
"responsive",
"==",
"'height'",
":",
"sizing_mode",
"=",
"'stretch_height'",
"else",
":",
"sizing_mode",
"=",
"'stretch_both'",
"if",
"fixed_aspect",
":",
"aspect_type",
"=",
"'data_aspect'",
"if",
"data_aspect",
"else",
"'aspect'",
"if",
"fixed_width",
"and",
"fixed_height",
":",
"if",
"not",
"data_aspect",
":",
"aspect",
"=",
"None",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"%s value was ignored because absolute width and \"",
"\"height values were provided. Either supply \"",
"\"explicit frame_width and frame_height to achieve \"",
"\"desired aspect OR supply a combination of width \"",
"\"or height and an aspect value.\"",
"%",
"aspect_type",
")",
"elif",
"fixed_width",
"and",
"responsive",
":",
"height",
"=",
"None",
"responsive",
"=",
"False",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"responsive mode could not be enabled \"",
"\"because fixed width and aspect were \"",
"\"specified.\"",
")",
"elif",
"fixed_height",
"and",
"responsive",
":",
"width",
"=",
"None",
"responsive",
"=",
"False",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"responsive mode could not be enabled \"",
"\"because fixed height and aspect were \"",
"\"specified.\"",
")",
"elif",
"responsive",
"==",
"'width'",
":",
"sizing_mode",
"=",
"'scale_width'",
"elif",
"responsive",
"==",
"'height'",
":",
"sizing_mode",
"=",
"'scale_height'",
"if",
"responsive",
"==",
"'width'",
"and",
"fixed_width",
":",
"responsive",
"=",
"False",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"responsive width mode could not be enabled \"",
"\"because a fixed width was defined.\"",
")",
"if",
"responsive",
"==",
"'height'",
"and",
"fixed_height",
":",
"responsive",
"=",
"False",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"responsive height mode could not be enabled \"",
"\"because a fixed height was defined.\"",
")",
"match_aspect",
"=",
"False",
"aspect_scale",
"=",
"1",
"aspect_ratio",
"=",
"None",
"if",
"(",
"fixed_width",
"and",
"fixed_height",
")",
":",
"pass",
"elif",
"data_aspect",
"or",
"aspect",
"==",
"'equal'",
":",
"match_aspect",
"=",
"True",
"if",
"fixed_width",
"or",
"not",
"fixed_height",
":",
"height",
"=",
"None",
"if",
"fixed_height",
"or",
"not",
"fixed_width",
":",
"width",
"=",
"None",
"aspect_scale",
"=",
"data_aspect",
"if",
"aspect",
"==",
"'equal'",
":",
"aspect_scale",
"=",
"1",
"elif",
"responsive",
":",
"aspect_ratio",
"=",
"aspect",
"elif",
"isnumeric",
"(",
"aspect",
")",
":",
"if",
"responsive",
":",
"aspect_ratio",
"=",
"aspect",
"elif",
"fixed_width",
":",
"frame_width",
"=",
"actual_width",
"frame_height",
"=",
"int",
"(",
"actual_width",
"/",
"aspect",
")",
"width",
",",
"height",
"=",
"None",
",",
"None",
"else",
":",
"frame_width",
"=",
"int",
"(",
"actual_height",
"*",
"aspect",
")",
"frame_height",
"=",
"actual_height",
"width",
",",
"height",
"=",
"None",
",",
"None",
"elif",
"aspect",
"is",
"not",
"None",
"and",
"logger",
":",
"logger",
".",
"warning",
"(",
"'aspect value of type %s not recognized, '",
"'provide a numeric value, \\'equal\\' or '",
"'\\'square\\'.'",
")",
"return",
"(",
"{",
"'aspect_ratio'",
":",
"aspect_ratio",
",",
"'aspect_scale'",
":",
"aspect_scale",
",",
"'match_aspect'",
":",
"match_aspect",
",",
"'sizing_mode'",
":",
"sizing_mode",
"}",
",",
"{",
"'frame_width'",
":",
"frame_width",
",",
"'frame_height'",
":",
"frame_height",
",",
"'plot_height'",
":",
"height",
",",
"'plot_width'",
":",
"width",
"}",
")"
] | 37.943038 | 16.056962 |
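A hypothetical call, assuming the function above and the `isnumeric` helper it uses are importable; a fixed width combined with a numeric aspect collapses to fixed frame dimensions:

```python
# Hypothetical: assumes compute_layout_properties (and isnumeric) are in
# scope; the argument values are made up for illustration.
aspect_opts, dim_opts = compute_layout_properties(
    width=400, height=None, frame_width=None, frame_height=None,
    explicit_width=[400], explicit_height=[], aspect=2, data_aspect=None,
    responsive=False, size_multiplier=1)
print(aspect_opts['sizing_mode'])  # 'fixed'
print(dim_opts)  # frame_width=400, frame_height=200, plot dims None
```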
def create(ctx):
""" Create default config file
"""
import shutil
this_dir, this_filename = os.path.split(__file__)
default_config_file = os.path.join(this_dir, "apis/example-config.yaml")
config_file = ctx.obj["configfile"]
shutil.copyfile(default_config_file, config_file)
print_message("Config file created: {}".format(config_file))
|
[
"def",
"create",
"(",
"ctx",
")",
":",
"import",
"shutil",
"this_dir",
",",
"this_filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"__file__",
")",
"default_config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"this_dir",
",",
"\"apis/example-config.yaml\"",
")",
"config_file",
"=",
"ctx",
".",
"obj",
"[",
"\"configfile\"",
"]",
"shutil",
".",
"copyfile",
"(",
"default_config_file",
",",
"config_file",
")",
"print_message",
"(",
"\"Config file created: {}\"",
".",
"format",
"(",
"config_file",
")",
")"
] | 35.9 | 17.4 |