text (string, lengths 89 to 104k) | code_tokens (list) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 630) |
---|---|---|---|
def find_item_project(self, eitem):
    """
    Find the project for an enriched item

    :param eitem: enriched item for which to find the project
    :return: the project entry (a dictionary)
    """
    # get the data source name relying on the cfg section name; if null, use the connector name
    ds_name = self.cfg_section_name if self.cfg_section_name else self.get_connector_name()

    try:
        # retrieve the project which includes the repo url in the projects.json;
        # the variable `projects_json_repo` is passed from mordred to ELK when
        # iterating over the repos in the projects.json (see: param
        # `projects_json_repo` in the functions elk.feed_backend and
        # elk.enrich_backend)
        if self.projects_json_repo:
            project = self.prjs_map[ds_name][self.projects_json_repo]
        # if `projects_json_repo` is not set, which should never happen, use the
        # method `get_project_repository` (defined in each enricher)
        else:
            repository = self.get_project_repository(eitem)
            project = self.prjs_map[ds_name][repository]
    # With the introduction of `projects_json_repo` the code in the
    # except block should be unreachable, and could be removed
    except KeyError:
        # logger.warning("Project not found for repository %s (data source: %s)", repository, ds_name)
        project = None

        if self.filter_raw:
            fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw
            if ds_name in self.prjs_map and fltr in self.prjs_map[ds_name]:
                project = self.prjs_map[ds_name][fltr]

        if project == UNKNOWN_PROJECT:
            return None

        if project:
            return project

        # Always try to use the origin, in any case
        if 'origin' in eitem:
            if ds_name in self.prjs_map and eitem['origin'] in self.prjs_map[ds_name]:
                project = self.prjs_map[ds_name][eitem['origin']]
            elif ds_name in self.prjs_map:
                # Try to find origin as part of the keys
                for ds_repo in self.prjs_map[ds_name]:
                    ds_repo = str(ds_repo)  # discourse has category_id ints
                    if eitem['origin'] in ds_repo:
                        project = self.prjs_map[ds_name][ds_repo]
                        break

        if project == UNKNOWN_PROJECT:
            project = None

    return project
|
[
"def",
"find_item_project",
"(",
"self",
",",
"eitem",
")",
":",
"# get the data source name relying on the cfg section name, if null use the connector name",
"ds_name",
"=",
"self",
".",
"cfg_section_name",
"if",
"self",
".",
"cfg_section_name",
"else",
"self",
".",
"get_connector_name",
"(",
")",
"try",
":",
"# retrieve the project which includes the repo url in the projects.json,",
"# the variable `projects_json_repo` is passed from mordred to ELK when",
"# iterating over the repos in the projects.json, (see: param",
"# `projects_json_repo` in the functions elk.feed_backend and",
"# elk.enrich_backend)",
"if",
"self",
".",
"projects_json_repo",
":",
"project",
"=",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
"[",
"self",
".",
"projects_json_repo",
"]",
"# if `projects_json_repo`, which shouldn't never happen, use the",
"# method `get_project_repository` (defined in each enricher)",
"else",
":",
"repository",
"=",
"self",
".",
"get_project_repository",
"(",
"eitem",
")",
"project",
"=",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
"[",
"repository",
"]",
"# With the introduction of `projects_json_repo` the code in the",
"# except should be unreachable, and could be removed",
"except",
"KeyError",
":",
"# logger.warning(\"Project not found for repository %s (data source: %s)\", repository, ds_name)",
"project",
"=",
"None",
"if",
"self",
".",
"filter_raw",
":",
"fltr",
"=",
"eitem",
"[",
"'origin'",
"]",
"+",
"' --filter-raw='",
"+",
"self",
".",
"filter_raw",
"if",
"ds_name",
"in",
"self",
".",
"prjs_map",
"and",
"fltr",
"in",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
":",
"project",
"=",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
"[",
"fltr",
"]",
"if",
"project",
"==",
"UNKNOWN_PROJECT",
":",
"return",
"None",
"if",
"project",
":",
"return",
"project",
"# Try to use always the origin in any case",
"if",
"'origin'",
"in",
"eitem",
":",
"if",
"ds_name",
"in",
"self",
".",
"prjs_map",
"and",
"eitem",
"[",
"'origin'",
"]",
"in",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
":",
"project",
"=",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
"[",
"eitem",
"[",
"'origin'",
"]",
"]",
"elif",
"ds_name",
"in",
"self",
".",
"prjs_map",
":",
"# Try to find origin as part of the keys",
"for",
"ds_repo",
"in",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
":",
"ds_repo",
"=",
"str",
"(",
"ds_repo",
")",
"# discourse has category_id ints",
"if",
"eitem",
"[",
"'origin'",
"]",
"in",
"ds_repo",
":",
"project",
"=",
"self",
".",
"prjs_map",
"[",
"ds_name",
"]",
"[",
"ds_repo",
"]",
"break",
"if",
"project",
"==",
"UNKNOWN_PROJECT",
":",
"project",
"=",
"None",
"return",
"project"
] | 47.166667 | 24.203704 |
def delete_item(self, item_uri):
    """Delete an item from a collection

    :param item_uri: the URI that references the item
    :type item_uri: String
    :rtype: String
    :returns: a message confirming that the metadata is modified
    :raises: APIError if the request was not successful
    """
    response = self.api_request(item_uri, method='DELETE')
    return self.__check_success(response)
|
[
"def",
"delete_item",
"(",
"self",
",",
"item_uri",
")",
":",
"response",
"=",
"self",
".",
"api_request",
"(",
"item_uri",
",",
"method",
"=",
"'DELETE'",
")",
"return",
"self",
".",
"__check_success",
"(",
"response",
")"
] | 30.642857 | 20.5 |
def bezier_radialrange(seg, origin, return_all_global_extrema=False):
    """returns the tuples (d_min, t_min) and (d_max, t_max) which minimize and
    maximize, respectively, the distance d = |self.point(t)-origin|.

    return_all_global_extrema: Multiple such t_min or t_max values can exist.
    By default, this will only return one. Set return_all_global_extrema=True
    to return all such global extrema."""

    def _radius(tau):
        return abs(seg.point(tau) - origin)

    shifted_seg_poly = seg.poly() - origin
    r_squared = real(shifted_seg_poly) ** 2 + \
                imag(shifted_seg_poly) ** 2
    extremizers = [0, 1] + polyroots01(r_squared.deriv())
    extrema = [(_radius(t), t) for t in extremizers]

    if return_all_global_extrema:
        raise NotImplementedError
    else:
        seg_global_min = min(extrema, key=itemgetter(0))
        seg_global_max = max(extrema, key=itemgetter(0))
        return seg_global_min, seg_global_max
|
[
"def",
"bezier_radialrange",
"(",
"seg",
",",
"origin",
",",
"return_all_global_extrema",
"=",
"False",
")",
":",
"def",
"_radius",
"(",
"tau",
")",
":",
"return",
"abs",
"(",
"seg",
".",
"point",
"(",
"tau",
")",
"-",
"origin",
")",
"shifted_seg_poly",
"=",
"seg",
".",
"poly",
"(",
")",
"-",
"origin",
"r_squared",
"=",
"real",
"(",
"shifted_seg_poly",
")",
"**",
"2",
"+",
"imag",
"(",
"shifted_seg_poly",
")",
"**",
"2",
"extremizers",
"=",
"[",
"0",
",",
"1",
"]",
"+",
"polyroots01",
"(",
"r_squared",
".",
"deriv",
"(",
")",
")",
"extrema",
"=",
"[",
"(",
"_radius",
"(",
"t",
")",
",",
"t",
")",
"for",
"t",
"in",
"extremizers",
"]",
"if",
"return_all_global_extrema",
":",
"raise",
"NotImplementedError",
"else",
":",
"seg_global_min",
"=",
"min",
"(",
"extrema",
",",
"key",
"=",
"itemgetter",
"(",
"0",
")",
")",
"seg_global_max",
"=",
"max",
"(",
"extrema",
",",
"key",
"=",
"itemgetter",
"(",
"0",
")",
")",
"return",
"seg_global_min",
",",
"seg_global_max"
] | 43.090909 | 18.045455 |
def setArrowStyle( self, state ):
    """
    Sets whether or not to use arrows for the grouping mechanism.

    :param      state | <bool>
    """
    self._arrowStyle = state

    if not state:
        self.setStyleSheet('')
    else:
        right = resources.find('img/treeview/triangle_right.png')
        down = resources.find('img/treeview/triangle_down.png')
        opts = (right.replace('\\', '/'), down.replace('\\', '/'))
        self.setStyleSheet(ARROW_STYLESHEET % opts)
|
[
"def",
"setArrowStyle",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"_arrowStyle",
"=",
"state",
"if",
"not",
"state",
":",
"self",
".",
"setStyleSheet",
"(",
"''",
")",
"else",
":",
"right",
"=",
"resources",
".",
"find",
"(",
"'img/treeview/triangle_right.png'",
")",
"down",
"=",
"resources",
".",
"find",
"(",
"'img/treeview/triangle_down.png'",
")",
"opts",
"=",
"(",
"right",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
",",
"down",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
")",
"self",
".",
"setStyleSheet",
"(",
"ARROW_STYLESHEET",
"%",
"opts",
")"
] | 36.733333 | 17.666667 |
def unmasked_blurred_image_of_galaxies_from_psf(self, padded_grid_stack, psf):
    """This is a utility function for the function above, which performs the iteration over each plane's galaxies \
    and computes each galaxy's unmasked blurred image.

    Parameters
    ----------
    padded_grid_stack
    psf : ccd.PSF
        The PSF of the image used for convolution.
    """
    return [padded_grid_stack.unmasked_blurred_image_from_psf_and_unmasked_image(
        psf, image) if not galaxy.has_pixelization else None for galaxy, image in
        zip(self.galaxies, self.image_plane_image_1d_of_galaxies)]
|
[
"def",
"unmasked_blurred_image_of_galaxies_from_psf",
"(",
"self",
",",
"padded_grid_stack",
",",
"psf",
")",
":",
"return",
"[",
"padded_grid_stack",
".",
"unmasked_blurred_image_from_psf_and_unmasked_image",
"(",
"psf",
",",
"image",
")",
"if",
"not",
"galaxy",
".",
"has_pixelization",
"else",
"None",
"for",
"galaxy",
",",
"image",
"in",
"zip",
"(",
"self",
".",
"galaxies",
",",
"self",
".",
"image_plane_image_1d_of_galaxies",
")",
"]"
] | 49.692308 | 24 |
def fit(self, struct1, struct2):
    """
    Fit two structures.

    Args:
        struct1 (Structure): 1st structure
        struct2 (Structure): 2nd structure

    Returns:
        True or False.
    """
    struct1, struct2 = self._process_species([struct1, struct2])

    if not self._subset and self._comparator.get_hash(struct1.composition) \
            != self._comparator.get_hash(struct2.composition):
        return None

    struct1, struct2, fu, s1_supercell = self._preprocess(struct1, struct2)
    match = self._match(struct1, struct2, fu, s1_supercell,
                        break_on_match=True)

    if match is None:
        return False
    else:
        return match[0] <= self.stol
|
[
"def",
"fit",
"(",
"self",
",",
"struct1",
",",
"struct2",
")",
":",
"struct1",
",",
"struct2",
"=",
"self",
".",
"_process_species",
"(",
"[",
"struct1",
",",
"struct2",
"]",
")",
"if",
"not",
"self",
".",
"_subset",
"and",
"self",
".",
"_comparator",
".",
"get_hash",
"(",
"struct1",
".",
"composition",
")",
"!=",
"self",
".",
"_comparator",
".",
"get_hash",
"(",
"struct2",
".",
"composition",
")",
":",
"return",
"None",
"struct1",
",",
"struct2",
",",
"fu",
",",
"s1_supercell",
"=",
"self",
".",
"_preprocess",
"(",
"struct1",
",",
"struct2",
")",
"match",
"=",
"self",
".",
"_match",
"(",
"struct1",
",",
"struct2",
",",
"fu",
",",
"s1_supercell",
",",
"break_on_match",
"=",
"True",
")",
"if",
"match",
"is",
"None",
":",
"return",
"False",
"else",
":",
"return",
"match",
"[",
"0",
"]",
"<=",
"self",
".",
"stol"
] | 30.28 | 21.48 |
def PC_PI_calc(P, TOP, POP):
    """
    Calculate percent chance agreement for Scott's Pi.

    :param P: condition positive
    :type P : dict
    :param TOP: test outcome positive
    :type TOP : dict
    :param POP: population
    :type POP : dict
    :return: percent chance agreement as float
    """
    try:
        result = 0
        for i in P.keys():
            result += ((P[i] + TOP[i]) / (2 * POP[i]))**2
        return result
    except Exception:
        return "None"
|
[
"def",
"PC_PI_calc",
"(",
"P",
",",
"TOP",
",",
"POP",
")",
":",
"try",
":",
"result",
"=",
"0",
"for",
"i",
"in",
"P",
".",
"keys",
"(",
")",
":",
"result",
"+=",
"(",
"(",
"P",
"[",
"i",
"]",
"+",
"TOP",
"[",
"i",
"]",
")",
"/",
"(",
"2",
"*",
"POP",
"[",
"i",
"]",
")",
")",
"**",
"2",
"return",
"result",
"except",
"Exception",
":",
"return",
"\"None\""
] | 24.473684 | 15.947368 |
def analyze_symbol(l, sym, from_ver, to_ver, do_reads=False):
    """
    This is a utility function to produce text output with details about the versions of a given symbol.
    It is useful for debugging corruption issues and to mark corrupted versions.

    Parameters
    ----------
    l : `arctic.store.version_store.VersionStore`
        The VersionStore instance against which the analysis will be run.
    sym : `str`
        The symbol to analyze
    from_ver : `int` or `None`
        The lower bound for the version number we wish to analyze. If None then start from the earliest version.
    to_ver : `int` or `None`
        The upper bound for the version number we wish to analyze. If None then stop at the latest version.
    do_reads : `bool`
        If this flag is set to true, then the corruption check will actually try to read the symbol (slower).
    """
    logging.info('Analyzing symbol {}. Versions range is [v{}, v{}]'.format(sym, from_ver, to_ver))
    prev_rows = 0
    prev_n = 0
    prev_v = None

    logging.info('\nVersions for {}:'.format(sym))
    for v in l._versions.find({'symbol': sym, 'version': {'$gte': from_ver, '$lte': to_ver}},
                              sort=[('version', pymongo.ASCENDING)]):
        n = v.get('version')
        is_deleted = v.get('metadata').get('deleted', False) if v.get('metadata') else False
        if is_deleted:
            matching = 0
        else:
            spec = {'symbol': sym, 'parent': v.get('base_version_id', v['_id']), 'segment': {'$lt': v.get('up_to', 0)}}
            matching = mongo_count(l._collection, filter=spec) if not is_deleted else 0
        base_id = v.get('base_version_id')
        snaps = ['/'.join((str(x), str(x.generation_time))) for x in v.get('parent')] if v.get('parent') else None
        added_rows = v.get('up_to', 0) - prev_rows
        meta_match_with_prev = v.get('metadata') == prev_v.get('metadata') if prev_v else False
        delta_snap_creation = (min([x.generation_time for x in v.get('parent')]) - v['_id'].generation_time).total_seconds() / 60.0 if v.get('parent') else 0.0
        prev_v_diff = 0 if not prev_v else v['version'] - prev_v['version']
        corrupted = not is_deleted and (is_corrupted(l, sym, v) if do_reads else fast_is_corrupted(l, sym, v))
        logging.info(
            "v{: <6} "
            "{: <6} "
            "{: <5} "
            "({: <20}): "
            "expected={: <6} "
            "found={: <6} "
            "last_row={: <10} "
            "new_rows={: <10} "
            "append count={: <10} "
            "append_size={: <10} "
            "type={: <14} {: <14} "
            "base={: <24}/{: <28} "
            "snap={: <30}[{:.1f} mins delayed] "
            "{: <20} "
            "{: <20}".format(
                n,
                prev_v_diff,
                'DEL' if is_deleted else 'ALIVE',
                str(v['_id'].generation_time),
                v.get('segment_count', 0),
                matching,
                v.get('up_to', 0),
                added_rows,
                v.get('append_count'),
                v.get('append_size'),
                v.get('type'),
                'meta-same' if meta_match_with_prev else 'meta-changed',
                str(base_id),
                str(base_id.generation_time) if base_id else '',
                str(snaps),
                delta_snap_creation,
                'PREV_MISSING' if prev_n < n - 1 else '',
                'CORRUPTED VERSION' if corrupted else '')
        )
        prev_rows = v.get('up_to', 0)
        prev_n = n
        prev_v = v

    logging.info('\nSegments for {}:'.format(sym))
    for seg in l._collection.find({'symbol': sym}, sort=[('_id', pymongo.ASCENDING)]):
        logging.info("{: <32} {: <7} {: <10} {: <30}".format(
            hashlib.sha1(seg['sha']).hexdigest(),
            seg.get('segment'),
            'compressed' if seg.get('compressed', False) else 'raw',
            str([str(p) for p in seg.get('parent', [])])
        ))
|
[
"def",
"analyze_symbol",
"(",
"l",
",",
"sym",
",",
"from_ver",
",",
"to_ver",
",",
"do_reads",
"=",
"False",
")",
":",
"logging",
".",
"info",
"(",
"'Analyzing symbol {}. Versions range is [v{}, v{}]'",
".",
"format",
"(",
"sym",
",",
"from_ver",
",",
"to_ver",
")",
")",
"prev_rows",
"=",
"0",
"prev_n",
"=",
"0",
"prev_v",
"=",
"None",
"logging",
".",
"info",
"(",
"'\\nVersions for {}:'",
".",
"format",
"(",
"sym",
")",
")",
"for",
"v",
"in",
"l",
".",
"_versions",
".",
"find",
"(",
"{",
"'symbol'",
":",
"sym",
",",
"'version'",
":",
"{",
"'$gte'",
":",
"from_ver",
",",
"'$lte'",
":",
"to_ver",
"}",
"}",
",",
"sort",
"=",
"[",
"(",
"'version'",
",",
"pymongo",
".",
"ASCENDING",
")",
"]",
")",
":",
"n",
"=",
"v",
".",
"get",
"(",
"'version'",
")",
"is_deleted",
"=",
"v",
".",
"get",
"(",
"'metadata'",
")",
".",
"get",
"(",
"'deleted'",
",",
"False",
")",
"if",
"v",
".",
"get",
"(",
"'metadata'",
")",
"else",
"False",
"if",
"is_deleted",
":",
"matching",
"=",
"0",
"else",
":",
"spec",
"=",
"{",
"'symbol'",
":",
"sym",
",",
"'parent'",
":",
"v",
".",
"get",
"(",
"'base_version_id'",
",",
"v",
"[",
"'_id'",
"]",
")",
",",
"'segment'",
":",
"{",
"'$lt'",
":",
"v",
".",
"get",
"(",
"'up_to'",
",",
"0",
")",
"}",
"}",
"matching",
"=",
"mongo_count",
"(",
"l",
".",
"_collection",
",",
"filter",
"=",
"spec",
")",
"if",
"not",
"is_deleted",
"else",
"0",
"base_id",
"=",
"v",
".",
"get",
"(",
"'base_version_id'",
")",
"snaps",
"=",
"[",
"'/'",
".",
"join",
"(",
"(",
"str",
"(",
"x",
")",
",",
"str",
"(",
"x",
".",
"generation_time",
")",
")",
")",
"for",
"x",
"in",
"v",
".",
"get",
"(",
"'parent'",
")",
"]",
"if",
"v",
".",
"get",
"(",
"'parent'",
")",
"else",
"None",
"added_rows",
"=",
"v",
".",
"get",
"(",
"'up_to'",
",",
"0",
")",
"-",
"prev_rows",
"meta_match_with_prev",
"=",
"v",
".",
"get",
"(",
"'metadata'",
")",
"==",
"prev_v",
".",
"get",
"(",
"'metadata'",
")",
"if",
"prev_v",
"else",
"False",
"delta_snap_creation",
"=",
"(",
"min",
"(",
"[",
"x",
".",
"generation_time",
"for",
"x",
"in",
"v",
".",
"get",
"(",
"'parent'",
")",
"]",
")",
"-",
"v",
"[",
"'_id'",
"]",
".",
"generation_time",
")",
".",
"total_seconds",
"(",
")",
"/",
"60.0",
"if",
"v",
".",
"get",
"(",
"'parent'",
")",
"else",
"0.0",
"prev_v_diff",
"=",
"0",
"if",
"not",
"prev_v",
"else",
"v",
"[",
"'version'",
"]",
"-",
"prev_v",
"[",
"'version'",
"]",
"corrupted",
"=",
"not",
"is_deleted",
"and",
"(",
"is_corrupted",
"(",
"l",
",",
"sym",
",",
"v",
")",
"if",
"do_reads",
"else",
"fast_is_corrupted",
"(",
"l",
",",
"sym",
",",
"v",
")",
")",
"logging",
".",
"info",
"(",
"\"v{: <6} \"",
"\"{: <6} \"",
"\"{: <5} \"",
"\"({: <20}): \"",
"\"expected={: <6} \"",
"\"found={: <6} \"",
"\"last_row={: <10} \"",
"\"new_rows={: <10} \"",
"\"append count={: <10} \"",
"\"append_size={: <10} \"",
"\"type={: <14} {: <14} \"",
"\"base={: <24}/{: <28} \"",
"\"snap={: <30}[{:.1f} mins delayed] \"",
"\"{: <20} \"",
"\"{: <20}\"",
".",
"format",
"(",
"n",
",",
"prev_v_diff",
",",
"'DEL'",
"if",
"is_deleted",
"else",
"'ALIVE'",
",",
"str",
"(",
"v",
"[",
"'_id'",
"]",
".",
"generation_time",
")",
",",
"v",
".",
"get",
"(",
"'segment_count'",
",",
"0",
")",
",",
"matching",
",",
"v",
".",
"get",
"(",
"'up_to'",
",",
"0",
")",
",",
"added_rows",
",",
"v",
".",
"get",
"(",
"'append_count'",
")",
",",
"v",
".",
"get",
"(",
"'append_size'",
")",
",",
"v",
".",
"get",
"(",
"'type'",
")",
",",
"'meta-same'",
"if",
"meta_match_with_prev",
"else",
"'meta-changed'",
",",
"str",
"(",
"base_id",
")",
",",
"str",
"(",
"base_id",
".",
"generation_time",
")",
"if",
"base_id",
"else",
"''",
",",
"str",
"(",
"snaps",
")",
",",
"delta_snap_creation",
",",
"'PREV_MISSING'",
"if",
"prev_n",
"<",
"n",
"-",
"1",
"else",
"''",
",",
"'CORRUPTED VERSION'",
"if",
"corrupted",
"else",
"''",
")",
")",
"prev_rows",
"=",
"v",
".",
"get",
"(",
"'up_to'",
",",
"0",
")",
"prev_n",
"=",
"n",
"prev_v",
"=",
"v",
"logging",
".",
"info",
"(",
"'\\nSegments for {}:'",
".",
"format",
"(",
"sym",
")",
")",
"for",
"seg",
"in",
"l",
".",
"_collection",
".",
"find",
"(",
"{",
"'symbol'",
":",
"sym",
"}",
",",
"sort",
"=",
"[",
"(",
"'_id'",
",",
"pymongo",
".",
"ASCENDING",
")",
"]",
")",
":",
"logging",
".",
"info",
"(",
"\"{: <32} {: <7} {: <10} {: <30}\"",
".",
"format",
"(",
"hashlib",
".",
"sha1",
"(",
"seg",
"[",
"'sha'",
"]",
")",
".",
"hexdigest",
"(",
")",
",",
"seg",
".",
"get",
"(",
"'segment'",
")",
",",
"'compressed'",
"if",
"seg",
".",
"get",
"(",
"'compressed'",
",",
"False",
")",
"else",
"'raw'",
",",
"str",
"(",
"[",
"str",
"(",
"p",
")",
"for",
"p",
"in",
"seg",
".",
"get",
"(",
"'parent'",
",",
"[",
"]",
")",
"]",
")",
")",
")"
] | 41.357895 | 25.652632 |
def _create_sbatch(self, ostr):
    """Write sbatch template to output stream

    :param ostr: opened file to write to
    """
    properties = dict(
        sbatch_arguments=self.sbatch_args, hpcbench_command=self.hpcbench_cmd
    )
    try:
        self.sbatch_template.stream(**properties).dump(ostr)
    except jinja2.exceptions.UndefinedError:
        self.logger.error('Error while generating SBATCH template:')
        self.logger.error('%%<--------' * 5)
        for line in self.sbatch_template_str.splitlines():
            self.logger.error(line)
        self.logger.error('%%<--------' * 5)
        self.logger.error('Template properties: %s', properties)
        raise
|
[
"def",
"_create_sbatch",
"(",
"self",
",",
"ostr",
")",
":",
"properties",
"=",
"dict",
"(",
"sbatch_arguments",
"=",
"self",
".",
"sbatch_args",
",",
"hpcbench_command",
"=",
"self",
".",
"hpcbench_cmd",
")",
"try",
":",
"self",
".",
"sbatch_template",
".",
"stream",
"(",
"*",
"*",
"properties",
")",
".",
"dump",
"(",
"ostr",
")",
"except",
"jinja2",
".",
"exceptions",
".",
"UndefinedError",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Error while generating SBATCH template:'",
")",
"self",
".",
"logger",
".",
"error",
"(",
"'%%<--------'",
"*",
"5",
")",
"for",
"line",
"in",
"self",
".",
"sbatch_template_str",
".",
"splitlines",
"(",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"line",
")",
"self",
".",
"logger",
".",
"error",
"(",
"'%%<--------'",
"*",
"5",
")",
"self",
".",
"logger",
".",
"error",
"(",
"'Template properties: %s'",
",",
"properties",
")",
"raise"
] | 42.882353 | 16.529412 |
def filter_enzyme_kinase(stmts_in, **kwargs):
    """Filter Phosphorylations to ones where the enzyme is a known kinase.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to remove ' % len(stmts_in) +
                'phosphorylation by non-kinases...')
    path = os.path.dirname(os.path.abspath(__file__))
    kinase_table = read_unicode_csv(path + '/../resources/kinases.tsv',
                                    delimiter='\t')
    gene_names = [lin[1] for lin in list(kinase_table)[1:]]
    stmts_out = []
    for st in stmts_in:
        if isinstance(st, Phosphorylation):
            if st.enz is not None:
                if st.enz.name in gene_names:
                    stmts_out.append(st)
        else:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
|
[
"def",
"filter_enzyme_kinase",
"(",
"stmts_in",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"info",
"(",
"'Filtering %d statements to remove '",
"%",
"len",
"(",
"stmts_in",
")",
"+",
"'phosphorylation by non-kinases...'",
")",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"kinase_table",
"=",
"read_unicode_csv",
"(",
"path",
"+",
"'/../resources/kinases.tsv'",
",",
"delimiter",
"=",
"'\\t'",
")",
"gene_names",
"=",
"[",
"lin",
"[",
"1",
"]",
"for",
"lin",
"in",
"list",
"(",
"kinase_table",
")",
"[",
"1",
":",
"]",
"]",
"stmts_out",
"=",
"[",
"]",
"for",
"st",
"in",
"stmts_in",
":",
"if",
"isinstance",
"(",
"st",
",",
"Phosphorylation",
")",
":",
"if",
"st",
".",
"enz",
"is",
"not",
"None",
":",
"if",
"st",
".",
"enz",
".",
"name",
"in",
"gene_names",
":",
"stmts_out",
".",
"append",
"(",
"st",
")",
"else",
":",
"stmts_out",
".",
"append",
"(",
"st",
")",
"logger",
".",
"info",
"(",
"'%d statements after filter...'",
"%",
"len",
"(",
"stmts_out",
")",
")",
"dump_pkl",
"=",
"kwargs",
".",
"get",
"(",
"'save'",
")",
"if",
"dump_pkl",
":",
"dump_statements",
"(",
"stmts_out",
",",
"dump_pkl",
")",
"return",
"stmts_out"
] | 36.029412 | 16 |
def to_dict(self):
    """Convert this Node to a dict representation for passing to the API."""
    return {"address": self.address,
            "port": self.port,
            "condition": self.condition,
            "type": self.type,
            "id": self.id,
            }
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"return",
"{",
"\"address\"",
":",
"self",
".",
"address",
",",
"\"port\"",
":",
"self",
".",
"port",
",",
"\"condition\"",
":",
"self",
".",
"condition",
",",
"\"type\"",
":",
"self",
".",
"type",
",",
"\"id\"",
":",
"self",
".",
"id",
",",
"}"
] | 37.125 | 8.875 |
def import_classes(name, currmodule):
    # type: (unicode, unicode) -> Any
    """Import a class using its fully-qualified *name*."""
    target = None

    # import class or module using currmodule
    if currmodule:
        target = try_import(currmodule + '.' + name)

    # import class or module without currmodule
    if target is None:
        target = try_import(name)

    if target is None:
        raise InheritanceException(
            'Could not import class or module %r specified for '
            'inheritance diagram' % name)

    if inspect.isclass(target):
        # If imported object is a class, just return it
        return [target]
    elif inspect.ismodule(target):
        # If imported object is a module, return classes defined on it
        classes = []
        for cls in target.__dict__.values():
            if inspect.isclass(cls) and cls_is_in_module(cls, mod=target):
                classes.append(cls)
        return classes
    raise InheritanceException('%r specified for inheritance diagram is '
                               'not a class or module' % name)
|
[
"def",
"import_classes",
"(",
"name",
",",
"currmodule",
")",
":",
"# type: (unicode, unicode) -> Any",
"target",
"=",
"None",
"# import class or module using currmodule",
"if",
"currmodule",
":",
"target",
"=",
"try_import",
"(",
"currmodule",
"+",
"'.'",
"+",
"name",
")",
"# import class or module without currmodule",
"if",
"target",
"is",
"None",
":",
"target",
"=",
"try_import",
"(",
"name",
")",
"if",
"target",
"is",
"None",
":",
"raise",
"InheritanceException",
"(",
"'Could not import class or module %r specified for '",
"'inheritance diagram'",
"%",
"name",
")",
"if",
"inspect",
".",
"isclass",
"(",
"target",
")",
":",
"# If imported object is a class, just return it",
"return",
"[",
"target",
"]",
"elif",
"inspect",
".",
"ismodule",
"(",
"target",
")",
":",
"# If imported object is a module, return classes defined on it",
"classes",
"=",
"[",
"]",
"for",
"cls",
"in",
"target",
".",
"__dict__",
".",
"values",
"(",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"cls",
")",
"and",
"cls_is_in_module",
"(",
"cls",
",",
"mod",
"=",
"target",
")",
":",
"classes",
".",
"append",
"(",
"cls",
")",
"return",
"classes",
"raise",
"InheritanceException",
"(",
"'%r specified for inheritance diagram is '",
"'not a class or module'",
"%",
"name",
")"
] | 35.7 | 17.366667 |
def delete(self):
    """
    Delete this :term:`Metrics Context` resource.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    self.manager.session.delete(self.uri)
    self.manager._metrics_contexts.remove(self)
|
[
"def",
"delete",
"(",
"self",
")",
":",
"self",
".",
"manager",
".",
"session",
".",
"delete",
"(",
"self",
".",
"uri",
")",
"self",
".",
"manager",
".",
"_metrics_contexts",
".",
"remove",
"(",
"self",
")"
] | 27.846154 | 12.769231 |
def read(self, structure):
    """ Read and advance. """
    start = self.offset
    self.skip(structure.size)
    return structure.read(self.buf, start)
|
[
"def",
"read",
"(",
"self",
",",
"structure",
")",
":",
"start",
"=",
"self",
".",
"offset",
"self",
".",
"skip",
"(",
"structure",
".",
"size",
")",
"return",
"structure",
".",
"read",
"(",
"self",
".",
"buf",
",",
"start",
")"
] | 33 | 8 |
def do_cat(self, subcmd, opts, *args):
    """Output the content of specified files or URLs.

    usage:
        cat TARGET...

    ${cmd_option_list}
    """
    print "'svn %s' opts: %s" % (subcmd, opts)
    print "'svn %s' args: %s" % (subcmd, args)
|
[
"def",
"do_cat",
"(",
"self",
",",
"subcmd",
",",
"opts",
",",
"*",
"args",
")",
":",
"print",
"\"'svn %s' opts: %s\"",
"%",
"(",
"subcmd",
",",
"opts",
")",
"print",
"\"'svn %s' args: %s\"",
"%",
"(",
"subcmd",
",",
"args",
")"
] | 27.9 | 14.9 |
def GetMacAddresses(self):
    """MAC addresses from all interfaces."""
    result = set()
    for interface in self.interfaces:
        if (interface.mac_address and
                interface.mac_address != b"\x00" * len(interface.mac_address)):
            result.add(Text(interface.mac_address.human_readable_address))
    return sorted(result)
|
[
"def",
"GetMacAddresses",
"(",
"self",
")",
":",
"result",
"=",
"set",
"(",
")",
"for",
"interface",
"in",
"self",
".",
"interfaces",
":",
"if",
"(",
"interface",
".",
"mac_address",
"and",
"interface",
".",
"mac_address",
"!=",
"b\"\\x00\"",
"*",
"len",
"(",
"interface",
".",
"mac_address",
")",
")",
":",
"result",
".",
"add",
"(",
"Text",
"(",
"interface",
".",
"mac_address",
".",
"human_readable_address",
")",
")",
"return",
"sorted",
"(",
"result",
")"
] | 41 | 15.25 |
def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
      ZipFileEntry: a file entry or None.
    """
    path_spec = zip_path_spec.ZipPathSpec(
        location=self.LOCATION_ROOT, parent=self._path_spec.parent)
    return self.GetFileEntryByPathSpec(path_spec)
|
[
"def",
"GetRootFileEntry",
"(",
"self",
")",
":",
"path_spec",
"=",
"zip_path_spec",
".",
"ZipPathSpec",
"(",
"location",
"=",
"self",
".",
"LOCATION_ROOT",
",",
"parent",
"=",
"self",
".",
"_path_spec",
".",
"parent",
")",
"return",
"self",
".",
"GetFileEntryByPathSpec",
"(",
"path_spec",
")"
] | 31.333333 | 13.333333 |
def _depend_on_lambda_permissions_using_tag(self, bucket, permission):
    """
    Since conditional DependsOn is not supported, this undocumented way of
    implicitly making a dependency through tags is used.

    See https://stackoverflow.com/questions/34607476/cloudformation-apply-condition-on-dependson

    It is done by using Ref wrapped in a conditional Fn::If. Using Ref implies a
    dependency, so CloudFormation will automatically wait once it reaches that function, the same
    as if you were using a DependsOn.
    """
    properties = bucket.get('Properties', None)
    if properties is None:
        properties = {}
        bucket['Properties'] = properties
    tags = properties.get('Tags', None)
    if tags is None:
        tags = []
        properties['Tags'] = tags
    dep_tag = {
        'sam:ConditionalDependsOn:' + permission.logical_id: {
            'Fn::If': [
                permission.resource_attributes[CONDITION],
                ref(permission.logical_id),
                'no dependency'
            ]
        }
    }
    properties['Tags'] = tags + get_tag_list(dep_tag)
    return bucket
|
[
"def",
"_depend_on_lambda_permissions_using_tag",
"(",
"self",
",",
"bucket",
",",
"permission",
")",
":",
"properties",
"=",
"bucket",
".",
"get",
"(",
"'Properties'",
",",
"None",
")",
"if",
"properties",
"is",
"None",
":",
"properties",
"=",
"{",
"}",
"bucket",
"[",
"'Properties'",
"]",
"=",
"properties",
"tags",
"=",
"properties",
".",
"get",
"(",
"'Tags'",
",",
"None",
")",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"[",
"]",
"properties",
"[",
"'Tags'",
"]",
"=",
"tags",
"dep_tag",
"=",
"{",
"'sam:ConditionalDependsOn:'",
"+",
"permission",
".",
"logical_id",
":",
"{",
"'Fn::If'",
":",
"[",
"permission",
".",
"resource_attributes",
"[",
"CONDITION",
"]",
",",
"ref",
"(",
"permission",
".",
"logical_id",
")",
",",
"'no dependency'",
"]",
"}",
"}",
"properties",
"[",
"'Tags'",
"]",
"=",
"tags",
"+",
"get_tag_list",
"(",
"dep_tag",
")",
"return",
"bucket"
] | 40.2 | 20.8 |
def row_structural_typicality(self, X_L_list, X_D_list, row_id):
    """Returns the typicality (opposite of anomalousness) of given row.

    :param row_id: id of the target row
    :type row_id: int

    :returns: float, the typicality, from 0 to 1
    """
    return su.row_structural_typicality(X_L_list, X_D_list, row_id)
|
[
"def",
"row_structural_typicality",
"(",
"self",
",",
"X_L_list",
",",
"X_D_list",
",",
"row_id",
")",
":",
"return",
"su",
".",
"row_structural_typicality",
"(",
"X_L_list",
",",
"X_D_list",
",",
"row_id",
")"
] | 37.888889 | 18.333333 |
def _check_mappings(self, doc_type, body):
    """
    We desire to index content so that anything we want to be textually searchable (and therefore needing to be
    analysed), but the other fields are designed to be filters, and only require an exact match. So, we want to
    set up the mappings for these fields as "not_analyzed" - this will allow our filters to work faster because
    they only have to work off exact matches
    """
    # Make fields other than content be indexed as unanalyzed terms - content
    # contains fields that are to be analyzed
    exclude_fields = ["content"]
    field_properties = getattr(settings, "ELASTIC_FIELD_MAPPINGS", {})

    def field_property(field_name, field_value):
        """
        Prepares field as property syntax for providing correct mapping desired for field

        Mappings format in elasticsearch is as follows:
        {
           "doc_type": {
              "properties": {
                 "nested_property": {
                    "properties": {
                       "an_analysed_property": {
                          "type": "string"
                       },
                       "another_analysed_property": {
                          "type": "string"
                       }
                    }
                 },
                 "a_not_analysed_property": {
                    "type": "string",
                    "index": "not_analyzed"
                 },
                 "a_date_property": {
                    "type": "date"
                 }
              }
           }
        }

        We can only add new ones, but the format is the same
        """
        prop_val = None
        if field_name in field_properties:
            prop_val = field_properties[field_name]
        elif isinstance(field_value, dict):
            props = {fn: field_property(fn, field_value[fn]) for fn in field_value}
            prop_val = {"properties": props}
        else:
            prop_val = {
                "type": "string",
                "index": "not_analyzed",
            }
        return prop_val

    new_properties = {
        field: field_property(field, value)
        for field, value in body.items()
        if (field not in exclude_fields) and (field not in self._get_mappings(doc_type).get('properties', {}))
    }

    if new_properties:
        self._es.indices.put_mapping(
            index=self.index_name,
            doc_type=doc_type,
            body={
                doc_type: {
                    "properties": new_properties,
                }
            }
        )
        self._clear_mapping(doc_type)
|
[
"def",
"_check_mappings",
"(",
"self",
",",
"doc_type",
",",
"body",
")",
":",
"# Make fields other than content be indexed as unanalyzed terms - content",
"# contains fields that are to be analyzed",
"exclude_fields",
"=",
"[",
"\"content\"",
"]",
"field_properties",
"=",
"getattr",
"(",
"settings",
",",
"\"ELASTIC_FIELD_MAPPINGS\"",
",",
"{",
"}",
")",
"def",
"field_property",
"(",
"field_name",
",",
"field_value",
")",
":",
"\"\"\"\n Prepares field as property syntax for providing correct mapping desired for field\n\n Mappings format in elasticsearch is as follows:\n {\n \"doc_type\": {\n \"properties\": {\n \"nested_property\": {\n \"properties\": {\n \"an_analysed_property\": {\n \"type\": \"string\"\n },\n \"another_analysed_property\": {\n \"type\": \"string\"\n }\n }\n },\n \"a_not_analysed_property\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n },\n \"a_date_property\": {\n \"type\": \"date\"\n }\n }\n }\n }\n\n We can only add new ones, but the format is the same\n \"\"\"",
"prop_val",
"=",
"None",
"if",
"field_name",
"in",
"field_properties",
":",
"prop_val",
"=",
"field_properties",
"[",
"field_name",
"]",
"elif",
"isinstance",
"(",
"field_value",
",",
"dict",
")",
":",
"props",
"=",
"{",
"fn",
":",
"field_property",
"(",
"fn",
",",
"field_value",
"[",
"fn",
"]",
")",
"for",
"fn",
"in",
"field_value",
"}",
"prop_val",
"=",
"{",
"\"properties\"",
":",
"props",
"}",
"else",
":",
"prop_val",
"=",
"{",
"\"type\"",
":",
"\"string\"",
",",
"\"index\"",
":",
"\"not_analyzed\"",
",",
"}",
"return",
"prop_val",
"new_properties",
"=",
"{",
"field",
":",
"field_property",
"(",
"field",
",",
"value",
")",
"for",
"field",
",",
"value",
"in",
"body",
".",
"items",
"(",
")",
"if",
"(",
"field",
"not",
"in",
"exclude_fields",
")",
"and",
"(",
"field",
"not",
"in",
"self",
".",
"_get_mappings",
"(",
"doc_type",
")",
".",
"get",
"(",
"'properties'",
",",
"{",
"}",
")",
")",
"}",
"if",
"new_properties",
":",
"self",
".",
"_es",
".",
"indices",
".",
"put_mapping",
"(",
"index",
"=",
"self",
".",
"index_name",
",",
"doc_type",
"=",
"doc_type",
",",
"body",
"=",
"{",
"doc_type",
":",
"{",
"\"properties\"",
":",
"new_properties",
",",
"}",
"}",
")",
"self",
".",
"_clear_mapping",
"(",
"doc_type",
")"
] | 37.6 | 18.906667 |
def _load_simple_section_questions(self, item_ids):
    """For loading the simple section case (common)

    just load the questions for the section, and insert the one part
    into assessment part map.
    """
    self._insert_part_map(
        get_default_part_map(self._assessment_part_id,
                             0,
                             self._assessment_part.are_items_sequential()))
    lookup_session = self._get_item_lookup_session()
    items = lookup_session.get_items_by_ids(item_ids)
    display_num = 1
    for item in items:
        question_id = item.get_question().get_id()
        self._my_map['questions'].append(get_default_question_map(
            item.get_id(),
            question_id,
            self._assessment_part_id,
            [display_num]))
        display_num += 1
    self._save()
|
[
"def",
"_load_simple_section_questions",
"(",
"self",
",",
"item_ids",
")",
":",
"self",
".",
"_insert_part_map",
"(",
"get_default_part_map",
"(",
"self",
".",
"_assessment_part_id",
",",
"0",
",",
"self",
".",
"_assessment_part",
".",
"are_items_sequential",
"(",
")",
")",
")",
"lookup_session",
"=",
"self",
".",
"_get_item_lookup_session",
"(",
")",
"items",
"=",
"lookup_session",
".",
"get_items_by_ids",
"(",
"item_ids",
")",
"display_num",
"=",
"1",
"for",
"item",
"in",
"items",
":",
"question_id",
"=",
"item",
".",
"get_question",
"(",
")",
".",
"get_id",
"(",
")",
"self",
".",
"_my_map",
"[",
"'questions'",
"]",
".",
"append",
"(",
"get_default_question_map",
"(",
"item",
".",
"get_id",
"(",
")",
",",
"question_id",
",",
"self",
".",
"_assessment_part_id",
",",
"[",
"display_num",
"]",
")",
")",
"display_num",
"+=",
"1",
"self",
".",
"_save",
"(",
")"
] | 38.608696 | 16.26087 |
def pagedump(self):
    """
    dump the contents of all pages, ignoring links between pages,
    this will enable you to view contents of pages which have become
    lost due to datacorruption.
    """
    self.fh.seek(self.pagesize)
    pn = 1
    while True:
        try:
            pagedata = self.fh.read(self.pagesize)
            if len(pagedata) == 0:
                break
            elif len(pagedata) != self.pagesize:
                print("%06x: incomplete - %d bytes ( pagesize = %d )" % (pn, len(pagedata), self.pagesize))
                break
            elif pagedata == b'\x00' * self.pagesize:
                print("%06x: empty" % (pn))
            else:
                page = self.page(pagedata)
                print("%06x: preceeding = %06x, reccount = %04x" % (pn, page.preceeding, page.count))
                for ent in page.index:
                    print(" %s" % ent)
        except Exception as e:
            print("%06x: ERROR decoding as B-tree page: %s" % (pn, e))
        pn += 1
|
[
"def",
"pagedump",
"(",
"self",
")",
":",
"self",
".",
"fh",
".",
"seek",
"(",
"self",
".",
"pagesize",
")",
"pn",
"=",
"1",
"while",
"True",
":",
"try",
":",
"pagedata",
"=",
"self",
".",
"fh",
".",
"read",
"(",
"self",
".",
"pagesize",
")",
"if",
"len",
"(",
"pagedata",
")",
"==",
"0",
":",
"break",
"elif",
"len",
"(",
"pagedata",
")",
"!=",
"self",
".",
"pagesize",
":",
"print",
"(",
"\"%06x: incomplete - %d bytes ( pagesize = %d )\"",
"%",
"(",
"pn",
",",
"len",
"(",
"pagedata",
")",
",",
"self",
".",
"pagesize",
")",
")",
"break",
"elif",
"pagedata",
"==",
"b'\\x00'",
"*",
"self",
".",
"pagesize",
":",
"print",
"(",
"\"%06x: empty\"",
"%",
"(",
"pn",
")",
")",
"else",
":",
"page",
"=",
"self",
".",
"page",
"(",
"pagedata",
")",
"print",
"(",
"\"%06x: preceeding = %06x, reccount = %04x\"",
"%",
"(",
"pn",
",",
"page",
".",
"preceeding",
",",
"page",
".",
"count",
")",
")",
"for",
"ent",
"in",
"page",
".",
"index",
":",
"print",
"(",
"\" %s\"",
"%",
"ent",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"%06x: ERROR decoding as B-tree page: %s\"",
"%",
"(",
"pn",
",",
"e",
")",
")",
"pn",
"+=",
"1"
] | 41.555556 | 19.037037 |
def get_letters_iterable( word ):
    """ splits the word into a character-list of tamil/english
    characters present in the stream """

    WLEN, idx = len(word), 0
    while (idx < WLEN):
        c = word[idx]
        #print(idx,hex(ord(c)),len(ta_letters))
        if c in uyir_letter_set or c == ayudha_letter:
            idx = idx + 1
            yield c
        elif c in grantha_agaram_set:
            if idx + 1 < WLEN and word[idx+1] in all_symbol_set:
                c2 = word[idx+1]
                idx = idx + 2
                yield (c + c2)
            else:
                idx = idx + 1
                yield c
        else:
            idx = idx + 1
            yield c
    return
|
[
"def",
"get_letters_iterable",
"(",
"word",
")",
":",
"WLEN",
",",
"idx",
"=",
"len",
"(",
"word",
")",
",",
"0",
"while",
"(",
"idx",
"<",
"WLEN",
")",
":",
"c",
"=",
"word",
"[",
"idx",
"]",
"#print(idx,hex(ord(c)),len(ta_letters))",
"if",
"c",
"in",
"uyir_letter_set",
"or",
"c",
"==",
"ayudha_letter",
":",
"idx",
"=",
"idx",
"+",
"1",
"yield",
"c",
"elif",
"c",
"in",
"grantha_agaram_set",
":",
"if",
"idx",
"+",
"1",
"<",
"WLEN",
"and",
"word",
"[",
"idx",
"+",
"1",
"]",
"in",
"all_symbol_set",
":",
"c2",
"=",
"word",
"[",
"idx",
"+",
"1",
"]",
"idx",
"=",
"idx",
"+",
"2",
"yield",
"(",
"c",
"+",
"c2",
")",
"else",
":",
"idx",
"=",
"idx",
"+",
"1",
"yield",
"c",
"else",
":",
"idx",
"=",
"idx",
"+",
"1",
"yield",
"c",
"return"
] | 29.478261 | 15.391304 |
def _fix_reindent(self, result):
    """Fix a badly indented line.

    This is done by adding or removing from its initial indent only.
    """
    num_indent_spaces = int(result['info'].split()[1])
    line_index = result['line'] - 1
    target = self.source[line_index]

    self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
|
[
"def",
"_fix_reindent",
"(",
"self",
",",
"result",
")",
":",
"num_indent_spaces",
"=",
"int",
"(",
"result",
"[",
"'info'",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
")",
"line_index",
"=",
"result",
"[",
"'line'",
"]",
"-",
"1",
"target",
"=",
"self",
".",
"source",
"[",
"line_index",
"]",
"self",
".",
"source",
"[",
"line_index",
"]",
"=",
"' '",
"*",
"num_indent_spaces",
"+",
"target",
".",
"lstrip",
"(",
")"
] | 33.090909 | 19.454545 |
def extend(self, iterable):
    """
    Add each item from iterable to the end of the list
    """
    with self.lock:
        for item in iterable:
            self.append(item)
|
[
"def",
"extend",
"(",
"self",
",",
"iterable",
")",
":",
"with",
"self",
".",
"lock",
":",
"for",
"item",
"in",
"iterable",
":",
"self",
".",
"append",
"(",
"item",
")"
] | 28 | 8.857143 |
def spitOut(s, file, binary=False, expand=False):
    r"""Write string `s` into `file` (which can be a string (`str` or
    `unicode`) or a `file` instance)."""
    mode = "w" + ["b",""][not binary]
    file = _normalizeToFile(file, mode=mode, expand=expand)
    try: file.write(s)
    finally: file.close()
|
[
"def",
"spitOut",
"(",
"s",
",",
"file",
",",
"binary",
"=",
"False",
",",
"expand",
"=",
"False",
")",
":",
"mode",
"=",
"\"w\"",
"+",
"[",
"\"b\"",
",",
"\"\"",
"]",
"[",
"not",
"binary",
"]",
"file",
"=",
"_normalizeToFile",
"(",
"file",
",",
"mode",
"=",
"mode",
",",
"expand",
"=",
"expand",
")",
"try",
":",
"file",
".",
"write",
"(",
"s",
")",
"finally",
":",
"file",
".",
"close",
"(",
")"
] | 43.571429 | 8.571429 |
def get_prev_sibling_tags(mention):
    """Return the HTML tag of the Mention's previous siblings.

    Previous siblings are Mentions which are at the same level in the HTML tree
    as the given mention, but are declared before the given mention. If a
    candidate is passed in, only the previous siblings of its first Mention are
    considered in the calculation.

    :param mention: The Mention to evaluate
    :rtype: list of strings
    """
    span = _to_span(mention)
    prev_sibling_tags = []
    i = _get_node(span.sentence)
    while i.getprevious() is not None:
        prev_sibling_tags.insert(0, str(i.getprevious().tag))
        i = i.getprevious()
    return prev_sibling_tags
|
[
"def",
"get_prev_sibling_tags",
"(",
"mention",
")",
":",
"span",
"=",
"_to_span",
"(",
"mention",
")",
"prev_sibling_tags",
"=",
"[",
"]",
"i",
"=",
"_get_node",
"(",
"span",
".",
"sentence",
")",
"while",
"i",
".",
"getprevious",
"(",
")",
"is",
"not",
"None",
":",
"prev_sibling_tags",
".",
"insert",
"(",
"0",
",",
"str",
"(",
"i",
".",
"getprevious",
"(",
")",
".",
"tag",
")",
")",
"i",
"=",
"i",
".",
"getprevious",
"(",
")",
"return",
"prev_sibling_tags"
] | 37.722222 | 16.666667 |
def regressfile(filename):
    """
    Run all stories in filename 'filename' in python 2 and 3.
    """
    _storybook({"rewrite": False}).in_filename(filename).with_params(
        **{"python version": "2.7.14"}
    ).filter(
        lambda story: not story.info.get("fails_on_python_2")
    ).ordered_by_name().play()

    _storybook({"rewrite": False}).with_params(
        **{"python version": "3.7.0"}
    ).in_filename(filename).ordered_by_name().play()
|
[
"def",
"regressfile",
"(",
"filename",
")",
":",
"_storybook",
"(",
"{",
"\"rewrite\"",
":",
"False",
"}",
")",
".",
"in_filename",
"(",
"filename",
")",
".",
"with_params",
"(",
"*",
"*",
"{",
"\"python version\"",
":",
"\"2.7.14\"",
"}",
")",
".",
"filter",
"(",
"lambda",
"story",
":",
"not",
"story",
".",
"info",
".",
"get",
"(",
"\"fails_on_python_2\"",
")",
")",
".",
"ordered_by_name",
"(",
")",
".",
"play",
"(",
")",
"_storybook",
"(",
"{",
"\"rewrite\"",
":",
"False",
"}",
")",
".",
"with_params",
"(",
"*",
"*",
"{",
"\"python version\"",
":",
"\"3.7.0\"",
"}",
")",
".",
"in_filename",
"(",
"filename",
")",
".",
"ordered_by_name",
"(",
")",
".",
"play",
"(",
")"
] | 34.461538 | 14.307692 |
async def unformat(self):
    """Unformat this partition."""
    self._data = await self._handler.unformat(
        system_id=self.block_device.node.system_id,
        device_id=self.block_device.id, id=self.id)
|
[
"async",
"def",
"unformat",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"unformat",
"(",
"system_id",
"=",
"self",
".",
"block_device",
".",
"node",
".",
"system_id",
",",
"device_id",
"=",
"self",
".",
"block_device",
".",
"id",
",",
"id",
"=",
"self",
".",
"id",
")"
] | 44.6 | 11 |
def PackageVariable(key, help, default, searchfunc=None):
    # NB: searchfunc is currently undocumented and unsupported
    """
    The input parameters describe a 'package list' option, thus they
    are returned with the correct converter and validator appended. The
    result is usable for input to opts.Add() .

    A 'package list' option may either be 'all', 'none' or a list of
    package names (separated by space).
    """
    help = '\n '.join(
        (help, '( yes | no | /path/to/%s )' % key))
    return (key, help, default,
            lambda k, v, e: _validator(k,v,e,searchfunc),
            _converter)
|
[
"def",
"PackageVariable",
"(",
"key",
",",
"help",
",",
"default",
",",
"searchfunc",
"=",
"None",
")",
":",
"# NB: searchfunc is currently undocumented and unsupported",
"help",
"=",
"'\\n '",
".",
"join",
"(",
"(",
"help",
",",
"'( yes | no | /path/to/%s )'",
"%",
"key",
")",
")",
"return",
"(",
"key",
",",
"help",
",",
"default",
",",
"lambda",
"k",
",",
"v",
",",
"e",
":",
"_validator",
"(",
"k",
",",
"v",
",",
"e",
",",
"searchfunc",
")",
",",
"_converter",
")"
] | 40.8 | 16.133333 |
def parseSOAPMessage(data, ipAddr):
    "parse raw XML data string, return a (minidom) xml document"
    try:
        dom = minidom.parseString(data)
    except Exception:
        #print('Failed to parse message from %s\n"%s": %s' % (ipAddr, data, ex), file=sys.stderr)
        return None
    if dom.getElementsByTagNameNS(NS_S, "Fault"):
        #print('Fault received from %s:' % (ipAddr, data), file=sys.stderr)
        return None
    soapAction = dom.getElementsByTagNameNS(NS_A, "Action")[0].firstChild.data.strip()
    if soapAction == ACTION_PROBE:
        return parseProbeMessage(dom)
    elif soapAction == ACTION_PROBE_MATCH:
        return parseProbeMatchMessage(dom)
    elif soapAction == ACTION_RESOLVE:
        return parseResolveMessage(dom)
    elif soapAction == ACTION_RESOLVE_MATCH:
        return parseResolveMatchMessage(dom)
    elif soapAction == ACTION_BYE:
        return parseByeMessage(dom)
    elif soapAction == ACTION_HELLO:
        return parseHelloMessage(dom)
|
[
"def",
"parseSOAPMessage",
"(",
"data",
",",
"ipAddr",
")",
":",
"try",
":",
"dom",
"=",
"minidom",
".",
"parseString",
"(",
"data",
")",
"except",
"Exception",
":",
"#print('Failed to parse message from %s\\n\"%s\": %s' % (ipAddr, data, ex), file=sys.stderr)",
"return",
"None",
"if",
"dom",
".",
"getElementsByTagNameNS",
"(",
"NS_S",
",",
"\"Fault\"",
")",
":",
"#print('Fault received from %s:' % (ipAddr, data), file=sys.stderr)",
"return",
"None",
"soapAction",
"=",
"dom",
".",
"getElementsByTagNameNS",
"(",
"NS_A",
",",
"\"Action\"",
")",
"[",
"0",
"]",
".",
"firstChild",
".",
"data",
".",
"strip",
"(",
")",
"if",
"soapAction",
"==",
"ACTION_PROBE",
":",
"return",
"parseProbeMessage",
"(",
"dom",
")",
"elif",
"soapAction",
"==",
"ACTION_PROBE_MATCH",
":",
"return",
"parseProbeMatchMessage",
"(",
"dom",
")",
"elif",
"soapAction",
"==",
"ACTION_RESOLVE",
":",
"return",
"parseResolveMessage",
"(",
"dom",
")",
"elif",
"soapAction",
"==",
"ACTION_RESOLVE_MATCH",
":",
"return",
"parseResolveMatchMessage",
"(",
"dom",
")",
"elif",
"soapAction",
"==",
"ACTION_BYE",
":",
"return",
"parseByeMessage",
"(",
"dom",
")",
"elif",
"soapAction",
"==",
"ACTION_HELLO",
":",
"return",
"parseHelloMessage",
"(",
"dom",
")"
] | 37.461538 | 16.615385 |
def remove_behaviour(self, behaviour):
    """
    Removes a behaviour from the agent.
    The behaviour is first killed.

    Args:
        behaviour (spade.behaviour.CyclicBehaviour): the behaviour instance to be removed
    """
    if not self.has_behaviour(behaviour):
        raise ValueError("This behaviour is not registered")
    index = self.behaviours.index(behaviour)
    self.behaviours[index].kill()
    self.behaviours.pop(index)
|
[
"def",
"remove_behaviour",
"(",
"self",
",",
"behaviour",
")",
":",
"if",
"not",
"self",
".",
"has_behaviour",
"(",
"behaviour",
")",
":",
"raise",
"ValueError",
"(",
"\"This behaviour is not registered\"",
")",
"index",
"=",
"self",
".",
"behaviours",
".",
"index",
"(",
"behaviour",
")",
"self",
".",
"behaviours",
"[",
"index",
"]",
".",
"kill",
"(",
")",
"self",
".",
"behaviours",
".",
"pop",
"(",
"index",
")"
] | 33.785714 | 15.071429 |
def call(self):
    """
    call: ['mut'] ID ['(' parameters ')']
    """
    is_mutable = False
    if self.token.nature == Nature.MUT:
        is_mutable = True
        self._process(Nature.MUT)

    identifier = Identifier(name=self.token.value)
    self._process(Nature.ID)

    if self.token.nature == Nature.LPAREN:
        return FunctionCall(identifier=identifier, parameters=self.parameters())
    else:
        return Variable(identifier=identifier, is_mutable=is_mutable)
|
[
"def",
"call",
"(",
"self",
")",
":",
"is_mutable",
"=",
"False",
"if",
"self",
".",
"token",
".",
"nature",
"==",
"Nature",
".",
"MUT",
":",
"is_mutable",
"=",
"True",
"self",
".",
"_process",
"(",
"Nature",
".",
"MUT",
")",
"identifier",
"=",
"Identifier",
"(",
"name",
"=",
"self",
".",
"token",
".",
"value",
")",
"self",
".",
"_process",
"(",
"Nature",
".",
"ID",
")",
"if",
"self",
".",
"token",
".",
"nature",
"==",
"Nature",
".",
"LPAREN",
":",
"return",
"FunctionCall",
"(",
"identifier",
"=",
"identifier",
",",
"parameters",
"=",
"self",
".",
"parameters",
"(",
")",
")",
"else",
":",
"return",
"Variable",
"(",
"identifier",
"=",
"identifier",
",",
"is_mutable",
"=",
"is_mutable",
")"
] | 32.4375 | 17.0625 |
def get_lights(self):
    '''
    Lists all available lights on the bridge.
    '''
    url = '/api/%s/lights' % self.username
    response = self.make_request('GET', url)
    lights = []
    # Did we get a success response back?
    # error responses look like:
    # [{'error': {'address': '/lights',
    #             'description': 'unauthorized user',
    #             'type': 1}}]
    if 'error' in response:
        raise Exception(response['error']['description'])
    for id_, data in response.items():
        lights.append(Light(
            id_,
            data['modelid'],
            data['name'],
            data['state'],
            data['swversion'],
            data['type'],
            data['uniqueid']
        ))
    lights = sorted(lights, key=lambda x: x.light_id)
    self._lights = lights
    return lights
|
[
"def",
"get_lights",
"(",
"self",
")",
":",
"url",
"=",
"'/api/%s/lights'",
"%",
"self",
".",
"username",
"response",
"=",
"self",
".",
"make_request",
"(",
"'GET'",
",",
"url",
")",
"lights",
"=",
"[",
"]",
"# Did we get a success response back?",
"# error responses look like:",
"# [{'error': {'address': '/lights',",
"# 'description': 'unauthorized user',",
"# 'type': 1}}]",
"if",
"'error'",
"in",
"response",
":",
"raise",
"Exception",
"(",
"response",
"[",
"'error'",
"]",
"[",
"'description'",
"]",
")",
"for",
"id_",
",",
"data",
"in",
"response",
".",
"items",
"(",
")",
":",
"lights",
".",
"append",
"(",
"Light",
"(",
"id_",
",",
"data",
"[",
"'modelid'",
"]",
",",
"data",
"[",
"'name'",
"]",
",",
"data",
"[",
"'state'",
"]",
",",
"data",
"[",
"'swversion'",
"]",
",",
"data",
"[",
"'type'",
"]",
",",
"data",
"[",
"'uniqueid'",
"]",
")",
")",
"lights",
"=",
"sorted",
"(",
"lights",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"light_id",
")",
"self",
".",
"_lights",
"=",
"lights",
"return",
"lights"
] | 29.833333 | 15.433333 |
def get_bool(self, key: str) -> Optional[bool]:
    """
    Returns an optional configuration value, as a bool, by its key, or None if it doesn't exist.
    If the configuration value isn't a legal boolean, this function will throw an error.

    :param str key: The requested configuration key.
    :return: The configuration key's value, or None if one does not exist.
    :rtype: Optional[bool]
    :raises ConfigTypeError: The configuration value existed but couldn't be coerced to bool.
    """
    v = self.get(key)
    if v is None:
        return None
    if v in ['true', 'True']:
        return True
    if v in ['false', 'False']:
        return False
    raise ConfigTypeError(self.full_key(key), v, 'bool')
|
[
"def",
"get_bool",
"(",
"self",
",",
"key",
":",
"str",
")",
"->",
"Optional",
"[",
"bool",
"]",
":",
"v",
"=",
"self",
".",
"get",
"(",
"key",
")",
"if",
"v",
"is",
"None",
":",
"return",
"None",
"if",
"v",
"in",
"[",
"'true'",
",",
"'True'",
"]",
":",
"return",
"True",
"if",
"v",
"in",
"[",
"'false'",
",",
"'False'",
"]",
":",
"return",
"False",
"raise",
"ConfigTypeError",
"(",
"self",
".",
"full_key",
"(",
"key",
")",
",",
"v",
",",
"'bool'",
")"
] | 42.555556 | 22 |
def computePointing(self, ra_deg, dec_deg, roll_deg, cartesian=False):
    """Compute a pointing model without changing the internal object pointing"""

    # Roll FOV
    Rrotate = r.rotateInXMat(roll_deg)  # Roll

    # Slew from ra/dec of zero
    Ra = r.rightAscensionRotationMatrix(ra_deg)
    Rd = r.declinationRotationMatrix(dec_deg)
    Rslew = np.dot(Ra, Rd)

    R = np.dot(Rslew, Rrotate)

    slew = self.origin*1
    for i, row in enumerate(self.origin):
        slew[i, 3:6] = np.dot(R, row[3:6])

    if cartesian is False:
        slew = self.getRaDecs(slew)

    return slew
|
[
"def",
"computePointing",
"(",
"self",
",",
"ra_deg",
",",
"dec_deg",
",",
"roll_deg",
",",
"cartesian",
"=",
"False",
")",
":",
"# Roll FOV",
"Rrotate",
"=",
"r",
".",
"rotateInXMat",
"(",
"roll_deg",
")",
"# Roll",
"# Slew from ra/dec of zero",
"Ra",
"=",
"r",
".",
"rightAscensionRotationMatrix",
"(",
"ra_deg",
")",
"Rd",
"=",
"r",
".",
"declinationRotationMatrix",
"(",
"dec_deg",
")",
"Rslew",
"=",
"np",
".",
"dot",
"(",
"Ra",
",",
"Rd",
")",
"R",
"=",
"np",
".",
"dot",
"(",
"Rslew",
",",
"Rrotate",
")",
"slew",
"=",
"self",
".",
"origin",
"*",
"1",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"origin",
")",
":",
"slew",
"[",
"i",
",",
"3",
":",
"6",
"]",
"=",
"np",
".",
"dot",
"(",
"R",
",",
"row",
"[",
"3",
":",
"6",
"]",
")",
"if",
"cartesian",
"is",
"False",
":",
"slew",
"=",
"self",
".",
"getRaDecs",
"(",
"slew",
")",
"return",
"slew"
] | 33 | 16.789474 |
def _build_sectors_and_pages(self, keep_unwritten):
    """! @brief Converts the list of flash operations to flash sectors and pages.

    @param self
    @param keep_unwritten If true, unwritten pages in an erased sector and unwritten
        contents of a modified page will be read from the target and added to the data to be
        programmed.

    @exception FlashFailure Could not get sector or page info for an address.
    """
    assert len(self.flash_operation_list) > 0
    self.program_byte_count = 0

    flash_addr = self.flash_operation_list[0].addr
    sector_info = self.flash.get_sector_info(flash_addr)
    if sector_info is None:
        raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)
    page_info = self.flash.get_page_info(flash_addr)
    if page_info is None:
        raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)

    current_sector = _FlashSector(sector_info)
    self.sector_list.append(current_sector)
    current_page = _FlashPage(page_info)
    current_sector.add_page(current_page)
    self.page_list.append(current_page)

    for flash_operation in self.flash_operation_list:
        pos = 0
        while pos < len(flash_operation.data):
            flash_addr = flash_operation.addr + pos

            # Check if operation is in a different sector.
            if flash_addr >= current_sector.addr + current_sector.size:
                sector_info = self.flash.get_sector_info(flash_addr)
                if sector_info is None:
                    raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)
                current_sector = _FlashSector(sector_info)
                self.sector_list.append(current_sector)

            # Check if operation is in a different page
            if flash_addr >= current_page.addr + current_page.size:
                page_info = self.flash.get_page_info(flash_addr)
                if page_info is None:
                    raise FlashFailure("Attempt to program flash at invalid address 0x%08x" % flash_addr)
                current_page = _FlashPage(page_info)
                current_sector.add_page(current_page)
                self.page_list.append(current_page)

            # Fill the page gap if there is one
            page_data_end = current_page.addr + len(current_page.data)
            if flash_addr != page_data_end:
                old_data_len = flash_addr - page_data_end
                if keep_unwritten:
                    self._enable_read_access()
                    old_data = self.flash.target.read_memory_block8(page_data_end, old_data_len)
                else:
                    old_data = [self.flash.region.erased_byte_value] * old_data_len
                current_page.data.extend(old_data)
                self.program_byte_count += old_data_len

            # Copy data to page and increment pos
            space_left_in_page = page_info.size - len(current_page.data)
            space_left_in_data = len(flash_operation.data) - pos
            amount = min(space_left_in_page, space_left_in_data)
            current_page.data.extend(flash_operation.data[pos:pos + amount])
            self.program_byte_count += amount

            # increment position
            pos += amount

    # Fill the page gap at the end if there is one
    if len(current_page.data) != current_page.size:
        page_data_end = current_page.addr + len(current_page.data)
        old_data_len = current_page.size - len(current_page.data)
        if keep_unwritten and self.flash.region.is_readable:
            self._enable_read_access()
            old_data = self.flash.target.read_memory_block8(page_data_end, old_data_len)
        else:
            old_data = [self.flash.region.erased_byte_value] * old_data_len
        current_page.data.extend(old_data)
        self.program_byte_count += old_data_len

    # Go back through sectors and fill any missing pages with existing data.
    if keep_unwritten and self.flash.region.is_readable:
        self._fill_unwritten_sector_pages()
|
[
"def",
"_build_sectors_and_pages",
"(",
"self",
",",
"keep_unwritten",
")",
":",
"assert",
"len",
"(",
"self",
".",
"flash_operation_list",
")",
">",
"0",
"self",
".",
"program_byte_count",
"=",
"0",
"flash_addr",
"=",
"self",
".",
"flash_operation_list",
"[",
"0",
"]",
".",
"addr",
"sector_info",
"=",
"self",
".",
"flash",
".",
"get_sector_info",
"(",
"flash_addr",
")",
"if",
"sector_info",
"is",
"None",
":",
"raise",
"FlashFailure",
"(",
"\"Attempt to program flash at invalid address 0x%08x\"",
"%",
"flash_addr",
")",
"page_info",
"=",
"self",
".",
"flash",
".",
"get_page_info",
"(",
"flash_addr",
")",
"if",
"page_info",
"is",
"None",
":",
"raise",
"FlashFailure",
"(",
"\"Attempt to program flash at invalid address 0x%08x\"",
"%",
"flash_addr",
")",
"current_sector",
"=",
"_FlashSector",
"(",
"sector_info",
")",
"self",
".",
"sector_list",
".",
"append",
"(",
"current_sector",
")",
"current_page",
"=",
"_FlashPage",
"(",
"page_info",
")",
"current_sector",
".",
"add_page",
"(",
"current_page",
")",
"self",
".",
"page_list",
".",
"append",
"(",
"current_page",
")",
"for",
"flash_operation",
"in",
"self",
".",
"flash_operation_list",
":",
"pos",
"=",
"0",
"while",
"pos",
"<",
"len",
"(",
"flash_operation",
".",
"data",
")",
":",
"flash_addr",
"=",
"flash_operation",
".",
"addr",
"+",
"pos",
"# Check if operation is in a different sector.",
"if",
"flash_addr",
">=",
"current_sector",
".",
"addr",
"+",
"current_sector",
".",
"size",
":",
"sector_info",
"=",
"self",
".",
"flash",
".",
"get_sector_info",
"(",
"flash_addr",
")",
"if",
"sector_info",
"is",
"None",
":",
"raise",
"FlashFailure",
"(",
"\"Attempt to program flash at invalid address 0x%08x\"",
"%",
"flash_addr",
")",
"current_sector",
"=",
"_FlashSector",
"(",
"sector_info",
")",
"self",
".",
"sector_list",
".",
"append",
"(",
"current_sector",
")",
"# Check if operation is in a different page",
"if",
"flash_addr",
">=",
"current_page",
".",
"addr",
"+",
"current_page",
".",
"size",
":",
"page_info",
"=",
"self",
".",
"flash",
".",
"get_page_info",
"(",
"flash_addr",
")",
"if",
"page_info",
"is",
"None",
":",
"raise",
"FlashFailure",
"(",
"\"Attempt to program flash at invalid address 0x%08x\"",
"%",
"flash_addr",
")",
"current_page",
"=",
"_FlashPage",
"(",
"page_info",
")",
"current_sector",
".",
"add_page",
"(",
"current_page",
")",
"self",
".",
"page_list",
".",
"append",
"(",
"current_page",
")",
"# Fill the page gap if there is one",
"page_data_end",
"=",
"current_page",
".",
"addr",
"+",
"len",
"(",
"current_page",
".",
"data",
")",
"if",
"flash_addr",
"!=",
"page_data_end",
":",
"old_data_len",
"=",
"flash_addr",
"-",
"page_data_end",
"if",
"keep_unwritten",
":",
"self",
".",
"_enable_read_access",
"(",
")",
"old_data",
"=",
"self",
".",
"flash",
".",
"target",
".",
"read_memory_block8",
"(",
"page_data_end",
",",
"old_data_len",
")",
"else",
":",
"old_data",
"=",
"[",
"self",
".",
"flash",
".",
"region",
".",
"erased_byte_value",
"]",
"*",
"old_data_len",
"current_page",
".",
"data",
".",
"extend",
"(",
"old_data",
")",
"self",
".",
"program_byte_count",
"+=",
"old_data_len",
"# Copy data to page and increment pos",
"space_left_in_page",
"=",
"page_info",
".",
"size",
"-",
"len",
"(",
"current_page",
".",
"data",
")",
"space_left_in_data",
"=",
"len",
"(",
"flash_operation",
".",
"data",
")",
"-",
"pos",
"amount",
"=",
"min",
"(",
"space_left_in_page",
",",
"space_left_in_data",
")",
"current_page",
".",
"data",
".",
"extend",
"(",
"flash_operation",
".",
"data",
"[",
"pos",
":",
"pos",
"+",
"amount",
"]",
")",
"self",
".",
"program_byte_count",
"+=",
"amount",
"#increment position",
"pos",
"+=",
"amount",
"# Fill the page gap at the end if there is one",
"if",
"len",
"(",
"current_page",
".",
"data",
")",
"!=",
"current_page",
".",
"size",
":",
"page_data_end",
"=",
"current_page",
".",
"addr",
"+",
"len",
"(",
"current_page",
".",
"data",
")",
"old_data_len",
"=",
"current_page",
".",
"size",
"-",
"len",
"(",
"current_page",
".",
"data",
")",
"if",
"keep_unwritten",
"and",
"self",
".",
"flash",
".",
"region",
".",
"is_readable",
":",
"self",
".",
"_enable_read_access",
"(",
")",
"old_data",
"=",
"self",
".",
"flash",
".",
"target",
".",
"read_memory_block8",
"(",
"page_data_end",
",",
"old_data_len",
")",
"else",
":",
"old_data",
"=",
"[",
"self",
".",
"flash",
".",
"region",
".",
"erased_byte_value",
"]",
"*",
"old_data_len",
"current_page",
".",
"data",
".",
"extend",
"(",
"old_data",
")",
"self",
".",
"program_byte_count",
"+=",
"old_data_len",
"# Go back through sectors and fill any missing pages with existing data.",
"if",
"keep_unwritten",
"and",
"self",
".",
"flash",
".",
"region",
".",
"is_readable",
":",
"self",
".",
"_fill_unwritten_sector_pages",
"(",
")"
] | 49.590909 | 23.863636 |
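Illustrative sketch of the page-gap handling performed above: before appending an operation's data, the page buffer is padded up to the write address with the erased-byte value. The addresses and the 0xFF erased value below are assumptions made for the example, not values from the original driver.
ERASED = 0xFF                        # assumed erased_byte_value
page_addr = 0x1000
page_data = [0x01, 0x02]             # bytes already queued for this page
flash_addr = 0x1006                  # the next flash operation starts here
gap = flash_addr - (page_addr + len(page_data))
page_data.extend([ERASED] * gap)     # pad addresses 0x1002..0x1005
page_data.extend([0xAA, 0xBB])       # then append the operation's data
print(len(page_data), [hex(b) for b in page_data])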
def generate_multiple_parameters(self, parameter_id_list):
"""Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
        By default this calls 'generate_parameters()' once for each requested parameter id.
        User code must override either this function or 'generate_parameters()'.
        If there are no more trials, user code should raise an nni.NoMoreTrialError exception in generate_parameters().
        If so, this function will only return the sets of trial (hyper-)parameters that have already been collected.
parameter_id_list: list of int
"""
result = []
for parameter_id in parameter_id_list:
try:
_logger.debug("generating param for {}".format(parameter_id))
res = self.generate_parameters(parameter_id)
except nni.NoMoreTrialError:
return result
result.append(res)
return result
|
[
"def",
"generate_multiple_parameters",
"(",
"self",
",",
"parameter_id_list",
")",
":",
"result",
"=",
"[",
"]",
"for",
"parameter_id",
"in",
"parameter_id_list",
":",
"try",
":",
"_logger",
".",
"debug",
"(",
"\"generating param for {}\"",
".",
"format",
"(",
"parameter_id",
")",
")",
"res",
"=",
"self",
".",
"generate_parameters",
"(",
"parameter_id",
")",
"except",
"nni",
".",
"NoMoreTrialError",
":",
"return",
"result",
"result",
".",
"append",
"(",
"res",
")",
"return",
"result"
] | 53.411765 | 21.941176 |
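A self-contained sketch of the batch-generation contract described above, with an invented ToyTuner and a local stand-in for nni.NoMoreTrialError (the real nni package is not imported here).
class NoMoreTrialError(Exception):
    """Stand-in for nni.NoMoreTrialError."""

class ToyTuner:
    def __init__(self, candidates):
        self._candidates = list(candidates)

    def generate_parameters(self, parameter_id):
        # Hand out one candidate per id; raise when the search space is exhausted.
        if not self._candidates:
            raise NoMoreTrialError()
        return self._candidates.pop(0)

    def generate_multiple_parameters(self, parameter_id_list):
        # Same contract as above: stop collecting once no more trials exist.
        result = []
        for parameter_id in parameter_id_list:
            try:
                result.append(self.generate_parameters(parameter_id))
            except NoMoreTrialError:
                break
        return result

tuner = ToyTuner([{"lr": 0.1}, {"lr": 0.01}])
print(tuner.generate_multiple_parameters([0, 1, 2]))  # only two parameter sets come back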
def oauth_only(function):
"""Decorator to restrict some GitHubTools methods to run only with OAuth"""
def check_for_oauth(self, *args, **kwargs):
"""
Returns False if GitHubTools instance is not authenticated, or return
        the decorated function if it is.
"""
if not self.is_authenticated:
self.oops("To use putgist you have to set your GETGIST_TOKEN")
self.oops("(see `putgist --help` for details)")
return False
return function(self, *args, **kwargs)
return check_for_oauth
|
[
"def",
"oauth_only",
"(",
"function",
")",
":",
"def",
"check_for_oauth",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Returns False if GitHubTools instance is not authenticated, or return\n the decorated fucntion if it is.\n \"\"\"",
"if",
"not",
"self",
".",
"is_authenticated",
":",
"self",
".",
"oops",
"(",
"\"To use putgist you have to set your GETGIST_TOKEN\"",
")",
"self",
".",
"oops",
"(",
"\"(see `putgist --help` for details)\"",
")",
"return",
"False",
"return",
"function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"check_for_oauth"
] | 37.066667 | 15.4 |
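The decorator above is a plain authentication-guard pattern; a minimal self-contained version with an invented class and message text (not the real getgist API) looks like this.
def auth_only(function):
    """Run the wrapped method only when the instance reports it is authenticated."""
    def check_for_auth(self, *args, **kwargs):
        if not self.is_authenticated:
            print("An access token is required for this operation")
            return False
        return function(self, *args, **kwargs)
    return check_for_auth

class Client:
    def __init__(self, token=None):
        self.is_authenticated = token is not None

    @auth_only
    def upload(self, name):
        return "uploaded " + name

print(Client().upload("notes.md"))             # False, plus the warning message
print(Client(token="abc").upload("notes.md"))  # uploaded notes.md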
def getCallerInfo(depth=2):
"""Utility function to get information about function callers
The information is the tuple (function/method name, filename, class)
The class will be None if the caller is just a function and not an object
method.
:param depth: (int) how far back in the callstack to go to extract the caller
info
"""
f = sys._getframe(depth)
method_name = f.f_code.co_name
filename = f.f_code.co_filename
arg_class = None
args = inspect.getargvalues(f)
if len(args[0]) > 0:
arg_name = args[0][0] # potentially the 'self' arg if its a method
arg_class = args[3][arg_name].__class__.__name__
return (method_name, filename, arg_class)
|
[
"def",
"getCallerInfo",
"(",
"depth",
"=",
"2",
")",
":",
"f",
"=",
"sys",
".",
"_getframe",
"(",
"depth",
")",
"method_name",
"=",
"f",
".",
"f_code",
".",
"co_name",
"filename",
"=",
"f",
".",
"f_code",
".",
"co_filename",
"arg_class",
"=",
"None",
"args",
"=",
"inspect",
".",
"getargvalues",
"(",
"f",
")",
"if",
"len",
"(",
"args",
"[",
"0",
"]",
")",
">",
"0",
":",
"arg_name",
"=",
"args",
"[",
"0",
"]",
"[",
"0",
"]",
"# potentially the 'self' arg if its a method",
"arg_class",
"=",
"args",
"[",
"3",
"]",
"[",
"arg_name",
"]",
".",
"__class__",
".",
"__name__",
"return",
"(",
"method_name",
",",
"filename",
",",
"arg_class",
")"
] | 31.857143 | 21.761905 |
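A runnable sketch of the same frame-walking idea, called with depth=1 so that the frame being inspected is the immediate caller; the helper name and the Widget class are invented for the example.
import inspect
import sys

def caller_info(depth=2):
    f = sys._getframe(depth)
    method_name = f.f_code.co_name
    filename = f.f_code.co_filename
    arg_class = None
    args = inspect.getargvalues(f)
    if len(args[0]) > 0:
        first_arg = args[3][args[0][0]]           # value of the first argument ('self' for methods)
        arg_class = first_arg.__class__.__name__  # its class name, e.g. 'Widget'
    return method_name, filename, arg_class

class Widget:
    def poke(self):
        return caller_info(depth=1)  # depth=1: inspect poke() itself

print(Widget().poke())  # ('poke', <path of this file>, 'Widget')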
def format_(blocks):
"""Produce Python module from blocks of tests
Arguments:
blocks (list): Blocks of tests from func:`parse()`
"""
tests = list()
function_count = 0 # For each test to have a unique name
for block in blocks:
# Validate docstring format of body
if not any(line[:3] == ">>>" for line in block["body"]):
# A doctest requires at least one `>>>` directive.
block["body"].insert(0, ">>> assert False, "
"'Body must be in docstring format'\n")
# Validate binding on first line
if not block["binding"] in ("PySide", "PySide2", "PyQt5", "PyQt4"):
block["body"].insert(0, ">>> assert False, "
"'Invalid binding'\n")
if sys.version_info > (3, 4) and block["binding"] in ("PySide"):
# Skip caveat test if it requires PySide on Python > 3.4
continue
else:
function_count += 1
block["header"] = block["header"]
block["count"] = str(function_count)
block["body"] = " ".join(block["body"])
tests.append("""\
def test_{count}_{header}():
'''Test {header}
>>> import os, sys
>>> PYTHON = sys.version_info[0]
>>> long = int if PYTHON == 3 else long
>>> _ = os.environ.pop("QT_VERBOSE", None) # Disable debug output
>>> os.environ["QT_PREFERRED_BINDING"] = "{binding}"
{body}
'''
""".format(**block))
return tests
|
[
"def",
"format_",
"(",
"blocks",
")",
":",
"tests",
"=",
"list",
"(",
")",
"function_count",
"=",
"0",
"# For each test to have a unique name",
"for",
"block",
"in",
"blocks",
":",
"# Validate docstring format of body",
"if",
"not",
"any",
"(",
"line",
"[",
":",
"3",
"]",
"==",
"\">>>\"",
"for",
"line",
"in",
"block",
"[",
"\"body\"",
"]",
")",
":",
"# A doctest requires at least one `>>>` directive.",
"block",
"[",
"\"body\"",
"]",
".",
"insert",
"(",
"0",
",",
"\">>> assert False, \"",
"\"'Body must be in docstring format'\\n\"",
")",
"# Validate binding on first line",
"if",
"not",
"block",
"[",
"\"binding\"",
"]",
"in",
"(",
"\"PySide\"",
",",
"\"PySide2\"",
",",
"\"PyQt5\"",
",",
"\"PyQt4\"",
")",
":",
"block",
"[",
"\"body\"",
"]",
".",
"insert",
"(",
"0",
",",
"\">>> assert False, \"",
"\"'Invalid binding'\\n\"",
")",
"if",
"sys",
".",
"version_info",
">",
"(",
"3",
",",
"4",
")",
"and",
"block",
"[",
"\"binding\"",
"]",
"in",
"(",
"\"PySide\"",
")",
":",
"# Skip caveat test if it requires PySide on Python > 3.4",
"continue",
"else",
":",
"function_count",
"+=",
"1",
"block",
"[",
"\"header\"",
"]",
"=",
"block",
"[",
"\"header\"",
"]",
"block",
"[",
"\"count\"",
"]",
"=",
"str",
"(",
"function_count",
")",
"block",
"[",
"\"body\"",
"]",
"=",
"\" \"",
".",
"join",
"(",
"block",
"[",
"\"body\"",
"]",
")",
"tests",
".",
"append",
"(",
"\"\"\"\\\n\ndef test_{count}_{header}():\n '''Test {header}\n\n >>> import os, sys\n >>> PYTHON = sys.version_info[0]\n >>> long = int if PYTHON == 3 else long\n >>> _ = os.environ.pop(\"QT_VERBOSE\", None) # Disable debug output\n >>> os.environ[\"QT_PREFERRED_BINDING\"] = \"{binding}\"\n {body}\n '''\n\n \"\"\"",
".",
"format",
"(",
"*",
"*",
"block",
")",
")",
"return",
"tests"
] | 30.958333 | 22.0625 |
def not_evaluator(conditions, leaf_evaluator):
""" Evaluates a list of conditions as if the evaluator had been applied
to a single entry and NOT was applied to the result.
Args:
conditions: List of conditions ex: [operand_1, operand_2].
leaf_evaluator: Function which will be called to evaluate leaf condition values.
Returns:
Boolean:
- True if the operand evaluates to False.
- False if the operand evaluates to True.
None: if conditions is empty or condition couldn't be evaluated.
"""
if not len(conditions) > 0:
return None
result = evaluate(conditions[0], leaf_evaluator)
return None if result is None else not result
|
[
"def",
"not_evaluator",
"(",
"conditions",
",",
"leaf_evaluator",
")",
":",
"if",
"not",
"len",
"(",
"conditions",
")",
">",
"0",
":",
"return",
"None",
"result",
"=",
"evaluate",
"(",
"conditions",
"[",
"0",
"]",
",",
"leaf_evaluator",
")",
"return",
"None",
"if",
"result",
"is",
"None",
"else",
"not",
"result"
] | 34.526316 | 20.631579 |
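A tiny self-contained illustration of the NOT semantics, using a stand-in evaluate() that treats every condition as a leaf (the real evaluate() also handles nested operator lists).
def evaluate(condition, leaf_evaluator):
    # Stand-in: every condition is a leaf.
    return leaf_evaluator(condition)

def not_evaluator(conditions, leaf_evaluator):
    if not len(conditions) > 0:
        return None
    result = evaluate(conditions[0], leaf_evaluator)
    return None if result is None else not result

leaf = {"is_beta": True, "is_admin": None}.get
print(not_evaluator(["is_beta"], leaf))   # False: the operand evaluated to True
print(not_evaluator(["is_admin"], leaf))  # None: the operand could not be evaluated
print(not_evaluator([], leaf))            # None: empty condition list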
def flush_task_and_object_metadata_unsafe():
"""This removes some critical state from the Redis shards.
In a multitenant environment, this will flush metadata for all jobs, which
may be undesirable.
This removes all of the object and task metadata. This can be used to try
to address out-of-memory errors caused by the accumulation of metadata in
Redis. However, after running this command, fault tolerance will most
likely not work.
"""
ray.worker.global_worker.check_connected()
def flush_shard(redis_client):
# Flush the task table. Note that this also flushes the driver tasks
# which may be undesirable.
num_task_keys_deleted = 0
for key in redis_client.scan_iter(match=TASK_PREFIX + b"*"):
num_task_keys_deleted += redis_client.delete(key)
print("Deleted {} task keys from Redis.".format(num_task_keys_deleted))
# Flush the object information.
num_object_keys_deleted = 0
for key in redis_client.scan_iter(match=OBJECT_INFO_PREFIX + b"*"):
num_object_keys_deleted += redis_client.delete(key)
print("Deleted {} object info keys from Redis.".format(
num_object_keys_deleted))
# Flush the object locations.
num_object_location_keys_deleted = 0
for key in redis_client.scan_iter(match=OBJECT_LOCATION_PREFIX + b"*"):
num_object_location_keys_deleted += redis_client.delete(key)
print("Deleted {} object location keys from Redis.".format(
num_object_location_keys_deleted))
# Loop over the shards and flush all of them.
for redis_client in ray.worker.global_state.redis_clients:
flush_shard(redis_client)
|
[
"def",
"flush_task_and_object_metadata_unsafe",
"(",
")",
":",
"ray",
".",
"worker",
".",
"global_worker",
".",
"check_connected",
"(",
")",
"def",
"flush_shard",
"(",
"redis_client",
")",
":",
"# Flush the task table. Note that this also flushes the driver tasks",
"# which may be undesirable.",
"num_task_keys_deleted",
"=",
"0",
"for",
"key",
"in",
"redis_client",
".",
"scan_iter",
"(",
"match",
"=",
"TASK_PREFIX",
"+",
"b\"*\"",
")",
":",
"num_task_keys_deleted",
"+=",
"redis_client",
".",
"delete",
"(",
"key",
")",
"print",
"(",
"\"Deleted {} task keys from Redis.\"",
".",
"format",
"(",
"num_task_keys_deleted",
")",
")",
"# Flush the object information.",
"num_object_keys_deleted",
"=",
"0",
"for",
"key",
"in",
"redis_client",
".",
"scan_iter",
"(",
"match",
"=",
"OBJECT_INFO_PREFIX",
"+",
"b\"*\"",
")",
":",
"num_object_keys_deleted",
"+=",
"redis_client",
".",
"delete",
"(",
"key",
")",
"print",
"(",
"\"Deleted {} object info keys from Redis.\"",
".",
"format",
"(",
"num_object_keys_deleted",
")",
")",
"# Flush the object locations.",
"num_object_location_keys_deleted",
"=",
"0",
"for",
"key",
"in",
"redis_client",
".",
"scan_iter",
"(",
"match",
"=",
"OBJECT_LOCATION_PREFIX",
"+",
"b\"*\"",
")",
":",
"num_object_location_keys_deleted",
"+=",
"redis_client",
".",
"delete",
"(",
"key",
")",
"print",
"(",
"\"Deleted {} object location keys from Redis.\"",
".",
"format",
"(",
"num_object_location_keys_deleted",
")",
")",
"# Loop over the shards and flush all of them.",
"for",
"redis_client",
"in",
"ray",
".",
"worker",
".",
"global_state",
".",
"redis_clients",
":",
"flush_shard",
"(",
"redis_client",
")"
] | 44.578947 | 21.394737 |
def _get_base_model(self):
"""
:return: base model from Keras based on user-supplied model name
"""
if self.model_name == 'inception_v3':
return InceptionV3(weights='imagenet', include_top=False)
elif self.model_name == 'xception':
return Xception(weights='imagenet', include_top=False)
elif self.model_name == 'vgg16':
return VGG16(weights='imagenet', include_top=False)
elif self.model_name == 'vgg19':
return VGG19(weights='imagenet', include_top=False)
elif self.model_name == 'resnet50':
return ResNet50(weights='imagenet', include_top=False)
else:
raise ValueError('Cannot find base model %s' % self.model_name)
|
[
"def",
"_get_base_model",
"(",
"self",
")",
":",
"if",
"self",
".",
"model_name",
"==",
"'inception_v3'",
":",
"return",
"InceptionV3",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'xception'",
":",
"return",
"Xception",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'vgg16'",
":",
"return",
"VGG16",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'vgg19'",
":",
"return",
"VGG19",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"elif",
"self",
".",
"model_name",
"==",
"'resnet50'",
":",
"return",
"ResNet50",
"(",
"weights",
"=",
"'imagenet'",
",",
"include_top",
"=",
"False",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot find base model %s'",
"%",
"self",
".",
"model_name",
")"
] | 46.625 | 15.375 |
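The if/elif chain above is a string-to-constructor lookup; the same dispatch is often written as a dict of factories. A sketch with placeholder factories (the real keras.applications classes are not imported here).
def make_base_model(model_name):
    # Placeholder factories standing in for InceptionV3, Xception, VGG16, ...
    factories = {
        'inception_v3': lambda: "InceptionV3(weights='imagenet', include_top=False)",
        'xception': lambda: "Xception(weights='imagenet', include_top=False)",
        'vgg16': lambda: "VGG16(weights='imagenet', include_top=False)",
    }
    try:
        return factories[model_name]()
    except KeyError:
        raise ValueError('Cannot find base model %s' % model_name)

print(make_base_model('vgg16'))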
def compile_mof_file(self, mof_file, namespace=None, search_paths=None,
verbose=None):
"""
Compile the MOF definitions in the specified file (and its included
files) and add the resulting CIM objects to the specified CIM namespace
of the mock repository.
If the namespace does not exist, :exc:`~pywbem.CIMError` with status
CIM_ERR_INVALID_NAMESPACE is raised.
This method supports all MOF pragmas, and specifically the include
pragma.
If a CIM class or CIM qualifier type to be added already exists in the
target namespace with the same name (comparing case insensitively),
this method raises :exc:`~pywbem.CIMError`.
If a CIM instance to be added already exists in the target namespace
with the same keybinding values, this method raises
:exc:`~pywbem.CIMError`.
In all cases where this method raises an exception, the mock repository
remains unchanged.
Parameters:
mof_file (:term:`string`):
Path name of the file containing the MOF definitions to be compiled.
namespace (:term:`string`):
The name of the target CIM namespace in the mock repository. This
namespace is also used for lookup of any existing or dependent
CIM objects. If `None`, the default namespace of the connection is
used.
search_paths (:term:`py:iterable` of :term:`string`):
An iterable of directory path names where MOF dependent files will
be looked up.
See the description of the `search_path` init parameter of the
:class:`~pywbem.MOFCompiler` class for more information on MOF
dependent files.
verbose (:class:`py:bool`):
Controls whether to issue more detailed compiler messages.
Raises:
IOError: MOF file not found.
:exc:`~pywbem.MOFParseError`: Compile error in the MOF.
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
:exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
mock repository.
"""
namespace = namespace or self.default_namespace
self._validate_namespace(namespace)
mofcomp = MOFCompiler(_MockMOFWBEMConnection(self),
search_paths=search_paths,
verbose=verbose)
mofcomp.compile_file(mof_file, namespace)
|
[
"def",
"compile_mof_file",
"(",
"self",
",",
"mof_file",
",",
"namespace",
"=",
"None",
",",
"search_paths",
"=",
"None",
",",
"verbose",
"=",
"None",
")",
":",
"namespace",
"=",
"namespace",
"or",
"self",
".",
"default_namespace",
"self",
".",
"_validate_namespace",
"(",
"namespace",
")",
"mofcomp",
"=",
"MOFCompiler",
"(",
"_MockMOFWBEMConnection",
"(",
"self",
")",
",",
"search_paths",
"=",
"search_paths",
",",
"verbose",
"=",
"verbose",
")",
"mofcomp",
".",
"compile_file",
"(",
"mof_file",
",",
"namespace",
")"
] | 39.634921 | 25.793651 |
def close(self):
"""Closes the stream."""
if self.call is None:
return
self._request_queue.put(None)
self.call.cancel()
self._request_generator = None
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"call",
"is",
"None",
":",
"return",
"self",
".",
"_request_queue",
".",
"put",
"(",
"None",
")",
"self",
".",
"call",
".",
"cancel",
"(",
")",
"self",
".",
"_request_generator",
"=",
"None"
] | 24.5 | 14.5 |
def changeGroupImageRemote(self, image_url, thread_id=None):
"""
Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed
"""
(image_id, mimetype), = self._upload(get_files_from_urls([image_url]))
return self._changeGroupImage(image_id, thread_id)
|
[
"def",
"changeGroupImageRemote",
"(",
"self",
",",
"image_url",
",",
"thread_id",
"=",
"None",
")",
":",
"(",
"image_id",
",",
"mimetype",
")",
",",
"=",
"self",
".",
"_upload",
"(",
"get_files_from_urls",
"(",
"[",
"image_url",
"]",
")",
")",
"return",
"self",
".",
"_changeGroupImage",
"(",
"image_id",
",",
"thread_id",
")"
] | 45.2 | 19 |
def peek(init, exposes, debug=False):
"""
Default deserializer factory.
Arguments:
init (callable): type constructor.
exposes (iterable): attributes to be peeked and passed to `init`.
Returns:
callable: deserializer (`peek` routine).
"""
def _peek(store, container, _stack=None):
args = [ store.peek(objname, container, _stack=_stack) \
for objname in exposes ]
if debug:
print(args)
return init(*args)
return _peek
|
[
"def",
"peek",
"(",
"init",
",",
"exposes",
",",
"debug",
"=",
"False",
")",
":",
"def",
"_peek",
"(",
"store",
",",
"container",
",",
"_stack",
"=",
"None",
")",
":",
"args",
"=",
"[",
"store",
".",
"peek",
"(",
"objname",
",",
"container",
",",
"_stack",
"=",
"_stack",
")",
"for",
"objname",
"in",
"exposes",
"]",
"if",
"debug",
":",
"print",
"(",
"args",
")",
"return",
"init",
"(",
"*",
"args",
")",
"return",
"_peek"
] | 23.809524 | 19.904762 |
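A self-contained run of the factory with a toy store object; DictStore and Point are invented for the example.
from collections import namedtuple

Point = namedtuple('Point', 'x y')

def peek(init, exposes, debug=False):
    def _peek(store, container, _stack=None):
        args = [store.peek(objname, container, _stack=_stack) for objname in exposes]
        if debug:
            print(args)
        return init(*args)
    return _peek

class DictStore:
    # Toy store: an attribute is simply a key of the container dict.
    def peek(self, objname, container, _stack=None):
        return container[objname]

peek_point = peek(Point, ('x', 'y'))
print(peek_point(DictStore(), {'x': 1, 'y': 2}))  # Point(x=1, y=2)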
def get_element_by_class_name_or_raise(self, class_name):
"""Return the SchemaElement for the specified class name, asserting that it exists."""
if class_name not in self._elements:
raise InvalidClassError(u'Class does not exist: {}'.format(class_name))
return self._elements[class_name]
|
[
"def",
"get_element_by_class_name_or_raise",
"(",
"self",
",",
"class_name",
")",
":",
"if",
"class_name",
"not",
"in",
"self",
".",
"_elements",
":",
"raise",
"InvalidClassError",
"(",
"u'Class does not exist: {}'",
".",
"format",
"(",
"class_name",
")",
")",
"return",
"self",
".",
"_elements",
"[",
"class_name",
"]"
] | 53.166667 | 17.5 |
def wait_for(self, timeout):
"""
A decorator factory that ensures the wrapped function runs in the
reactor thread.
When the wrapped function is called, its result is returned or its
exception raised. Deferreds are handled transparently. Calls will
timeout after the given number of seconds (a float), raising a
crochet.TimeoutError, and cancelling the Deferred being waited on.
"""
def decorator(function):
@wrapt.decorator
def wrapper(function, _, args, kwargs):
@self.run_in_reactor
def run():
return function(*args, **kwargs)
eventual_result = run()
try:
return eventual_result.wait(timeout)
except TimeoutError:
eventual_result.cancel()
raise
result = wrapper(function)
        # Expose underlying function for testing purposes; this attribute is
# deprecated, use __wrapped__ instead:
try:
result.wrapped_function = function
except AttributeError:
pass
return result
return decorator
|
[
"def",
"wait_for",
"(",
"self",
",",
"timeout",
")",
":",
"def",
"decorator",
"(",
"function",
")",
":",
"@",
"wrapt",
".",
"decorator",
"def",
"wrapper",
"(",
"function",
",",
"_",
",",
"args",
",",
"kwargs",
")",
":",
"@",
"self",
".",
"run_in_reactor",
"def",
"run",
"(",
")",
":",
"return",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"eventual_result",
"=",
"run",
"(",
")",
"try",
":",
"return",
"eventual_result",
".",
"wait",
"(",
"timeout",
")",
"except",
"TimeoutError",
":",
"eventual_result",
".",
"cancel",
"(",
")",
"raise",
"result",
"=",
"wrapper",
"(",
"function",
")",
"# Expose underling function for testing purposes; this attribute is",
"# deprecated, use __wrapped__ instead:",
"try",
":",
"result",
".",
"wrapped_function",
"=",
"function",
"except",
"AttributeError",
":",
"pass",
"return",
"result",
"return",
"decorator"
] | 34.8 | 18.742857 |
def max(self):
        """Get maximum target value found and corresponding parameters."""
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
|
[
"def",
"max",
"(",
"self",
")",
":",
"try",
":",
"res",
"=",
"{",
"'target'",
":",
"self",
".",
"target",
".",
"max",
"(",
")",
",",
"'params'",
":",
"dict",
"(",
"zip",
"(",
"self",
".",
"keys",
",",
"self",
".",
"params",
"[",
"self",
".",
"target",
".",
"argmax",
"(",
")",
"]",
")",
")",
"}",
"except",
"ValueError",
":",
"res",
"=",
"{",
"}",
"return",
"res"
] | 29.666667 | 18.583333 |
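The method pairs numpy's max() and argmax() to report the best target together with the parameters that produced it; a standalone sketch of the same bookkeeping with made-up data.
import numpy as np

keys = ['x', 'y']
params = np.array([[1.0, 2.0], [3.0, 4.0], [0.5, 0.1]])
target = np.array([0.2, 0.9, 0.4])

best = {
    'target': float(target.max()),
    'params': dict(zip(keys, params[target.argmax()].tolist())),
}
print(best)  # {'target': 0.9, 'params': {'x': 3.0, 'y': 4.0}}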
def get_config_bool(name):
"""Checks if a config value is set to a valid bool value."""
cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)
return cli_config.getboolean('servicefabric', name, False)
|
[
"def",
"get_config_bool",
"(",
"name",
")",
":",
"cli_config",
"=",
"CLIConfig",
"(",
"SF_CLI_CONFIG_DIR",
",",
"SF_CLI_ENV_VAR_PREFIX",
")",
"return",
"cli_config",
".",
"getboolean",
"(",
"'servicefabric'",
",",
"name",
",",
"False",
")"
] | 44 | 20.8 |
def _plt_pydot(self, fout_img):
"""Plot using the pydot graphics engine."""
dag = self.get_pydot_graph()
self.wr_pydot_dag(fout_img, dag)
|
[
"def",
"_plt_pydot",
"(",
"self",
",",
"fout_img",
")",
":",
"dag",
"=",
"self",
".",
"get_pydot_graph",
"(",
")",
"self",
".",
"wr_pydot_dag",
"(",
"fout_img",
",",
"dag",
")"
] | 39.5 | 3.25 |
def _chk_truncate(self):
'''
Checks whether the frame should be truncated. If so, slices
the frame up.
'''
# Column of which first element is used to determine width of a dot col
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal (why else = 0)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
max_rows_adj = self.h - n_add_rows # rows available to fill with actual data
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = frame[:, :col_num].concat(frame[:, -col_num:], axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
frame = frame[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = frame[:row_num, :].concat(frame[-row_num:, :])
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
|
[
"def",
"_chk_truncate",
"(",
"self",
")",
":",
"# Column of which first element is used to determine width of a dot col",
"self",
".",
"tr_size_col",
"=",
"-",
"1",
"# Cut the data to the information actually printed",
"max_cols",
"=",
"self",
".",
"max_cols",
"max_rows",
"=",
"self",
".",
"max_rows",
"if",
"max_cols",
"==",
"0",
"or",
"max_rows",
"==",
"0",
":",
"# assume we are in the terminal (why else = 0)",
"(",
"w",
",",
"h",
")",
"=",
"get_terminal_size",
"(",
")",
"self",
".",
"w",
"=",
"w",
"self",
".",
"h",
"=",
"h",
"if",
"self",
".",
"max_rows",
"==",
"0",
":",
"dot_row",
"=",
"1",
"prompt_row",
"=",
"1",
"if",
"self",
".",
"show_dimensions",
":",
"show_dimension_rows",
"=",
"3",
"n_add_rows",
"=",
"self",
".",
"header",
"+",
"dot_row",
"+",
"show_dimension_rows",
"+",
"prompt_row",
"max_rows_adj",
"=",
"self",
".",
"h",
"-",
"n_add_rows",
"# rows available to fill with actual data",
"self",
".",
"max_rows_adj",
"=",
"max_rows_adj",
"# Format only rows and columns that could potentially fit the screen",
"if",
"max_cols",
"==",
"0",
"and",
"len",
"(",
"self",
".",
"frame",
".",
"columns",
")",
">",
"w",
":",
"max_cols",
"=",
"w",
"if",
"max_rows",
"==",
"0",
"and",
"len",
"(",
"self",
".",
"frame",
")",
">",
"h",
":",
"max_rows",
"=",
"h",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'max_rows_adj'",
")",
":",
"self",
".",
"max_rows_adj",
"=",
"max_rows",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'max_cols_adj'",
")",
":",
"self",
".",
"max_cols_adj",
"=",
"max_cols",
"max_cols_adj",
"=",
"self",
".",
"max_cols_adj",
"max_rows_adj",
"=",
"self",
".",
"max_rows_adj",
"truncate_h",
"=",
"max_cols_adj",
"and",
"(",
"len",
"(",
"self",
".",
"columns",
")",
">",
"max_cols_adj",
")",
"truncate_v",
"=",
"max_rows_adj",
"and",
"(",
"len",
"(",
"self",
".",
"frame",
")",
">",
"max_rows_adj",
")",
"frame",
"=",
"self",
".",
"frame",
"if",
"truncate_h",
":",
"if",
"max_cols_adj",
"==",
"0",
":",
"col_num",
"=",
"len",
"(",
"frame",
".",
"columns",
")",
"elif",
"max_cols_adj",
"==",
"1",
":",
"frame",
"=",
"frame",
"[",
":",
",",
":",
"max_cols",
"]",
"col_num",
"=",
"max_cols",
"else",
":",
"col_num",
"=",
"(",
"max_cols_adj",
"//",
"2",
")",
"frame",
"=",
"frame",
"[",
":",
",",
":",
"col_num",
"]",
".",
"concat",
"(",
"frame",
"[",
":",
",",
"-",
"col_num",
":",
"]",
",",
"axis",
"=",
"1",
")",
"self",
".",
"tr_col_num",
"=",
"col_num",
"if",
"truncate_v",
":",
"if",
"max_rows_adj",
"==",
"0",
":",
"row_num",
"=",
"len",
"(",
"frame",
")",
"if",
"max_rows_adj",
"==",
"1",
":",
"row_num",
"=",
"max_rows",
"frame",
"=",
"frame",
"[",
":",
"max_rows",
",",
":",
"]",
"else",
":",
"row_num",
"=",
"max_rows_adj",
"//",
"2",
"frame",
"=",
"frame",
"[",
":",
"row_num",
",",
":",
"]",
".",
"concat",
"(",
"frame",
"[",
"-",
"row_num",
":",
",",
":",
"]",
")",
"self",
".",
"tr_row_num",
"=",
"row_num",
"self",
".",
"tr_frame",
"=",
"frame",
"self",
".",
"truncate_h",
"=",
"truncate_h",
"self",
".",
"truncate_v",
"=",
"truncate_v",
"self",
".",
"is_truncated",
"=",
"self",
".",
"truncate_h",
"or",
"self",
".",
"truncate_v"
] | 36.594203 | 17.811594 |
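Stripped of the terminal-size handling, the truncation above keeps a head and a tail of the frame; a list-based sketch of the same slicing arithmetic.
def truncate(items, max_items):
    # Keep the first and last halves when there are too many items.
    if len(items) <= max_items:
        return items, False
    keep = max_items // 2
    return items[:keep] + items[-keep:], True

rows, truncated = truncate(list(range(10)), 4)
print(rows, truncated)  # [0, 1, 8, 9] True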
def modified_lines_from_diff(self, diff):
"""Returns the changed lines in a diff.
- Potentially this is vc specific (if not using udiff).
Note: this returns the line numbers in descending order.
"""
from pep8radius.diff import modified_lines_from_udiff
for start, end in modified_lines_from_udiff(diff):
yield start, end
|
[
"def",
"modified_lines_from_diff",
"(",
"self",
",",
"diff",
")",
":",
"from",
"pep8radius",
".",
"diff",
"import",
"modified_lines_from_udiff",
"for",
"start",
",",
"end",
"in",
"modified_lines_from_udiff",
"(",
"diff",
")",
":",
"yield",
"start",
",",
"end"
] | 33.909091 | 19.909091 |
def reinforce(self, **kwargs):
"""
Reinforces the grid and calculates grid expansion costs.
See :meth:`edisgo.flex_opt.reinforce_grid` for more information.
"""
results = reinforce_grid(
self, max_while_iterations=kwargs.get(
'max_while_iterations', 10),
copy_graph=kwargs.get('copy_graph', False),
timesteps_pfa=kwargs.get('timesteps_pfa', None),
combined_analysis=kwargs.get('combined_analysis', False))
# add measure to Results object
if not kwargs.get('copy_graph', False):
self.network.results.measures = 'grid_expansion'
return results
|
[
"def",
"reinforce",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"reinforce_grid",
"(",
"self",
",",
"max_while_iterations",
"=",
"kwargs",
".",
"get",
"(",
"'max_while_iterations'",
",",
"10",
")",
",",
"copy_graph",
"=",
"kwargs",
".",
"get",
"(",
"'copy_graph'",
",",
"False",
")",
",",
"timesteps_pfa",
"=",
"kwargs",
".",
"get",
"(",
"'timesteps_pfa'",
",",
"None",
")",
",",
"combined_analysis",
"=",
"kwargs",
".",
"get",
"(",
"'combined_analysis'",
",",
"False",
")",
")",
"# add measure to Results object",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'copy_graph'",
",",
"False",
")",
":",
"self",
".",
"network",
".",
"results",
".",
"measures",
"=",
"'grid_expansion'",
"return",
"results"
] | 35.105263 | 18.789474 |
def _access_rule(method,
ip=None,
port=None,
proto='tcp',
direction='in',
port_origin='d',
ip_origin='d',
comment=''):
'''
Handles the cmd execution for allow and deny commands.
'''
if _status_csf():
if ip is None:
return {'error': 'You must supply an ip address or CIDR.'}
if port is None:
args = _build_args(method, ip, comment)
return __csf_cmd(args)
else:
if method not in ['allow', 'deny']:
return {'error': 'Only allow and deny rules are allowed when specifying a port.'}
return _access_rule_with_port(method=method,
ip=ip,
port=port,
proto=proto,
direction=direction,
port_origin=port_origin,
ip_origin=ip_origin,
comment=comment)
|
[
"def",
"_access_rule",
"(",
"method",
",",
"ip",
"=",
"None",
",",
"port",
"=",
"None",
",",
"proto",
"=",
"'tcp'",
",",
"direction",
"=",
"'in'",
",",
"port_origin",
"=",
"'d'",
",",
"ip_origin",
"=",
"'d'",
",",
"comment",
"=",
"''",
")",
":",
"if",
"_status_csf",
"(",
")",
":",
"if",
"ip",
"is",
"None",
":",
"return",
"{",
"'error'",
":",
"'You must supply an ip address or CIDR.'",
"}",
"if",
"port",
"is",
"None",
":",
"args",
"=",
"_build_args",
"(",
"method",
",",
"ip",
",",
"comment",
")",
"return",
"__csf_cmd",
"(",
"args",
")",
"else",
":",
"if",
"method",
"not",
"in",
"[",
"'allow'",
",",
"'deny'",
"]",
":",
"return",
"{",
"'error'",
":",
"'Only allow and deny rules are allowed when specifying a port.'",
"}",
"return",
"_access_rule_with_port",
"(",
"method",
"=",
"method",
",",
"ip",
"=",
"ip",
",",
"port",
"=",
"port",
",",
"proto",
"=",
"proto",
",",
"direction",
"=",
"direction",
",",
"port_origin",
"=",
"port_origin",
",",
"ip_origin",
"=",
"ip_origin",
",",
"comment",
"=",
"comment",
")"
] | 40.928571 | 18.714286 |
def correct_hyperlinks(book_dir=BOOK_PATH, dest=None, include_tags=None,
ext='.nlpiabak', skip_untitled=True):
""" DEPRECATED (see translate_line_footnotes)
Find bad footnotes (only urls), visit the page, add the title to the footnote
>>> len(correct_hyperlinks(book_dir=BOOK_PATH, dest='cleaned_hyperlinks'))
2
>>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks'))
"""
# bad_url_lines = find_all_bad_footnote_urls(book_dir=book_dir)
# file_line_maps = []
return translate_book(translators=HyperlinkStyleCorrector().translate,
book_dir=book_dir, dest=dest, include_tags=include_tags,
ext=ext, skip_untitled=skip_untitled)
|
[
"def",
"correct_hyperlinks",
"(",
"book_dir",
"=",
"BOOK_PATH",
",",
"dest",
"=",
"None",
",",
"include_tags",
"=",
"None",
",",
"ext",
"=",
"'.nlpiabak'",
",",
"skip_untitled",
"=",
"True",
")",
":",
"# bad_url_lines = find_all_bad_footnote_urls(book_dir=book_dir)",
"# file_line_maps = []",
"return",
"translate_book",
"(",
"translators",
"=",
"HyperlinkStyleCorrector",
"(",
")",
".",
"translate",
",",
"book_dir",
"=",
"book_dir",
",",
"dest",
"=",
"dest",
",",
"include_tags",
"=",
"include_tags",
",",
"ext",
"=",
"ext",
",",
"skip_untitled",
"=",
"skip_untitled",
")"
] | 48.266667 | 27.2 |
def prompt_and_delete(path, no_input=False):
"""
Ask user if it's okay to delete the previously-downloaded file/directory.
If yes, delete it. If no, checks to see if the old version should be
reused. If yes, it's reused; otherwise, Cookiecutter exits.
:param path: Previously downloaded zipfile.
:param no_input: Suppress prompt to delete repo and just delete it.
:return: True if the content was deleted
"""
# Suppress prompt if called via API
if no_input:
ok_to_delete = True
else:
question = (
"You've downloaded {} before. "
"Is it okay to delete and re-download it?"
).format(path)
ok_to_delete = read_user_yes_no(question, 'yes')
if ok_to_delete:
if os.path.isdir(path):
rmtree(path)
else:
os.remove(path)
return True
else:
ok_to_reuse = read_user_yes_no(
"Do you want to re-use the existing version?", 'yes'
)
if ok_to_reuse:
return False
sys.exit()
|
[
"def",
"prompt_and_delete",
"(",
"path",
",",
"no_input",
"=",
"False",
")",
":",
"# Suppress prompt if called via API",
"if",
"no_input",
":",
"ok_to_delete",
"=",
"True",
"else",
":",
"question",
"=",
"(",
"\"You've downloaded {} before. \"",
"\"Is it okay to delete and re-download it?\"",
")",
".",
"format",
"(",
"path",
")",
"ok_to_delete",
"=",
"read_user_yes_no",
"(",
"question",
",",
"'yes'",
")",
"if",
"ok_to_delete",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"rmtree",
"(",
"path",
")",
"else",
":",
"os",
".",
"remove",
"(",
"path",
")",
"return",
"True",
"else",
":",
"ok_to_reuse",
"=",
"read_user_yes_no",
"(",
"\"Do you want to re-use the existing version?\"",
",",
"'yes'",
")",
"if",
"ok_to_reuse",
":",
"return",
"False",
"sys",
".",
"exit",
"(",
")"
] | 28.054054 | 20.702703 |
def cell_to_text(self):
"""Return the text representation for the cell"""
if self.is_code():
return self.code_to_text()
source = copy(self.source)
if not self.comment:
escape_code_start(source, self.ext, None)
return self.markdown_to_text(source)
|
[
"def",
"cell_to_text",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_code",
"(",
")",
":",
"return",
"self",
".",
"code_to_text",
"(",
")",
"source",
"=",
"copy",
"(",
"self",
".",
"source",
")",
"if",
"not",
"self",
".",
"comment",
":",
"escape_code_start",
"(",
"source",
",",
"self",
".",
"ext",
",",
"None",
")",
"return",
"self",
".",
"markdown_to_text",
"(",
"source",
")"
] | 33.666667 | 12 |
def pymux_key_to_prompt_toolkit_key_sequence(key):
"""
Turn a pymux description of a key. E.g. "C-a" or "M-x" into a
prompt-toolkit key sequence.
Raises `ValueError` if the key is not known.
"""
# Make the c- and m- prefixes case insensitive.
if key.lower().startswith('m-c-'):
key = 'M-C-' + key[4:]
elif key.lower().startswith('c-'):
key = 'C-' + key[2:]
elif key.lower().startswith('m-'):
key = 'M-' + key[2:]
# Lookup key.
try:
return PYMUX_TO_PROMPT_TOOLKIT_KEYS[key]
except KeyError:
if len(key) == 1:
return (key, )
else:
raise ValueError('Unknown key: %r' % (key, ))
|
[
"def",
"pymux_key_to_prompt_toolkit_key_sequence",
"(",
"key",
")",
":",
"# Make the c- and m- prefixes case insensitive.",
"if",
"key",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'m-c-'",
")",
":",
"key",
"=",
"'M-C-'",
"+",
"key",
"[",
"4",
":",
"]",
"elif",
"key",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'c-'",
")",
":",
"key",
"=",
"'C-'",
"+",
"key",
"[",
"2",
":",
"]",
"elif",
"key",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'m-'",
")",
":",
"key",
"=",
"'M-'",
"+",
"key",
"[",
"2",
":",
"]",
"# Lookup key.",
"try",
":",
"return",
"PYMUX_TO_PROMPT_TOOLKIT_KEYS",
"[",
"key",
"]",
"except",
"KeyError",
":",
"if",
"len",
"(",
"key",
")",
"==",
"1",
":",
"return",
"(",
"key",
",",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown key: %r'",
"%",
"(",
"key",
",",
")",
")"
] | 29.347826 | 14.73913 |
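The function normalizes the modifier prefixes case-insensitively and then falls back to a table lookup; a self-contained sketch with a tiny made-up key table.
KEY_TABLE = {'C-a': ('c-a',), 'M-x': ('escape', 'x')}  # sample entries, not the real table

def to_key_sequence(key):
    if key.lower().startswith('m-c-'):
        key = 'M-C-' + key[4:]
    elif key.lower().startswith('c-'):
        key = 'C-' + key[2:]
    elif key.lower().startswith('m-'):
        key = 'M-' + key[2:]
    try:
        return KEY_TABLE[key]
    except KeyError:
        if len(key) == 1:
            return (key,)
        raise ValueError('Unknown key: %r' % (key,))

print(to_key_sequence('c-a'))  # ('c-a',)
print(to_key_sequence('q'))    # ('q',)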
def _BuildOobLink(self, param, mode):
"""Builds out-of-band URL.
Gitkit API GetOobCode() is called and the returning code is combined
with Gitkit widget URL to building the out-of-band url.
Args:
param: dict of request.
mode: string, Gitkit widget mode to handle the oob action after user
clicks the oob url in the email.
Raises:
GitkitClientError: if oob code is not returned.
Returns:
A string of oob url.
"""
code = self.rpc_helper.GetOobCode(param)
if code:
parsed = list(parse.urlparse(self.widget_url))
query = dict(parse.parse_qsl(parsed[4]))
query.update({'mode': mode, 'oobCode': code})
try:
parsed[4] = parse.urlencode(query)
except AttributeError:
parsed[4] = urllib.urlencode(query)
return code, parse.urlunparse(parsed)
raise errors.GitkitClientError('invalid request')
|
[
"def",
"_BuildOobLink",
"(",
"self",
",",
"param",
",",
"mode",
")",
":",
"code",
"=",
"self",
".",
"rpc_helper",
".",
"GetOobCode",
"(",
"param",
")",
"if",
"code",
":",
"parsed",
"=",
"list",
"(",
"parse",
".",
"urlparse",
"(",
"self",
".",
"widget_url",
")",
")",
"query",
"=",
"dict",
"(",
"parse",
".",
"parse_qsl",
"(",
"parsed",
"[",
"4",
"]",
")",
")",
"query",
".",
"update",
"(",
"{",
"'mode'",
":",
"mode",
",",
"'oobCode'",
":",
"code",
"}",
")",
"try",
":",
"parsed",
"[",
"4",
"]",
"=",
"parse",
".",
"urlencode",
"(",
"query",
")",
"except",
"AttributeError",
":",
"parsed",
"[",
"4",
"]",
"=",
"urllib",
".",
"urlencode",
"(",
"query",
")",
"return",
"code",
",",
"parse",
".",
"urlunparse",
"(",
"parsed",
")",
"raise",
"errors",
".",
"GitkitClientError",
"(",
"'invalid request'",
")"
] | 28.548387 | 20 |
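The URL surgery in the middle of the method (parse, merge query parameters, re-assemble) can be reproduced with the standard library alone; the widget URL, mode and code below are placeholders.
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

widget_url = 'https://example.com/widget?lang=en'   # placeholder widget URL
parsed = list(urlparse(widget_url))
query = dict(parse_qsl(parsed[4]))
query.update({'mode': 'resetPassword', 'oobCode': 'abc123'})  # placeholder values
parsed[4] = urlencode(query)
print(urlunparse(parsed))
# https://example.com/widget?lang=en&mode=resetPassword&oobCode=abc123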
def load_messages(self, directory, catalogue):
"""
        Loads translations found in a directory.
@type directory: string
@param directory: The directory to search
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@raises: ValueError
"""
if not os.path.isdir(directory):
raise ValueError("{0} is not a directory".format(directory))
for format, loader in list(self.loaders.items()):
extension = "{0}.{1}".format(catalogue.locale, format)
files = find_files(directory, "*.{0}".format(extension))
for file in files:
domain = file.split("/")[-1][:-1 * len(extension) - 1]
catalogue.add_catalogue(
loader.load(
file,
catalogue.locale,
domain))
|
[
"def",
"load_messages",
"(",
"self",
",",
"directory",
",",
"catalogue",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"raise",
"ValueError",
"(",
"\"{0} is not a directory\"",
".",
"format",
"(",
"directory",
")",
")",
"for",
"format",
",",
"loader",
"in",
"list",
"(",
"self",
".",
"loaders",
".",
"items",
"(",
")",
")",
":",
"extension",
"=",
"\"{0}.{1}\"",
".",
"format",
"(",
"catalogue",
".",
"locale",
",",
"format",
")",
"files",
"=",
"find_files",
"(",
"directory",
",",
"\"*.{0}\"",
".",
"format",
"(",
"extension",
")",
")",
"for",
"file",
"in",
"files",
":",
"domain",
"=",
"file",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"*",
"len",
"(",
"extension",
")",
"-",
"1",
"]",
"catalogue",
".",
"add_catalogue",
"(",
"loader",
".",
"load",
"(",
"file",
",",
"catalogue",
".",
"locale",
",",
"domain",
")",
")"
] | 35.8 | 15.64 |
def send_search(self, url):
""" Queries the Twitter API with a given query string and \
stores the results internally. Also validates returned HTTP status \
code and throws an exception in case of invalid HTTP states. \
Example usage ``sendSearch('?q=One+Two&count=100')``
:param url: A string of the URL to send the query to
:raises: TwitterSearchException
"""
if not isinstance(url, str if py3k else basestring):
raise TwitterSearchException(1009)
endpoint = self._base_url + (self._search_url
if self.__order_is_search
else self._user_url)
r = requests.get(endpoint + url,
auth=self.__oauth,
proxies={"https": self.__proxy})
self.__response['meta'] = r.headers
self.check_http_status(r.status_code)
self.__response['content'] = r.json()
# update statistics if everything worked fine so far
seen_tweets = self.get_amount_of_tweets()
self.__statistics[0] += 1
self.__statistics[1] += seen_tweets
# call callback if available
if self.__callback:
self.__callback(self)
# if we've seen the correct amount of tweets there may be some more
# using IDs to request more results
# (former versions used page parameter)
# see https://dev.twitter.com/docs/working-with-timelines
# a leading ? char does "confuse" parse_qs()
if url[0] == '?':
url = url[1:]
given_count = int(parse_qs(url)['count'][0])
# Search API does have valid count values
if self.__order_is_search and seen_tweets == given_count:
self.__next_max_id = self.get_minimal_id()
# Timelines doesn't have valid count values
# see: https://dev.twitter.com/docs/faq
# see section: "How do I properly navigate a timeline?"
elif (not self.__order_is_search and
len(self.__response['content']) > 0):
self.__next_max_id = self.get_minimal_id()
else: # we got less tweets than requested -> no more results in API
self.__next_max_id = None
return self.__response['meta'], self.__response['content']
|
[
"def",
"send_search",
"(",
"self",
",",
"url",
")",
":",
"if",
"not",
"isinstance",
"(",
"url",
",",
"str",
"if",
"py3k",
"else",
"basestring",
")",
":",
"raise",
"TwitterSearchException",
"(",
"1009",
")",
"endpoint",
"=",
"self",
".",
"_base_url",
"+",
"(",
"self",
".",
"_search_url",
"if",
"self",
".",
"__order_is_search",
"else",
"self",
".",
"_user_url",
")",
"r",
"=",
"requests",
".",
"get",
"(",
"endpoint",
"+",
"url",
",",
"auth",
"=",
"self",
".",
"__oauth",
",",
"proxies",
"=",
"{",
"\"https\"",
":",
"self",
".",
"__proxy",
"}",
")",
"self",
".",
"__response",
"[",
"'meta'",
"]",
"=",
"r",
".",
"headers",
"self",
".",
"check_http_status",
"(",
"r",
".",
"status_code",
")",
"self",
".",
"__response",
"[",
"'content'",
"]",
"=",
"r",
".",
"json",
"(",
")",
"# update statistics if everything worked fine so far",
"seen_tweets",
"=",
"self",
".",
"get_amount_of_tweets",
"(",
")",
"self",
".",
"__statistics",
"[",
"0",
"]",
"+=",
"1",
"self",
".",
"__statistics",
"[",
"1",
"]",
"+=",
"seen_tweets",
"# call callback if available",
"if",
"self",
".",
"__callback",
":",
"self",
".",
"__callback",
"(",
"self",
")",
"# if we've seen the correct amount of tweets there may be some more",
"# using IDs to request more results",
"# (former versions used page parameter)",
"# see https://dev.twitter.com/docs/working-with-timelines",
"# a leading ? char does \"confuse\" parse_qs()",
"if",
"url",
"[",
"0",
"]",
"==",
"'?'",
":",
"url",
"=",
"url",
"[",
"1",
":",
"]",
"given_count",
"=",
"int",
"(",
"parse_qs",
"(",
"url",
")",
"[",
"'count'",
"]",
"[",
"0",
"]",
")",
"# Search API does have valid count values",
"if",
"self",
".",
"__order_is_search",
"and",
"seen_tweets",
"==",
"given_count",
":",
"self",
".",
"__next_max_id",
"=",
"self",
".",
"get_minimal_id",
"(",
")",
"# Timelines doesn't have valid count values",
"# see: https://dev.twitter.com/docs/faq",
"# see section: \"How do I properly navigate a timeline?\"",
"elif",
"(",
"not",
"self",
".",
"__order_is_search",
"and",
"len",
"(",
"self",
".",
"__response",
"[",
"'content'",
"]",
")",
">",
"0",
")",
":",
"self",
".",
"__next_max_id",
"=",
"self",
".",
"get_minimal_id",
"(",
")",
"else",
":",
"# we got less tweets than requested -> no more results in API",
"self",
".",
"__next_max_id",
"=",
"None",
"return",
"self",
".",
"__response",
"[",
"'meta'",
"]",
",",
"self",
".",
"__response",
"[",
"'content'",
"]"
] | 37.42623 | 19.688525 |
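The count extraction near the end relies on stripping a leading '?' before parse_qs(); a minimal standalone check of that step.
from urllib.parse import parse_qs

url = '?q=One+Two&count=100'
if url[0] == '?':        # a leading ? would otherwise end up inside the first key
    url = url[1:]
print(int(parse_qs(url)['count'][0]))  # 100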
def show(cls, msg=None):
"""
Show the log interface on the page.
"""
if msg:
cls.add(msg)
cls.overlay.show()
cls.overlay.el.bind("click", lambda x: cls.hide())
cls.el.style.display = "block"
cls.bind()
|
[
"def",
"show",
"(",
"cls",
",",
"msg",
"=",
"None",
")",
":",
"if",
"msg",
":",
"cls",
".",
"add",
"(",
"msg",
")",
"cls",
".",
"overlay",
".",
"show",
"(",
")",
"cls",
".",
"overlay",
".",
"el",
".",
"bind",
"(",
"\"click\"",
",",
"lambda",
"x",
":",
"cls",
".",
"hide",
"(",
")",
")",
"cls",
".",
"el",
".",
"style",
".",
"display",
"=",
"\"block\"",
"cls",
".",
"bind",
"(",
")"
] | 22.333333 | 16.333333 |
def get_participant(participant_id):
"""Get the participant with the given id."""
try:
ppt = models.Participant.query.filter_by(id=participant_id).one()
except NoResultFound:
return error_response(
error_type="/participant GET: no participant found",
status=403)
# return the data
return success_response(field="participant",
data=ppt.__json__(),
request_type="participant get")
|
[
"def",
"get_participant",
"(",
"participant_id",
")",
":",
"try",
":",
"ppt",
"=",
"models",
".",
"Participant",
".",
"query",
".",
"filter_by",
"(",
"id",
"=",
"participant_id",
")",
".",
"one",
"(",
")",
"except",
"NoResultFound",
":",
"return",
"error_response",
"(",
"error_type",
"=",
"\"/participant GET: no participant found\"",
",",
"status",
"=",
"403",
")",
"# return the data",
"return",
"success_response",
"(",
"field",
"=",
"\"participant\"",
",",
"data",
"=",
"ppt",
".",
"__json__",
"(",
")",
",",
"request_type",
"=",
"\"participant get\"",
")"
] | 37.153846 | 17.615385 |
def _get_args(self, node, keywords):
"""
Intercept calls to get template and return our own node-specific
template
"""
args = super(ArcanaSlurmGraphPlugin, self)._get_args(
node, keywords)
# Substitute the template arg with the node-specific one
new_args = []
for name, arg in zip(keywords, args):
if name == 'template':
new_args.append(self._processor.slurm_template(node))
else:
new_args.append(arg)
return tuple(new_args)
|
[
"def",
"_get_args",
"(",
"self",
",",
"node",
",",
"keywords",
")",
":",
"args",
"=",
"super",
"(",
"ArcanaSlurmGraphPlugin",
",",
"self",
")",
".",
"_get_args",
"(",
"node",
",",
"keywords",
")",
"# Substitute the template arg with the node-specific one",
"new_args",
"=",
"[",
"]",
"for",
"name",
",",
"arg",
"in",
"zip",
"(",
"keywords",
",",
"args",
")",
":",
"if",
"name",
"==",
"'template'",
":",
"new_args",
".",
"append",
"(",
"self",
".",
"_processor",
".",
"slurm_template",
"(",
"node",
")",
")",
"else",
":",
"new_args",
".",
"append",
"(",
"arg",
")",
"return",
"tuple",
"(",
"new_args",
")"
] | 36.666667 | 14.266667 |
def get_onnx_variable_name(self, seed):
'''
        Retrieve the variable ID of the given seed, or create one if this is the first time the seed is seen
'''
if seed in self.variable_name_mapping:
return self.variable_name_mapping[seed][-1]
else:
return self.get_unique_variable_name(seed)
|
[
"def",
"get_onnx_variable_name",
"(",
"self",
",",
"seed",
")",
":",
"if",
"seed",
"in",
"self",
".",
"variable_name_mapping",
":",
"return",
"self",
".",
"variable_name_mapping",
"[",
"seed",
"]",
"[",
"-",
"1",
"]",
"else",
":",
"return",
"self",
".",
"get_unique_variable_name",
"(",
"seed",
")"
] | 42.125 | 23.625 |
def write_usage(self, prog, args='', prefix='Usage: '):
"""Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: the prefix for the first line.
"""
usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
text_width = self.width - self.current_indent
if text_width >= (term_len(usage_prefix) + 20):
# The arguments will fit to the right of the prefix.
indent = ' ' * term_len(usage_prefix)
self.write(wrap_text(args, text_width,
initial_indent=usage_prefix,
subsequent_indent=indent))
else:
# The prefix is too long, put the arguments on the next line.
self.write(usage_prefix)
self.write('\n')
indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
self.write(wrap_text(args, text_width,
initial_indent=indent,
subsequent_indent=indent))
self.write('\n')
|
[
"def",
"write_usage",
"(",
"self",
",",
"prog",
",",
"args",
"=",
"''",
",",
"prefix",
"=",
"'Usage: '",
")",
":",
"usage_prefix",
"=",
"'%*s%s '",
"%",
"(",
"self",
".",
"current_indent",
",",
"prefix",
",",
"prog",
")",
"text_width",
"=",
"self",
".",
"width",
"-",
"self",
".",
"current_indent",
"if",
"text_width",
">=",
"(",
"term_len",
"(",
"usage_prefix",
")",
"+",
"20",
")",
":",
"# The arguments will fit to the right of the prefix.",
"indent",
"=",
"' '",
"*",
"term_len",
"(",
"usage_prefix",
")",
"self",
".",
"write",
"(",
"wrap_text",
"(",
"args",
",",
"text_width",
",",
"initial_indent",
"=",
"usage_prefix",
",",
"subsequent_indent",
"=",
"indent",
")",
")",
"else",
":",
"# The prefix is too long, put the arguments on the next line.",
"self",
".",
"write",
"(",
"usage_prefix",
")",
"self",
".",
"write",
"(",
"'\\n'",
")",
"indent",
"=",
"' '",
"*",
"(",
"max",
"(",
"self",
".",
"current_indent",
",",
"term_len",
"(",
"prefix",
")",
")",
"+",
"4",
")",
"self",
".",
"write",
"(",
"wrap_text",
"(",
"args",
",",
"text_width",
",",
"initial_indent",
"=",
"indent",
",",
"subsequent_indent",
"=",
"indent",
")",
")",
"self",
".",
"write",
"(",
"'\\n'",
")"
] | 43.730769 | 18.5 |
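The wrapping logic is essentially what textwrap.fill() does with initial_indent and subsequent_indent; a standard-library sketch of the "arguments fit to the right of the prefix" branch (click's wrap_text and term_len helpers are not used here, and the program name is made up).
import textwrap

prefix = 'Usage: mytool '
args = '[OPTIONS] COMMAND [ARGS]... more words to force a wrap onto a second line'
indent = ' ' * len(prefix)
print(textwrap.fill(args, width=40,
                    initial_indent=prefix,
                    subsequent_indent=indent))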
def _write_wrapped_codestream(self, ofile, box):
"""Write wrapped codestream."""
# Codestreams require a bit more care.
# Am I a raw codestream?
if len(self.box) == 0:
# Yes, just write the codestream box header plus all
# of myself out to file.
ofile.write(struct.pack('>I', self.length + 8))
ofile.write(b'jp2c')
with open(self.filename, 'rb') as ifile:
ofile.write(ifile.read())
return
# OK, I'm a jp2/jpx file. Need to find out where the raw codestream
# actually starts.
offset = box.offset
if offset == -1:
if self.box[1].brand == 'jpx ':
msg = ("The codestream box must have its offset and length "
"attributes fully specified if the file type brand is "
"JPX.")
raise IOError(msg)
# Find the first codestream in the file.
jp2c = [_box for _box in self.box if _box.box_id == 'jp2c']
offset = jp2c[0].offset
# Ready to write the codestream.
with open(self.filename, 'rb') as ifile:
ifile.seek(offset)
# Verify that the specified codestream is right.
read_buffer = ifile.read(8)
L, T = struct.unpack_from('>I4s', read_buffer, 0)
if T != b'jp2c':
msg = "Unable to locate the specified codestream."
raise IOError(msg)
if L == 0:
# The length of the box is presumed to last until the end of
# the file. Compute the effective length of the box.
L = os.path.getsize(ifile.name) - ifile.tell() + 8
elif L == 1:
# The length of the box is in the XL field, a 64-bit value.
read_buffer = ifile.read(8)
L, = struct.unpack('>Q', read_buffer)
ifile.seek(offset)
read_buffer = ifile.read(L)
ofile.write(read_buffer)
|
[
"def",
"_write_wrapped_codestream",
"(",
"self",
",",
"ofile",
",",
"box",
")",
":",
"# Codestreams require a bit more care.",
"# Am I a raw codestream?",
"if",
"len",
"(",
"self",
".",
"box",
")",
"==",
"0",
":",
"# Yes, just write the codestream box header plus all",
"# of myself out to file.",
"ofile",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'>I'",
",",
"self",
".",
"length",
"+",
"8",
")",
")",
"ofile",
".",
"write",
"(",
"b'jp2c'",
")",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"ifile",
":",
"ofile",
".",
"write",
"(",
"ifile",
".",
"read",
"(",
")",
")",
"return",
"# OK, I'm a jp2/jpx file. Need to find out where the raw codestream",
"# actually starts.",
"offset",
"=",
"box",
".",
"offset",
"if",
"offset",
"==",
"-",
"1",
":",
"if",
"self",
".",
"box",
"[",
"1",
"]",
".",
"brand",
"==",
"'jpx '",
":",
"msg",
"=",
"(",
"\"The codestream box must have its offset and length \"",
"\"attributes fully specified if the file type brand is \"",
"\"JPX.\"",
")",
"raise",
"IOError",
"(",
"msg",
")",
"# Find the first codestream in the file.",
"jp2c",
"=",
"[",
"_box",
"for",
"_box",
"in",
"self",
".",
"box",
"if",
"_box",
".",
"box_id",
"==",
"'jp2c'",
"]",
"offset",
"=",
"jp2c",
"[",
"0",
"]",
".",
"offset",
"# Ready to write the codestream.",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"ifile",
":",
"ifile",
".",
"seek",
"(",
"offset",
")",
"# Verify that the specified codestream is right.",
"read_buffer",
"=",
"ifile",
".",
"read",
"(",
"8",
")",
"L",
",",
"T",
"=",
"struct",
".",
"unpack_from",
"(",
"'>I4s'",
",",
"read_buffer",
",",
"0",
")",
"if",
"T",
"!=",
"b'jp2c'",
":",
"msg",
"=",
"\"Unable to locate the specified codestream.\"",
"raise",
"IOError",
"(",
"msg",
")",
"if",
"L",
"==",
"0",
":",
"# The length of the box is presumed to last until the end of",
"# the file. Compute the effective length of the box.",
"L",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"ifile",
".",
"name",
")",
"-",
"ifile",
".",
"tell",
"(",
")",
"+",
"8",
"elif",
"L",
"==",
"1",
":",
"# The length of the box is in the XL field, a 64-bit value.",
"read_buffer",
"=",
"ifile",
".",
"read",
"(",
"8",
")",
"L",
",",
"=",
"struct",
".",
"unpack",
"(",
"'>Q'",
",",
"read_buffer",
")",
"ifile",
".",
"seek",
"(",
"offset",
")",
"read_buffer",
"=",
"ifile",
".",
"read",
"(",
"L",
")",
"ofile",
".",
"write",
"(",
"read_buffer",
")"
] | 40.16 | 17.54 |
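The box header written at the top of the method is a big-endian 32-bit length followed by the 4-byte box type; a minimal sketch of that framing with pretend codestream bytes.
import struct

payload = b'\xffO\xffQ'                               # pretend codestream bytes
header = struct.pack('>I', len(payload) + 8) + b'jp2c'
print(header + payload)
# b'\x00\x00\x00\x0cjp2c\xffO\xffQ' -> length 12 = 8-byte header + 4-byte payload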
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
|
[
"def",
"_setattr_url_map",
"(",
"self",
")",
":",
"if",
"self",
".",
"apiopts",
".",
"get",
"(",
"'enable_sessions'",
",",
"True",
")",
"is",
"False",
":",
"url_blacklist",
"=",
"[",
"'login'",
",",
"'logout'",
",",
"'minions'",
",",
"'jobs'",
"]",
"else",
":",
"url_blacklist",
"=",
"[",
"]",
"urls",
"=",
"(",
"(",
"url",
",",
"cls",
")",
"for",
"url",
",",
"cls",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"url_map",
")",
"if",
"url",
"not",
"in",
"url_blacklist",
")",
"for",
"url",
",",
"cls",
"in",
"urls",
":",
"setattr",
"(",
"self",
",",
"url",
",",
"cls",
"(",
")",
")"
] | 33.0625 | 23.3125 |
def parallel(view, dist='b', block=None, ordered=True, **flags):
"""Turn a function into a parallel remote function.
This method can be used for map:
In [1]: @parallel(view, block=True)
...: def func(a):
...: pass
"""
def parallel_function(f):
return ParallelFunction(view, f, dist=dist, block=block, ordered=ordered, **flags)
return parallel_function
|
[
"def",
"parallel",
"(",
"view",
",",
"dist",
"=",
"'b'",
",",
"block",
"=",
"None",
",",
"ordered",
"=",
"True",
",",
"*",
"*",
"flags",
")",
":",
"def",
"parallel_function",
"(",
"f",
")",
":",
"return",
"ParallelFunction",
"(",
"view",
",",
"f",
",",
"dist",
"=",
"dist",
",",
"block",
"=",
"block",
",",
"ordered",
"=",
"ordered",
",",
"*",
"*",
"flags",
")",
"return",
"parallel_function"
] | 30.076923 | 19.923077 |
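A short usage sketch for the decorator above (hedged: `view` is assumed to be an IPython/ipyparallel view object and the returned `ParallelFunction` is assumed to expose a `map` method):

@parallel(view, block=True)
def double(x):
    # runs on the engines behind `view`
    return 2 * x

results = double.map(range(10))   # distributes the ten calls across the engines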
def normalize_url(url: 'Union[URL, str]') -> 'URL':
"""Normalize url to make comparisons."""
url = URL(url)
return url.with_query(urlencode(sorted(parse_qsl(url.query_string))))
|
[
"def",
"normalize_url",
"(",
"url",
":",
"'Union[URL, str]'",
")",
"->",
"'URL'",
":",
"url",
"=",
"URL",
"(",
"url",
")",
"return",
"url",
".",
"with_query",
"(",
"urlencode",
"(",
"sorted",
"(",
"parse_qsl",
"(",
"url",
".",
"query_string",
")",
")",
")",
")"
] | 46.5 | 16.5 |
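A minimal illustration of the row above (hedged: `URL` is assumed to be `yarl.URL` and the `urllib.parse` helpers to be imported in the same module): two URLs that differ only in query-parameter order normalize to the same value.

a = normalize_url('http://example.com/path?b=2&a=1')
b = normalize_url('http://example.com/path?a=1&b=2')
assert a == b            # both become .../path?a=1&b=2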
def add_file_metadata(self, fname):
"""
        Collects the file's metadata. Note that this will fail
        with strange errors if the network connection to the
        shared folder drops out, but it is better to stop the program
        than to swallow the error with a try/except; otherwise you
        will get an incomplete set of files.
"""
file_dict = {}
file_dict["fullfilename"] = fname
try:
file_dict["name"] = os.path.basename(fname)
file_dict["date"] = self.GetDateAsString(fname)
file_dict["size"] = os.path.getsize(fname)
file_dict["path"] = os.path.dirname(fname)
except IOError:
print('Error getting metadata for file')
self.fl_metadata.append(file_dict)
|
[
"def",
"add_file_metadata",
"(",
"self",
",",
"fname",
")",
":",
"file_dict",
"=",
"{",
"}",
"file_dict",
"[",
"\"fullfilename\"",
"]",
"=",
"fname",
"try",
":",
"file_dict",
"[",
"\"name\"",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
"file_dict",
"[",
"\"date\"",
"]",
"=",
"self",
".",
"GetDateAsString",
"(",
"fname",
")",
"file_dict",
"[",
"\"size\"",
"]",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"fname",
")",
"file_dict",
"[",
"\"path\"",
"]",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fname",
")",
"except",
"IOError",
":",
"print",
"(",
"'Error getting metadata for file'",
")",
"self",
".",
"fl_metadata",
".",
"append",
"(",
"file_dict",
")"
] | 38.3 | 16 |
def _apply_search_backrefs(pattern, flags=0):
"""Apply the search backrefs to the search pattern."""
if isinstance(pattern, (str, bytes)):
re_verbose = VERBOSE & flags
if flags & V0:
re_version = V0
elif flags & V1:
re_version = V1
else:
re_version = 0
if not (flags & DEBUG):
pattern = _cached_search_compile(pattern, re_verbose, re_version, type(pattern))
else: # pragma: no cover
pattern = _bregex_parse._SearchParser(pattern, re_verbose, re_version).parse()
elif isinstance(pattern, Bregex):
if flags:
raise ValueError("Cannot process flags argument with a compiled pattern")
pattern = pattern._pattern
elif isinstance(pattern, _REGEX_TYPE):
if flags:
raise ValueError("Cannot process flags argument with a compiled pattern!")
else:
raise TypeError("Not a string or compiled pattern!")
return pattern
|
[
"def",
"_apply_search_backrefs",
"(",
"pattern",
",",
"flags",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"pattern",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"re_verbose",
"=",
"VERBOSE",
"&",
"flags",
"if",
"flags",
"&",
"V0",
":",
"re_version",
"=",
"V0",
"elif",
"flags",
"&",
"V1",
":",
"re_version",
"=",
"V1",
"else",
":",
"re_version",
"=",
"0",
"if",
"not",
"(",
"flags",
"&",
"DEBUG",
")",
":",
"pattern",
"=",
"_cached_search_compile",
"(",
"pattern",
",",
"re_verbose",
",",
"re_version",
",",
"type",
"(",
"pattern",
")",
")",
"else",
":",
"# pragma: no cover",
"pattern",
"=",
"_bregex_parse",
".",
"_SearchParser",
"(",
"pattern",
",",
"re_verbose",
",",
"re_version",
")",
".",
"parse",
"(",
")",
"elif",
"isinstance",
"(",
"pattern",
",",
"Bregex",
")",
":",
"if",
"flags",
":",
"raise",
"ValueError",
"(",
"\"Cannot process flags argument with a compiled pattern\"",
")",
"pattern",
"=",
"pattern",
".",
"_pattern",
"elif",
"isinstance",
"(",
"pattern",
",",
"_REGEX_TYPE",
")",
":",
"if",
"flags",
":",
"raise",
"ValueError",
"(",
"\"Cannot process flags argument with a compiled pattern!\"",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Not a string or compiled pattern!\"",
")",
"return",
"pattern"
] | 38.8 | 19.6 |
def fitToSize(rect, targetWidth, targetHeight, bounds):
"""
Pads or crops a rectangle as necessary to achieve the target dimensions,
ensuring the modified rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Determine the difference between the current size and target size
x,y,w,h = rect
diffX = w - targetWidth
diffY = h - targetHeight
# Determine if we are cropping or padding the width
if diffX > 0:
cropLeft = math.floor(diffX / 2)
cropRight = diffX - cropLeft
x,y,w,h = cropRect((x,y,w,h), 0, 0, cropLeft, cropRight)
elif diffX < 0:
padLeft = math.floor(abs(diffX) / 2)
padRight = abs(diffX) - padLeft
x,y,w,h = padRect((x,y,w,h), 0, 0, padLeft, padRight, bounds, False)
# Determine if we are cropping or padding the height
if diffY > 0:
cropTop = math.floor(diffY / 2)
cropBottom = diffY - cropTop
x,y,w,h = cropRect((x,y,w,h), cropTop, cropBottom, 0, 0)
elif diffY < 0:
padTop = math.floor(abs(diffY) / 2)
padBottom = abs(diffY) - padTop
x,y,w,h = padRect((x,y,w,h), padTop, padBottom, 0, 0, bounds, False)
return (x,y,w,h)
|
[
"def",
"fitToSize",
"(",
"rect",
",",
"targetWidth",
",",
"targetHeight",
",",
"bounds",
")",
":",
"# Determine the difference between the current size and target size",
"x",
",",
"y",
",",
"w",
",",
"h",
"=",
"rect",
"diffX",
"=",
"w",
"-",
"targetWidth",
"diffY",
"=",
"h",
"-",
"targetHeight",
"# Determine if we are cropping or padding the width",
"if",
"diffX",
">",
"0",
":",
"cropLeft",
"=",
"math",
".",
"floor",
"(",
"diffX",
"/",
"2",
")",
"cropRight",
"=",
"diffX",
"-",
"cropLeft",
"x",
",",
"y",
",",
"w",
",",
"h",
"=",
"cropRect",
"(",
"(",
"x",
",",
"y",
",",
"w",
",",
"h",
")",
",",
"0",
",",
"0",
",",
"cropLeft",
",",
"cropRight",
")",
"elif",
"diffX",
"<",
"0",
":",
"padLeft",
"=",
"math",
".",
"floor",
"(",
"abs",
"(",
"diffX",
")",
"/",
"2",
")",
"padRight",
"=",
"abs",
"(",
"diffX",
")",
"-",
"padLeft",
"x",
",",
"y",
",",
"w",
",",
"h",
"=",
"padRect",
"(",
"(",
"x",
",",
"y",
",",
"w",
",",
"h",
")",
",",
"0",
",",
"0",
",",
"padLeft",
",",
"padRight",
",",
"bounds",
",",
"False",
")",
"# Determine if we are cropping or padding the height",
"if",
"diffY",
">",
"0",
":",
"cropTop",
"=",
"math",
".",
"floor",
"(",
"diffY",
"/",
"2",
")",
"cropBottom",
"=",
"diffY",
"-",
"cropTop",
"x",
",",
"y",
",",
"w",
",",
"h",
"=",
"cropRect",
"(",
"(",
"x",
",",
"y",
",",
"w",
",",
"h",
")",
",",
"cropTop",
",",
"cropBottom",
",",
"0",
",",
"0",
")",
"elif",
"diffY",
"<",
"0",
":",
"padTop",
"=",
"math",
".",
"floor",
"(",
"abs",
"(",
"diffY",
")",
"/",
"2",
")",
"padBottom",
"=",
"abs",
"(",
"diffY",
")",
"-",
"padTop",
"x",
",",
"y",
",",
"w",
",",
"h",
"=",
"padRect",
"(",
"(",
"x",
",",
"y",
",",
"w",
",",
"h",
")",
",",
"padTop",
",",
"padBottom",
",",
"0",
",",
"0",
",",
"bounds",
",",
"False",
")",
"return",
"(",
"x",
",",
"y",
",",
"w",
",",
"h",
")"
] | 33.470588 | 20.176471 |
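A hedged walk-through of the arithmetic above (the `cropRect`/`padRect` helpers from the same module are assumed to be available):

rect   = (10, 20, 100, 80)     # x, y, w, h
bounds = (0, 0, 200, 200)
# width:  100 - 60  = 40  -> crop 20 px from the left and 20 px from the right
# height:  80 - 100 = -20 -> pad 10 px on the top and 10 px on the bottom (clamped to bounds)
fitted = fitToSize(rect, 60, 100, bounds)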
def list(self, href):
"""
        List the files and directories at the remote server.
        :param href: remote folder
        :return: (folders, files), or (None, None) if the folder doesn't exist
"""
for iTry in range(TRYINGS):
logger.info(u("list(%s): %s") % (iTry, href))
folders = None
files = None
try:
href = os.path.join(u("/"), _(href))
conn = self.getConnection()
conn.request("PROPFIND", _encode_utf8(href), u(""), self.getHeaders())
response = conn.getresponse()
checkResponse(response)
data = response.read()
if data == b('list: folder was not found'):
return folders, files
elif data == b('You are not authorized to see this!'):
return folders, files
else:
try:
dom = xml.dom.minidom.parseString(data)
responces = dom.getElementsByTagNameNS("DAV:", "response")
folders = {}
files = {}
for dom in responces:
response = RemoteObject(dom, self, href)
if response.href != href:
if response.isFolder():
folders[response.href] = response
else:
files[response.href] = response
except xml.parsers.expat.ExpatError:
e = sys.exc_info()[1]
logger.exception(e)
return folders, files
except ConnectionException:
raise
except Exception:
e = sys.exc_info()[1]
logger.exception(e)
return folders, files
|
[
"def",
"list",
"(",
"self",
",",
"href",
")",
":",
"for",
"iTry",
"in",
"range",
"(",
"TRYINGS",
")",
":",
"logger",
".",
"info",
"(",
"u",
"(",
"\"list(%s): %s\"",
")",
"%",
"(",
"iTry",
",",
"href",
")",
")",
"folders",
"=",
"None",
"files",
"=",
"None",
"try",
":",
"href",
"=",
"os",
".",
"path",
".",
"join",
"(",
"u",
"(",
"\"/\"",
")",
",",
"_",
"(",
"href",
")",
")",
"conn",
"=",
"self",
".",
"getConnection",
"(",
")",
"conn",
".",
"request",
"(",
"\"PROPFIND\"",
",",
"_encode_utf8",
"(",
"href",
")",
",",
"u",
"(",
"\"\"",
")",
",",
"self",
".",
"getHeaders",
"(",
")",
")",
"response",
"=",
"conn",
".",
"getresponse",
"(",
")",
"checkResponse",
"(",
"response",
")",
"data",
"=",
"response",
".",
"read",
"(",
")",
"if",
"data",
"==",
"b",
"(",
"'list: folder was not found'",
")",
":",
"return",
"folders",
",",
"files",
"elif",
"data",
"==",
"b",
"(",
"'You are not authorized to see this!'",
")",
":",
"return",
"folders",
",",
"files",
"else",
":",
"try",
":",
"dom",
"=",
"xml",
".",
"dom",
".",
"minidom",
".",
"parseString",
"(",
"data",
")",
"responces",
"=",
"dom",
".",
"getElementsByTagNameNS",
"(",
"\"DAV:\"",
",",
"\"response\"",
")",
"folders",
"=",
"{",
"}",
"files",
"=",
"{",
"}",
"for",
"dom",
"in",
"responces",
":",
"response",
"=",
"RemoteObject",
"(",
"dom",
",",
"self",
",",
"href",
")",
"if",
"response",
".",
"href",
"!=",
"href",
":",
"if",
"response",
".",
"isFolder",
"(",
")",
":",
"folders",
"[",
"response",
".",
"href",
"]",
"=",
"response",
"else",
":",
"files",
"[",
"response",
".",
"href",
"]",
"=",
"response",
"except",
"xml",
".",
"parsers",
".",
"expat",
".",
"ExpatError",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"logger",
".",
"exception",
"(",
"e",
")",
"return",
"folders",
",",
"files",
"except",
"ConnectionException",
":",
"raise",
"except",
"Exception",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"logger",
".",
"exception",
"(",
"e",
")",
"return",
"folders",
",",
"files"
] | 43.477273 | 13.159091 |
def use_comparative_authorization_view(self):
"""Pass through to provider AuthorizationLookupSession.use_comparative_authorization_view"""
self._object_views['authorization'] = COMPARATIVE
# self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_authorization_view()
except AttributeError:
pass
|
[
"def",
"use_comparative_authorization_view",
"(",
"self",
")",
":",
"self",
".",
"_object_views",
"[",
"'authorization'",
"]",
"=",
"COMPARATIVE",
"# self._get_provider_session('authorization_lookup_session') # To make sure the session is tracked",
"for",
"session",
"in",
"self",
".",
"_get_provider_sessions",
"(",
")",
":",
"try",
":",
"session",
".",
"use_comparative_authorization_view",
"(",
")",
"except",
"AttributeError",
":",
"pass"
] | 54.555556 | 19 |
def Gf(counts):
r'''Estimates the ideal-gas Gibbs energy of formation at 298.15 K of an
organic compound using the Joback method as a function of chemical
structure only.
.. math::
G_{formation} = 53.88 + \sum {G_{f,i}}
In the above equation, Gibbs energy of formation is calculated in
kJ/mol; it is converted to J/mol here.
328 compounds were used by Joback in this determination, with an
    absolute average error of 2.0 kcal/mol, standard deviation 4.37
kcal/mol, and AARE of 15.7%.
Parameters
----------
counts : dict
Dictionary of Joback groups present (numerically indexed) and their
counts, [-]
Returns
-------
Gf : float
Estimated ideal-gas Gibbs energy of formation at 298.15 K, [J/mol]
Examples
--------
>>> Joback.Gf({1: 2, 24: 1})
-154540.00000000003
'''
tot = 0.0
for group, count in counts.items():
tot += joback_groups_id_dict[group].Gform*count
Gf = 53.88 + tot
return Gf*1000
|
[
"def",
"Gf",
"(",
"counts",
")",
":",
"tot",
"=",
"0.0",
"for",
"group",
",",
"count",
"in",
"counts",
".",
"items",
"(",
")",
":",
"tot",
"+=",
"joback_groups_id_dict",
"[",
"group",
"]",
".",
"Gform",
"*",
"count",
"Gf",
"=",
"53.88",
"+",
"tot",
"return",
"Gf",
"*",
"1000"
] | 32.388889 | 23.666667 |
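Restating the docstring example as explicit arithmetic (the group contribution values themselves live in `joback_groups_id_dict`, so only the flow is sketched here):

counts = {1: 2, 24: 1}            # two occurrences of group 1, one of group 24
# tot = 2 * Gform(group 1) + 1 * Gform(group 24)
# Gf  = (53.88 + tot) * 1000      # kJ/mol -> J/mol, giving -154540.0 for this example
print(Joback.Gf(counts))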
def constant(duration: int, amp: complex, name: str = None) -> SamplePulse:
"""Generates constant-sampled `SamplePulse`.
Applies `left` sampling strategy to generate discrete pulse from continuous function.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Complex pulse amplitude.
name: Name of pulse.
"""
return _sampled_constant_pulse(duration, amp, name=name)
|
[
"def",
"constant",
"(",
"duration",
":",
"int",
",",
"amp",
":",
"complex",
",",
"name",
":",
"str",
"=",
"None",
")",
"->",
"SamplePulse",
":",
"return",
"_sampled_constant_pulse",
"(",
"duration",
",",
"amp",
",",
"name",
"=",
"name",
")"
] | 37.818182 | 23 |
def order_search(self, article_code, **kwargs):
        '''taobao.vas.order.search — export order records.
        Used by ISVs to query the order records of the applications and paid items under their own account. Currently, calls to this API by all applications combined are rate-limited to 200 per minute, i.e. within any given minute the total number of calls to this API across all applications may not exceed 200.'''
request = TOPRequest('taobao.vas.order.search')
request['article_code'] = article_code
for k, v in kwargs.iteritems():
if k not in ('item_code', 'nick', 'start_created', 'end_created', 'biz_type', 'biz_order_id', 'order_id', 'page_size','page_no') and v==None: continue
request[k] = v
self.create(self.execute(request), fields=['article_biz_orders','total_item'], models={'article_biz_orders':ArticleBizOrder})
return self.article_biz_orders
|
[
"def",
"order_search",
"(",
"self",
",",
"article_code",
",",
"*",
"*",
"kwargs",
")",
":",
"request",
"=",
"TOPRequest",
"(",
"'taobao.vas.order.search'",
")",
"request",
"[",
"'article_code'",
"]",
"=",
"article_code",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"(",
"'item_code'",
",",
"'nick'",
",",
"'start_created'",
",",
"'end_created'",
",",
"'biz_type'",
",",
"'biz_order_id'",
",",
"'order_id'",
",",
"'page_size'",
",",
"'page_no'",
")",
"and",
"v",
"==",
"None",
":",
"continue",
"request",
"[",
"k",
"]",
"=",
"v",
"self",
".",
"create",
"(",
"self",
".",
"execute",
"(",
"request",
")",
",",
"fields",
"=",
"[",
"'article_biz_orders'",
",",
"'total_item'",
"]",
",",
"models",
"=",
"{",
"'article_biz_orders'",
":",
"ArticleBizOrder",
"}",
")",
"return",
"self",
".",
"article_biz_orders"
] | 62.272727 | 31.181818 |
async def _request(
self,
method: str,
endpoint: str,
*,
headers: dict = None,
params: dict = None,
json: dict = None,
ssl: bool = True) -> dict:
"""Wrap the generic request method to add access token, etc."""
return await self._client_request(
method,
'{0}/{1}'.format(self._host, endpoint),
access_token=self._access_token,
access_token_expiration=self._access_token_expiration,
headers=headers,
params=params,
json=json,
ssl=ssl)
|
[
"async",
"def",
"_request",
"(",
"self",
",",
"method",
":",
"str",
",",
"endpoint",
":",
"str",
",",
"*",
",",
"headers",
":",
"dict",
"=",
"None",
",",
"params",
":",
"dict",
"=",
"None",
",",
"json",
":",
"dict",
"=",
"None",
",",
"ssl",
":",
"bool",
"=",
"True",
")",
"->",
"dict",
":",
"return",
"await",
"self",
".",
"_client_request",
"(",
"method",
",",
"'{0}/{1}'",
".",
"format",
"(",
"self",
".",
"_host",
",",
"endpoint",
")",
",",
"access_token",
"=",
"self",
".",
"_access_token",
",",
"access_token_expiration",
"=",
"self",
".",
"_access_token_expiration",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"json",
"=",
"json",
",",
"ssl",
"=",
"ssl",
")"
] | 32.736842 | 13.421053 |
def get(name, required=False, default=empty, type=None):
"""Generic getter for environment variables. Handles defaults,
required-ness, and what type to expect.
    :param name: The name of the environment variable to be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
    :type default: object
:param type: The type of variable expected.
    :type type: str or type
"""
fn = {
'int': env_int,
int: env_int,
'bool': env_bool,
bool: env_bool,
'string': env_string,
str: env_string,
'list': env_list,
list: env_list,
'timestamp': env_timestamp,
datetime.time: env_timestamp,
'datetime': env_iso8601,
datetime.datetime: env_iso8601,
}.get(type, env_string)
return fn(name, default=default, required=required)
|
[
"def",
"get",
"(",
"name",
",",
"required",
"=",
"False",
",",
"default",
"=",
"empty",
",",
"type",
"=",
"None",
")",
":",
"fn",
"=",
"{",
"'int'",
":",
"env_int",
",",
"int",
":",
"env_int",
",",
"'bool'",
":",
"env_bool",
",",
"bool",
":",
"env_bool",
",",
"'string'",
":",
"env_string",
",",
"str",
":",
"env_string",
",",
"'list'",
":",
"env_list",
",",
"list",
":",
"env_list",
",",
"'timestamp'",
":",
"env_timestamp",
",",
"datetime",
".",
"time",
":",
"env_timestamp",
",",
"'datetime'",
":",
"env_iso8601",
",",
"datetime",
".",
"datetime",
":",
"env_iso8601",
",",
"}",
".",
"get",
"(",
"type",
",",
"env_string",
")",
"return",
"fn",
"(",
"name",
",",
"default",
"=",
"default",
",",
"required",
"=",
"required",
")"
] | 28.692308 | 21.282051 |
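A hedged usage sketch for the getter above; the environment values are hypothetical and the list form assumes `env_list` splits on commas:

# With WORKERS=4, DEBUG=true and ALLOWED_HOSTS=a.example,b.example in the environment:
workers = get('WORKERS', type=int, default=1)           # -> 4
debug   = get('DEBUG', type=bool, default=False)        # -> True
hosts   = get('ALLOWED_HOSTS', type=list, default=[])   # -> ['a.example', 'b.example']
secret  = get('SECRET_KEY', required=True)              # raises KeyError when unset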
def _get_dbid2goids(associations):
"""Return gene2go data for user-specified taxids."""
id2gos = cx.defaultdict(set)
for ntd in associations:
id2gos[ntd.DB_ID].add(ntd.GO_ID)
return dict(id2gos)
|
[
"def",
"_get_dbid2goids",
"(",
"associations",
")",
":",
"id2gos",
"=",
"cx",
".",
"defaultdict",
"(",
"set",
")",
"for",
"ntd",
"in",
"associations",
":",
"id2gos",
"[",
"ntd",
".",
"DB_ID",
"]",
".",
"add",
"(",
"ntd",
".",
"GO_ID",
")",
"return",
"dict",
"(",
"id2gos",
")"
] | 38.833333 | 5.833333 |
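A small illustration of the grouping performed above, using hypothetical namedtuple records (only the DB_ID and GO_ID fields matter):

from collections import namedtuple

Assoc = namedtuple('Assoc', 'DB_ID GO_ID')
assocs = [Assoc('P12345', 'GO:0008150'),
          Assoc('P12345', 'GO:0003674'),
          Assoc('Q67890', 'GO:0008150')]
# _get_dbid2goids(assocs) -> {'P12345': {'GO:0008150', 'GO:0003674'}, 'Q67890': {'GO:0008150'}}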
def log_very_verbose(self, message):
"""
Logs a message only when logging level is very verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERY_VERBOSE:
self.writeln(message)
|
[
"def",
"log_very_verbose",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"get_verbosity",
"(",
")",
">=",
"Output",
".",
"VERBOSITY_VERY_VERBOSE",
":",
"self",
".",
"writeln",
"(",
"message",
")"
] | 33.625 | 13.625 |
def folderitems(self):
"""Generate folderitems for each version
"""
items = []
# get the snapshots
snapshots = get_snapshots(self.context)
# reverse the order to get the most recent change first
snapshots = list(reversed(snapshots))
# set the total number of items
self.total = len(snapshots)
# slice a batch
batch = snapshots[self.limit_from:self.limit_from+self.pagesize]
for snapshot in batch:
item = self.make_empty_item(**snapshot)
# get the version of the snapshot
version = get_snapshot_version(self.context, snapshot)
# Version
item["version"] = version
# get the metadata of the diff
metadata = get_snapshot_metadata(snapshot)
# Modification Date
m_date = metadata.get("modified")
item["modified"] = self.to_localized_time(m_date)
# Actor
actor = metadata.get("actor")
item["actor"] = actor
# Fullname
properties = api.get_user_properties(actor)
item["fullname"] = properties.get("fullname", actor)
# Roles
roles = metadata.get("roles", [])
item["roles"] = ", ".join(roles)
# Remote Address
remote_address = metadata.get("remote_address")
item["remote_address"] = remote_address
# Action
action = metadata.get("action")
item["action"] = self.translate_state(action)
# Review State
review_state = metadata.get("review_state")
item["review_state"] = self.translate_state(review_state)
# get the previous snapshot
prev_snapshot = get_snapshot_by_version(self.context, version-1)
if prev_snapshot:
prev_metadata = get_snapshot_metadata(prev_snapshot)
prev_review_state = prev_metadata.get("review_state")
if prev_review_state != review_state:
item["replace"]["review_state"] = "{} → {}".format(
self.translate_state(prev_review_state),
self.translate_state(review_state))
# Rendered Diff
diff = compare_snapshots(snapshot, prev_snapshot)
item["diff"] = self.render_diff(diff)
# append the item
items.append(item)
return items
|
[
"def",
"folderitems",
"(",
"self",
")",
":",
"items",
"=",
"[",
"]",
"# get the snapshots",
"snapshots",
"=",
"get_snapshots",
"(",
"self",
".",
"context",
")",
"# reverse the order to get the most recent change first",
"snapshots",
"=",
"list",
"(",
"reversed",
"(",
"snapshots",
")",
")",
"# set the total number of items",
"self",
".",
"total",
"=",
"len",
"(",
"snapshots",
")",
"# slice a batch",
"batch",
"=",
"snapshots",
"[",
"self",
".",
"limit_from",
":",
"self",
".",
"limit_from",
"+",
"self",
".",
"pagesize",
"]",
"for",
"snapshot",
"in",
"batch",
":",
"item",
"=",
"self",
".",
"make_empty_item",
"(",
"*",
"*",
"snapshot",
")",
"# get the version of the snapshot",
"version",
"=",
"get_snapshot_version",
"(",
"self",
".",
"context",
",",
"snapshot",
")",
"# Version",
"item",
"[",
"\"version\"",
"]",
"=",
"version",
"# get the metadata of the diff",
"metadata",
"=",
"get_snapshot_metadata",
"(",
"snapshot",
")",
"# Modification Date",
"m_date",
"=",
"metadata",
".",
"get",
"(",
"\"modified\"",
")",
"item",
"[",
"\"modified\"",
"]",
"=",
"self",
".",
"to_localized_time",
"(",
"m_date",
")",
"# Actor",
"actor",
"=",
"metadata",
".",
"get",
"(",
"\"actor\"",
")",
"item",
"[",
"\"actor\"",
"]",
"=",
"actor",
"# Fullname",
"properties",
"=",
"api",
".",
"get_user_properties",
"(",
"actor",
")",
"item",
"[",
"\"fullname\"",
"]",
"=",
"properties",
".",
"get",
"(",
"\"fullname\"",
",",
"actor",
")",
"# Roles",
"roles",
"=",
"metadata",
".",
"get",
"(",
"\"roles\"",
",",
"[",
"]",
")",
"item",
"[",
"\"roles\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"roles",
")",
"# Remote Address",
"remote_address",
"=",
"metadata",
".",
"get",
"(",
"\"remote_address\"",
")",
"item",
"[",
"\"remote_address\"",
"]",
"=",
"remote_address",
"# Action",
"action",
"=",
"metadata",
".",
"get",
"(",
"\"action\"",
")",
"item",
"[",
"\"action\"",
"]",
"=",
"self",
".",
"translate_state",
"(",
"action",
")",
"# Review State",
"review_state",
"=",
"metadata",
".",
"get",
"(",
"\"review_state\"",
")",
"item",
"[",
"\"review_state\"",
"]",
"=",
"self",
".",
"translate_state",
"(",
"review_state",
")",
"# get the previous snapshot",
"prev_snapshot",
"=",
"get_snapshot_by_version",
"(",
"self",
".",
"context",
",",
"version",
"-",
"1",
")",
"if",
"prev_snapshot",
":",
"prev_metadata",
"=",
"get_snapshot_metadata",
"(",
"prev_snapshot",
")",
"prev_review_state",
"=",
"prev_metadata",
".",
"get",
"(",
"\"review_state\"",
")",
"if",
"prev_review_state",
"!=",
"review_state",
":",
"item",
"[",
"\"replace\"",
"]",
"[",
"\"review_state\"",
"]",
"=",
"\"{} → {}\"",
".",
"format",
"(",
"self",
".",
"translate_state",
"(",
"prev_review_state",
")",
",",
"self",
".",
"translate_state",
"(",
"review_state",
")",
")",
"# Rendered Diff",
"diff",
"=",
"compare_snapshots",
"(",
"snapshot",
",",
"prev_snapshot",
")",
"item",
"[",
"\"diff\"",
"]",
"=",
"self",
".",
"render_diff",
"(",
"diff",
")",
"# append the item",
"items",
".",
"append",
"(",
"item",
")",
"return",
"items"
] | 34.914286 | 19.557143 |
def force_plot(base_value, shap_values, features=None, feature_names=None, out_names=None, link="identity",
plot_cmap="RdBu", matplotlib=False, show=True, figsize=(20,3), ordering_keys=None, ordering_keys_time_format=None,
text_rotation=0):
""" Visualize the given SHAP values with an additive force layout.
Parameters
----------
base_value : float
This is the reference value that the feature contributions start from. For SHAP values it should
be the value of explainer.expected_value.
shap_values : numpy.array
Matrix of SHAP values (# features) or (# samples x # features). If this is a 1D array then a single
force plot will be drawn, if it is a 2D array then a stacked force plot will be drawn.
features : numpy.array
Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
features, and should be the same shape as the shap_values argument.
feature_names : list
List of feature names (# features).
out_names : str
        The name of the output of the model (plural to support multi-output plotting in the future).
link : "identity" or "logit"
The transformation used when drawing the tick mark labels. Using logit will change log-odds numbers
into probabilities.
matplotlib : bool
Whether to use the default Javascript output, or the (less developed) matplotlib output. Using matplotlib
can be helpful in scenarios where rendering Javascript/HTML is inconvenient.
"""
# auto unwrap the base_value
if type(base_value) == np.ndarray and len(base_value) == 1:
base_value = base_value[0]
if (type(base_value) == np.ndarray or type(base_value) == list):
if type(shap_values) != list or len(shap_values) != len(base_value):
raise Exception("In v0.20 force_plot now requires the base value as the first parameter! " \
"Try shap.force_plot(explainer.expected_value, shap_values) or " \
"for multi-output models try " \
"shap.force_plot(explainer.expected_value[0], shap_values[0]).")
    assert not type(shap_values) == list, "The shap_values arg looks multi output, try shap_values[i]."
link = convert_to_link(link)
if type(shap_values) != np.ndarray:
return visualize(shap_values)
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = list(features.columns)
features = features.values
elif str(type(features)) == "<class 'pandas.core.series.Series'>":
if feature_names is None:
feature_names = list(features.index)
features = features.values
elif isinstance(features, list):
if feature_names is None:
feature_names = features
features = None
elif features is not None and len(features.shape) == 1 and feature_names is None:
feature_names = features
features = None
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, (1, len(shap_values)))
if out_names is None:
out_names = ["output value"]
elif type(out_names) == str:
out_names = [out_names]
if shap_values.shape[0] == 1:
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
if features is None:
features = ["" for _ in range(len(feature_names))]
if type(features) == np.ndarray:
features = features.flatten()
# check that the shape of the shap_values and features match
if len(features) != shap_values.shape[1]:
msg = "Length of features is not equal to the length of shap_values!"
if len(features) == shap_values.shape[1] - 1:
msg += " You might be using an old format shap_values array with the base value " \
"as the last column. In this case just pass the array without the last column."
raise Exception(msg)
instance = Instance(np.zeros((1, len(feature_names))), features)
e = AdditiveExplanation(
base_value,
np.sum(shap_values[0, :]) + base_value,
shap_values[0, :],
None,
instance,
link,
Model(None, out_names),
DenseData(np.zeros((1, len(feature_names))), list(feature_names))
)
return visualize(e, plot_cmap, matplotlib, figsize=figsize, show=show, text_rotation=text_rotation)
else:
if matplotlib:
raise Exception("matplotlib = True is not yet supported for force plots with multiple samples!")
if shap_values.shape[0] > 3000:
warnings.warn("shap.force_plot is slow for many thousands of rows, try subsampling your data.")
exps = []
for i in range(shap_values.shape[0]):
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
if features is None:
display_features = ["" for i in range(len(feature_names))]
else:
display_features = features[i, :]
instance = Instance(np.ones((1, len(feature_names))), display_features)
e = AdditiveExplanation(
base_value,
np.sum(shap_values[i, :]) + base_value,
shap_values[i, :],
None,
instance,
link,
Model(None, out_names),
DenseData(np.ones((1, len(feature_names))), list(feature_names))
)
exps.append(e)
return visualize(
exps,
plot_cmap=plot_cmap,
ordering_keys=ordering_keys,
ordering_keys_time_format=ordering_keys_time_format,
text_rotation=text_rotation
)
|
[
"def",
"force_plot",
"(",
"base_value",
",",
"shap_values",
",",
"features",
"=",
"None",
",",
"feature_names",
"=",
"None",
",",
"out_names",
"=",
"None",
",",
"link",
"=",
"\"identity\"",
",",
"plot_cmap",
"=",
"\"RdBu\"",
",",
"matplotlib",
"=",
"False",
",",
"show",
"=",
"True",
",",
"figsize",
"=",
"(",
"20",
",",
"3",
")",
",",
"ordering_keys",
"=",
"None",
",",
"ordering_keys_time_format",
"=",
"None",
",",
"text_rotation",
"=",
"0",
")",
":",
"# auto unwrap the base_value",
"if",
"type",
"(",
"base_value",
")",
"==",
"np",
".",
"ndarray",
"and",
"len",
"(",
"base_value",
")",
"==",
"1",
":",
"base_value",
"=",
"base_value",
"[",
"0",
"]",
"if",
"(",
"type",
"(",
"base_value",
")",
"==",
"np",
".",
"ndarray",
"or",
"type",
"(",
"base_value",
")",
"==",
"list",
")",
":",
"if",
"type",
"(",
"shap_values",
")",
"!=",
"list",
"or",
"len",
"(",
"shap_values",
")",
"!=",
"len",
"(",
"base_value",
")",
":",
"raise",
"Exception",
"(",
"\"In v0.20 force_plot now requires the base value as the first parameter! \"",
"\"Try shap.force_plot(explainer.expected_value, shap_values) or \"",
"\"for multi-output models try \"",
"\"shap.force_plot(explainer.expected_value[0], shap_values[0]).\"",
")",
"assert",
"not",
"type",
"(",
"shap_values",
")",
"==",
"list",
",",
"\"The shap_values arg looks looks multi output, try shap_values[i].\"",
"link",
"=",
"convert_to_link",
"(",
"link",
")",
"if",
"type",
"(",
"shap_values",
")",
"!=",
"np",
".",
"ndarray",
":",
"return",
"visualize",
"(",
"shap_values",
")",
"# convert from a DataFrame or other types",
"if",
"str",
"(",
"type",
"(",
"features",
")",
")",
"==",
"\"<class 'pandas.core.frame.DataFrame'>\"",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"list",
"(",
"features",
".",
"columns",
")",
"features",
"=",
"features",
".",
"values",
"elif",
"str",
"(",
"type",
"(",
"features",
")",
")",
"==",
"\"<class 'pandas.core.series.Series'>\"",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"list",
"(",
"features",
".",
"index",
")",
"features",
"=",
"features",
".",
"values",
"elif",
"isinstance",
"(",
"features",
",",
"list",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"features",
"features",
"=",
"None",
"elif",
"features",
"is",
"not",
"None",
"and",
"len",
"(",
"features",
".",
"shape",
")",
"==",
"1",
"and",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"features",
"features",
"=",
"None",
"if",
"len",
"(",
"shap_values",
".",
"shape",
")",
"==",
"1",
":",
"shap_values",
"=",
"np",
".",
"reshape",
"(",
"shap_values",
",",
"(",
"1",
",",
"len",
"(",
"shap_values",
")",
")",
")",
"if",
"out_names",
"is",
"None",
":",
"out_names",
"=",
"[",
"\"output value\"",
"]",
"elif",
"type",
"(",
"out_names",
")",
"==",
"str",
":",
"out_names",
"=",
"[",
"out_names",
"]",
"if",
"shap_values",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"[",
"labels",
"[",
"'FEATURE'",
"]",
"%",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"if",
"features",
"is",
"None",
":",
"features",
"=",
"[",
"\"\"",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"feature_names",
")",
")",
"]",
"if",
"type",
"(",
"features",
")",
"==",
"np",
".",
"ndarray",
":",
"features",
"=",
"features",
".",
"flatten",
"(",
")",
"# check that the shape of the shap_values and features match",
"if",
"len",
"(",
"features",
")",
"!=",
"shap_values",
".",
"shape",
"[",
"1",
"]",
":",
"msg",
"=",
"\"Length of features is not equal to the length of shap_values!\"",
"if",
"len",
"(",
"features",
")",
"==",
"shap_values",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
":",
"msg",
"+=",
"\" You might be using an old format shap_values array with the base value \"",
"\"as the last column. In this case just pass the array without the last column.\"",
"raise",
"Exception",
"(",
"msg",
")",
"instance",
"=",
"Instance",
"(",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"features",
")",
"e",
"=",
"AdditiveExplanation",
"(",
"base_value",
",",
"np",
".",
"sum",
"(",
"shap_values",
"[",
"0",
",",
":",
"]",
")",
"+",
"base_value",
",",
"shap_values",
"[",
"0",
",",
":",
"]",
",",
"None",
",",
"instance",
",",
"link",
",",
"Model",
"(",
"None",
",",
"out_names",
")",
",",
"DenseData",
"(",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"list",
"(",
"feature_names",
")",
")",
")",
"return",
"visualize",
"(",
"e",
",",
"plot_cmap",
",",
"matplotlib",
",",
"figsize",
"=",
"figsize",
",",
"show",
"=",
"show",
",",
"text_rotation",
"=",
"text_rotation",
")",
"else",
":",
"if",
"matplotlib",
":",
"raise",
"Exception",
"(",
"\"matplotlib = True is not yet supported for force plots with multiple samples!\"",
")",
"if",
"shap_values",
".",
"shape",
"[",
"0",
"]",
">",
"3000",
":",
"warnings",
".",
"warn",
"(",
"\"shap.force_plot is slow for many thousands of rows, try subsampling your data.\"",
")",
"exps",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"[",
"labels",
"[",
"'FEATURE'",
"]",
"%",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"shap_values",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"if",
"features",
"is",
"None",
":",
"display_features",
"=",
"[",
"\"\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"feature_names",
")",
")",
"]",
"else",
":",
"display_features",
"=",
"features",
"[",
"i",
",",
":",
"]",
"instance",
"=",
"Instance",
"(",
"np",
".",
"ones",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"display_features",
")",
"e",
"=",
"AdditiveExplanation",
"(",
"base_value",
",",
"np",
".",
"sum",
"(",
"shap_values",
"[",
"i",
",",
":",
"]",
")",
"+",
"base_value",
",",
"shap_values",
"[",
"i",
",",
":",
"]",
",",
"None",
",",
"instance",
",",
"link",
",",
"Model",
"(",
"None",
",",
"out_names",
")",
",",
"DenseData",
"(",
"np",
".",
"ones",
"(",
"(",
"1",
",",
"len",
"(",
"feature_names",
")",
")",
")",
",",
"list",
"(",
"feature_names",
")",
")",
")",
"exps",
".",
"append",
"(",
"e",
")",
"return",
"visualize",
"(",
"exps",
",",
"plot_cmap",
"=",
"plot_cmap",
",",
"ordering_keys",
"=",
"ordering_keys",
",",
"ordering_keys_time_format",
"=",
"ordering_keys_time_format",
",",
"text_rotation",
"=",
"text_rotation",
")"
] | 41.744828 | 26.262069 |
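A typical call pattern matching the docstring above (hedged: `model` and `X` are assumed to exist, and a single-output tree model is assumed so `shap_values` is a plain array):

import shap

explainer   = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)
# one force plot for the first row of X
shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])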
def _strip_placeholder_braces(p_matchobj):
"""
Returns string with conditional braces around placeholder stripped and
percent sign glued into placeholder character.
Returned string is composed from 'start', 'before', 'placeholder', 'after',
'whitespace', and 'end' match-groups of p_matchobj. Conditional braces are
stripped from 'before' and 'after' groups. 'whitespace', 'start', and 'end'
groups are preserved without any change.
    Using this function as a 'repl' argument in re.sub, it is possible to turn:
%{(}B{)}
into:
(%B)
"""
before = p_matchobj.group('before') or ''
placeholder = p_matchobj.group('placeholder')
after = p_matchobj.group('after') or ''
whitespace = p_matchobj.group('whitespace') or ''
return before + '%' + placeholder + after + whitespace
|
[
"def",
"_strip_placeholder_braces",
"(",
"p_matchobj",
")",
":",
"before",
"=",
"p_matchobj",
".",
"group",
"(",
"'before'",
")",
"or",
"''",
"placeholder",
"=",
"p_matchobj",
".",
"group",
"(",
"'placeholder'",
")",
"after",
"=",
"p_matchobj",
".",
"group",
"(",
"'after'",
")",
"or",
"''",
"whitespace",
"=",
"p_matchobj",
".",
"group",
"(",
"'whitespace'",
")",
"or",
"''",
"return",
"before",
"+",
"'%'",
"+",
"placeholder",
"+",
"after",
"+",
"whitespace"
] | 39.238095 | 21.714286 |
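A runnable sketch of the substitution described in the docstring; the pattern below is hypothetical (the real module defines its own regex, including 'start' and 'end' groups that are not needed for this illustration):

import re

_PLACEHOLDER_RE = re.compile(
    r'%(?:\{(?P<before>[^}]*)\})?(?P<placeholder>\w)'
    r'(?:\{(?P<after>[^}]*)\})?(?P<whitespace>\s*)')

print(_PLACEHOLDER_RE.sub(_strip_placeholder_braces, '%{(}B{)} done'))   # -> '(%B) done'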
def _reset(self, load):
"""
Reset sorted list load.
The *load* specifies the load-factor of the list. The default load
factor of '1000' works well for lists from tens to tens of millions of
elements. Good practice is to use a value that is the cube root of the
list size. With billions of elements, the best load factor depends on
your usage. It's best to leave the load factor at the default until
you start benchmarking.
"""
values = reduce(iadd, self._lists, [])
self._clear()
self._load = load
self._half = load >> 1
self._dual = load << 1
self._update(values)
|
[
"def",
"_reset",
"(",
"self",
",",
"load",
")",
":",
"values",
"=",
"reduce",
"(",
"iadd",
",",
"self",
".",
"_lists",
",",
"[",
"]",
")",
"self",
".",
"_clear",
"(",
")",
"self",
".",
"_load",
"=",
"load",
"self",
".",
"_half",
"=",
"load",
">>",
"1",
"self",
".",
"_dual",
"=",
"load",
"<<",
"1",
"self",
".",
"_update",
"(",
"values",
")"
] | 39.529412 | 19.529412 |
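Following the docstring's own guidance (a sortedcontainers-style `SortedList` is assumed), the load factor can be retuned to roughly the cube root of the list size:

from sortedcontainers import SortedList

sl = SortedList(range(1_000_000))
sl._reset(100)    # cube root of one million elements is 100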
def __read(self, i: int) -> bytes:
"""Returns a set number (i) of bytes from self.data."""
b = self.data[self.idx: self.idx + i]
self.idx += i
if len(b) != i:
raise bencodepy.DecodingError(
"Incorrect byte length returned between indexes of {0} and {1}. Possible unexpected End of File."
.format(str(self.idx), str(self.idx - i)))
return b
|
[
"def",
"__read",
"(",
"self",
",",
"i",
":",
"int",
")",
"->",
"bytes",
":",
"b",
"=",
"self",
".",
"data",
"[",
"self",
".",
"idx",
":",
"self",
".",
"idx",
"+",
"i",
"]",
"self",
".",
"idx",
"+=",
"i",
"if",
"len",
"(",
"b",
")",
"!=",
"i",
":",
"raise",
"bencodepy",
".",
"DecodingError",
"(",
"\"Incorrect byte length returned between indexes of {0} and {1}. Possible unexpected End of File.\"",
".",
"format",
"(",
"str",
"(",
"self",
".",
"idx",
")",
",",
"str",
"(",
"self",
".",
"idx",
"-",
"i",
")",
")",
")",
"return",
"b"
] | 46.555556 | 18.666667 |
def export_coreml(self, filename,
include_non_maximum_suppression = True,
iou_threshold = None,
confidence_threshold = None):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size as input and produces two output arrays: `confidence` and
`coordinates`.
The first one, `confidence` is an `N`-by-`C` array, where `N` is the
number of instances predicted and `C` is the number of classes. The
number `N` is fixed and will include many low-confidence predictions.
The instances are not sorted by confidence, so the first one will
generally not have the highest confidence (unlike in `predict`). Also
unlike the `predict` function, the instances have not undergone
what is called `non-maximum suppression`, which means there could be
several instances close in location and size that have all discovered
the same object instance. Confidences do not need to sum to 1 over the
        classes; any remaining probability is implied as confidence that there is no
object instance present at all at the given coordinates. The classes
appear in the array alphabetically sorted.
The second array `coordinates` is of size `N`-by-4, where the first
dimension `N` again represents instances and corresponds to the
`confidence` array. The second dimension represents `x`, `y`, `width`,
`height`, in that order. The values are represented in relative
coordinates, so (0.5, 0.5) represents the center of the image and (1,
1) the bottom right corner. You will need to multiply the relative
values with the original image size before you resized it to the fixed
input size to get pixel-value coordinates similar to `predict`.
See Also
--------
save
Parameters
----------
filename : string
The path of the file where we want to save the Core ML model.
include_non_maximum_suppression : bool
Non-maximum suppression is only available in iOS 12+.
A boolean parameter to indicate whether the Core ML model should be
saved with built-in non-maximum suppression or not.
This parameter is set to True by default.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
suppression is. A value of 1 means no maximum suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
Examples
--------
>>> model.export_coreml('detector.mlmodel')
"""
import mxnet as _mx
from .._mxnet._mxnet_to_coreml import _mxnet_converter
import coremltools
from coremltools.models import datatypes, neural_network
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.25
preds_per_box = 5 + self.num_classes
num_anchors = len(self.anchors)
num_classes = self.num_classes
batch_size = 1
image_shape = (batch_size,) + tuple(self.input_image_shape)
s_image_uint8 = _mx.sym.Variable(self.feature, shape=image_shape, dtype=_np.float32)
s_image = s_image_uint8 / 255
# Swap a maxpool+slice in mxnet to a coreml natively supported layer
from copy import copy
net = copy(self._model)
net._children = copy(self._model._children)
from ._model import _SpecialDarknetMaxpoolBlock
op = _SpecialDarknetMaxpoolBlock(name='pool5')
# Make sure we are removing the right layers
assert (self._model[23].name == 'pool5' and
self._model[24].name == 'specialcrop5')
del net._children[24]
net._children[23] = op
s_ymap = net(s_image)
mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
# Copy over params from net
mod.init_params()
arg_params, aux_params = mod.get_params()
net_params = net.collect_params()
new_arg_params = {}
for k, param in arg_params.items():
new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
new_aux_params = {}
for k, param in aux_params.items():
new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
mod.set_params(new_arg_params, new_aux_params)
input_names = [self.feature]
input_dims = [list(self.input_image_shape)]
input_types = [datatypes.Array(*dim) for dim in input_dims]
input_features = list(zip(input_names, input_types))
num_spatial = self._grid_shape[0] * self._grid_shape[1]
num_bounding_boxes = num_anchors * num_spatial
CONFIDENCE_STR = ("raw_confidence" if include_non_maximum_suppression
else "confidence")
COORDINATES_STR = ("raw_coordinates" if include_non_maximum_suppression
else "coordinates")
output_names = [
CONFIDENCE_STR,
COORDINATES_STR
]
output_dims = [
(num_bounding_boxes, num_classes),
(num_bounding_boxes, 4),
]
output_types = [datatypes.Array(*dim) for dim in output_dims]
output_features = list(zip(output_names, output_types))
mode = None
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
_mxnet_converter.convert(mod, mode=None,
input_shape=[(self.feature, image_shape)],
builder=builder, verbose=False)
prefix = '__tc__internal__'
# (1, B, C+5, S*S)
builder.add_reshape(name=prefix + 'ymap_sp_pre',
target_shape=[batch_size, num_anchors, preds_per_box, num_spatial],
mode=0,
input_name='conv8_fwd_output',
output_name=prefix + 'ymap_sp_pre')
# (1, C+5, B, S*S)
builder.add_permute(name=prefix + 'ymap_sp',
dim=[0, 2, 1, 3],
input_name=prefix + 'ymap_sp_pre',
output_name=prefix + 'ymap_sp')
# POSITION: X/Y
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_xy_sp',
axis='channel',
start_index=0,
end_index=2,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_xy_sp')
# (1, 2, B, S*S)
builder.add_activation(name=prefix + 'rel_xy_sp',
non_linearity='SIGMOID',
input_name=prefix + 'raw_rel_xy_sp',
output_name=prefix + 'rel_xy_sp')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'rel_xy',
target_shape=[batch_size, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'rel_xy_sp',
output_name=prefix + 'rel_xy')
c_xy = _np.array(_np.meshgrid(_np.arange(self._grid_shape[1]),
_np.arange(self._grid_shape[0])), dtype=_np.float32)
c_xy_reshaped = (_np.tile(c_xy[:, _np.newaxis], (num_anchors, 1, 1))
.reshape(2, -1))[_np.newaxis, ..., _np.newaxis]
# (1, 2, B*H*W, 1)
builder.add_load_constant(prefix + 'constant_xy',
constant_value=c_xy_reshaped,
shape=c_xy_reshaped.shape[1:],
output_name=prefix + 'constant_xy')
# (1, 2, B*H*W, 1)
builder.add_elementwise(name=prefix + 'xy',
mode='ADD',
input_names=[prefix + 'constant_xy', prefix + 'rel_xy'],
output_name=prefix + 'xy')
# SHAPE: WIDTH/HEIGHT
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_wh_sp',
axis='channel',
start_index=2,
end_index=4,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_wh_sp')
# (1, 2, B, S*S)
builder.add_unary(name=prefix + 'rel_wh_sp',
mode='exp',
input_name=prefix + 'raw_rel_wh_sp',
output_name=prefix + 'rel_wh_sp')
# (1, 2*B, S, S)
builder.add_reshape(name=prefix + 'rel_wh',
target_shape=[batch_size, 2 * num_anchors] + list(self._grid_shape),
mode=0,
input_name=prefix + 'rel_wh_sp',
output_name=prefix + 'rel_wh')
np_anchors = _np.asarray(self.anchors, dtype=_np.float32).T
anchors_0 = _np.tile(np_anchors.reshape([2 * num_anchors, 1, 1]), self._grid_shape)
# (1, 2*B, S, S)
builder.add_load_constant(name=prefix + 'c_anchors',
constant_value=anchors_0,
shape=anchors_0.shape,
output_name=prefix + 'c_anchors')
# (1, 2*B, S, S)
builder.add_elementwise(name=prefix + 'wh_pre',
mode='MULTIPLY',
input_names=[prefix + 'c_anchors', prefix + 'rel_wh'],
output_name=prefix + 'wh_pre')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'wh',
target_shape=[1, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'wh_pre',
output_name=prefix + 'wh')
# (1, 4, B*H*W, 1)
builder.add_elementwise(name=prefix + 'boxes_out_transposed',
mode='CONCAT',
input_names=[prefix + 'xy', prefix + 'wh'],
output_name=prefix + 'boxes_out_transposed')
# (1, B*H*W, 4, 1)
builder.add_permute(name=prefix + 'boxes_out',
dim=[0, 2, 1, 3],
input_name=prefix + 'boxes_out_transposed',
output_name=prefix + 'boxes_out')
scale = _np.zeros((num_bounding_boxes, 4, 1))
scale[:, 0::2] = 1.0 / self._grid_shape[1]
scale[:, 1::2] = 1.0 / self._grid_shape[0]
# (1, B*H*W, 4, 1)
builder.add_scale(name=COORDINATES_STR,
W=scale,
b=0,
has_bias=False,
shape_scale=(num_bounding_boxes, 4, 1),
input_name=prefix + 'boxes_out',
output_name=COORDINATES_STR)
# CLASS PROBABILITIES AND OBJECT CONFIDENCE
# (1, C, B, H*W)
builder.add_slice(name=prefix + 'scores_sp',
axis='channel',
start_index=5,
end_index=preds_per_box,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'scores_sp')
# (1, C, B, H*W)
builder.add_softmax(name=prefix + 'probs_sp',
input_name=prefix + 'scores_sp',
output_name=prefix + 'probs_sp')
# (1, 1, B, H*W)
builder.add_slice(name=prefix + 'logit_conf_sp',
axis='channel',
start_index=4,
end_index=5,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'logit_conf_sp')
# (1, 1, B, H*W)
builder.add_activation(name=prefix + 'conf_sp',
non_linearity='SIGMOID',
input_name=prefix + 'logit_conf_sp',
output_name=prefix + 'conf_sp')
# (1, C, B, H*W)
if num_classes > 1:
conf = prefix + 'conf_tiled_sp'
builder.add_elementwise(name=prefix + 'conf_tiled_sp',
mode='CONCAT',
input_names=[prefix+'conf_sp']*num_classes,
output_name=conf)
else:
conf = prefix + 'conf_sp'
# (1, C, B, H*W)
builder.add_elementwise(name=prefix + 'confprobs_sp',
mode='MULTIPLY',
input_names=[conf, prefix + 'probs_sp'],
output_name=prefix + 'confprobs_sp')
# (1, C, B*H*W, 1)
builder.add_reshape(name=prefix + 'confprobs_transposed',
target_shape=[1, num_classes, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'confprobs_sp',
output_name=prefix + 'confprobs_transposed')
# (1, B*H*W, C, 1)
builder.add_permute(name=CONFIDENCE_STR,
dim=[0, 2, 1, 3],
input_name=prefix + 'confprobs_transposed',
output_name=CONFIDENCE_STR)
_mxnet_converter._set_input_output_layers(
builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
builder.set_pre_processing_parameters(image_input_names=self.feature)
model = builder.spec
if include_non_maximum_suppression:
# Non-Maximum Suppression is a post-processing algorithm
# responsible for merging all detections that belong to the
# same object.
# Core ML schematic
# +------------------------------------+
# | Pipeline |
# | |
# | +------------+ +-------------+ |
# | | Neural | | Non-maximum | |
# | | network +---> suppression +-----> confidences
# Image +----> | | | |
# | | +---> +-----> coordinates
# | | | | | |
# Optional inputs: | +------------+ +-^---^-------+ |
# | | | |
# IOU threshold +-----------------------+ | |
# | | |
# Confidence threshold +---------------------------+ |
# +------------------------------------+
model_neural_network = model.neuralNetwork
model.specificationVersion = 3
model.pipeline.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[0].neuralNetwork.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[1].nonMaximumSuppression.ParseFromString(b'')
# begin: Neural network model
nn_model = model.pipeline.models[0]
nn_model.description.ParseFromString(b'')
input_image = model.description.input[0]
input_image.type.imageType.width = self.input_image_shape[1]
input_image.type.imageType.height = self.input_image_shape[2]
nn_model.description.input.add()
nn_model.description.input[0].ParseFromString(
input_image.SerializeToString())
for i in range(2):
del model.description.output[i].type.multiArrayType.shape[:]
names = ["raw_confidence", "raw_coordinates"]
bounds = [self.num_classes, 4]
for i in range(2):
output_i = model.description.output[i]
output_i.name = names[i]
for j in range(2):
ma_type = output_i.type.multiArrayType
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[j].lowerBound = (
bounds[i] if j == 1 else 0)
ma_type.shapeRange.sizeRanges[j].upperBound = (
bounds[i] if j == 1 else -1)
nn_model.description.output.add()
nn_model.description.output[i].ParseFromString(
output_i.SerializeToString())
ma_type = nn_model.description.output[i].type.multiArrayType
ma_type.shape.append(num_bounding_boxes)
ma_type.shape.append(bounds[i])
# Think more about this line
nn_model.neuralNetwork.ParseFromString(
model_neural_network.SerializeToString())
nn_model.specificationVersion = model.specificationVersion
# end: Neural network model
# begin: Non maximum suppression model
nms_model = model.pipeline.models[1]
nms_model_nonMaxSup = nms_model.nonMaximumSuppression
for i in range(2):
output_i = model.description.output[i]
nms_model.description.input.add()
nms_model.description.input[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output.add()
nms_model.description.output[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output[i].name = (
'confidence' if i==0 else 'coordinates')
nms_model_nonMaxSup.iouThreshold = iou_threshold
nms_model_nonMaxSup.confidenceThreshold = confidence_threshold
nms_model_nonMaxSup.confidenceInputFeatureName = 'raw_confidence'
nms_model_nonMaxSup.coordinatesInputFeatureName = 'raw_coordinates'
nms_model_nonMaxSup.confidenceOutputFeatureName = 'confidence'
nms_model_nonMaxSup.coordinatesOutputFeatureName = 'coordinates'
nms_model.specificationVersion = model.specificationVersion
nms_model_nonMaxSup.stringClassLabels.vector.extend(self.classes)
for i in range(2):
nms_model.description.input[i].ParseFromString(
nn_model.description.output[i].SerializeToString()
)
if include_non_maximum_suppression:
# Iou Threshold
IOU_THRESHOLD_STRING = 'iouThreshold'
model.description.input.add()
model.description.input[1].type.doubleType.ParseFromString(b'')
model.description.input[1].name = IOU_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[2].ParseFromString(
model.description.input[1].SerializeToString()
)
nms_model_nonMaxSup.iouThresholdInputFeatureName = IOU_THRESHOLD_STRING
# Confidence Threshold
CONFIDENCE_THRESHOLD_STRING = 'confidenceThreshold'
model.description.input.add()
model.description.input[2].type.doubleType.ParseFromString(b'')
model.description.input[2].name = CONFIDENCE_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[3].ParseFromString(
model.description.input[2].SerializeToString())
nms_model_nonMaxSup.confidenceThresholdInputFeatureName = \
CONFIDENCE_THRESHOLD_STRING
# end: Non maximum suppression model
model.description.output[0].name = 'confidence'
model.description.output[1].name = 'coordinates'
iouThresholdString = '(optional) IOU Threshold override (default: {})'
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model_type = 'object detector (%s)' % self.model
if include_non_maximum_suppression:
model_type += ' with non-maximum suppression'
model.description.metadata.shortDescription = \
_coreml_utils._mlmodel_short_description(model_type)
model.description.input[0].shortDescription = 'Input image'
if include_non_maximum_suppression:
iouThresholdString = '(optional) IOU Threshold override (default: {})'
model.description.input[1].shortDescription = \
iouThresholdString.format(iou_threshold)
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model.description.input[2].shortDescription = \
confidenceThresholdString.format(confidence_threshold)
model.description.output[0].shortDescription = \
u'Boxes \xd7 Class confidence (see user-defined metadata "classes")'
model.description.output[1].shortDescription = \
u'Boxes \xd7 [x, y, width, height] (relative to image size)'
version = ObjectDetector._PYTHON_OBJECT_DETECTOR_VERSION
partial_user_defined_metadata = {
'model': self.model,
'max_iterations': str(self.max_iterations),
'training_iterations': str(self.training_iterations),
'include_non_maximum_suppression': str(
include_non_maximum_suppression),
'non_maximum_suppression_threshold': str(
iou_threshold),
'confidence_threshold': str(confidence_threshold),
'iou_threshold': str(iou_threshold),
'feature': self.feature,
'annotations': self.annotations,
'classes': ','.join(self.classes)
}
user_defined_metadata = _coreml_utils._get_model_metadata(
self.__class__.__name__,
partial_user_defined_metadata,
version)
model.description.metadata.userDefined.update(user_defined_metadata)
from coremltools.models.utils import save_spec as _save_spec
_save_spec(model, filename)
|
[
"def",
"export_coreml",
"(",
"self",
",",
"filename",
",",
"include_non_maximum_suppression",
"=",
"True",
",",
"iou_threshold",
"=",
"None",
",",
"confidence_threshold",
"=",
"None",
")",
":",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
".",
"_mxnet",
".",
"_mxnet_to_coreml",
"import",
"_mxnet_converter",
"import",
"coremltools",
"from",
"coremltools",
".",
"models",
"import",
"datatypes",
",",
"neural_network",
"if",
"iou_threshold",
"is",
"None",
":",
"iou_threshold",
"=",
"self",
".",
"non_maximum_suppression_threshold",
"if",
"confidence_threshold",
"is",
"None",
":",
"confidence_threshold",
"=",
"0.25",
"preds_per_box",
"=",
"5",
"+",
"self",
".",
"num_classes",
"num_anchors",
"=",
"len",
"(",
"self",
".",
"anchors",
")",
"num_classes",
"=",
"self",
".",
"num_classes",
"batch_size",
"=",
"1",
"image_shape",
"=",
"(",
"batch_size",
",",
")",
"+",
"tuple",
"(",
"self",
".",
"input_image_shape",
")",
"s_image_uint8",
"=",
"_mx",
".",
"sym",
".",
"Variable",
"(",
"self",
".",
"feature",
",",
"shape",
"=",
"image_shape",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
"s_image",
"=",
"s_image_uint8",
"/",
"255",
"# Swap a maxpool+slice in mxnet to a coreml natively supported layer",
"from",
"copy",
"import",
"copy",
"net",
"=",
"copy",
"(",
"self",
".",
"_model",
")",
"net",
".",
"_children",
"=",
"copy",
"(",
"self",
".",
"_model",
".",
"_children",
")",
"from",
".",
"_model",
"import",
"_SpecialDarknetMaxpoolBlock",
"op",
"=",
"_SpecialDarknetMaxpoolBlock",
"(",
"name",
"=",
"'pool5'",
")",
"# Make sure we are removing the right layers",
"assert",
"(",
"self",
".",
"_model",
"[",
"23",
"]",
".",
"name",
"==",
"'pool5'",
"and",
"self",
".",
"_model",
"[",
"24",
"]",
".",
"name",
"==",
"'specialcrop5'",
")",
"del",
"net",
".",
"_children",
"[",
"24",
"]",
"net",
".",
"_children",
"[",
"23",
"]",
"=",
"op",
"s_ymap",
"=",
"net",
"(",
"s_image",
")",
"mod",
"=",
"_mx",
".",
"mod",
".",
"Module",
"(",
"symbol",
"=",
"s_ymap",
",",
"label_names",
"=",
"None",
",",
"data_names",
"=",
"[",
"self",
".",
"feature",
"]",
")",
"mod",
".",
"bind",
"(",
"for_training",
"=",
"False",
",",
"data_shapes",
"=",
"[",
"(",
"self",
".",
"feature",
",",
"image_shape",
")",
"]",
")",
"# Copy over params from net",
"mod",
".",
"init_params",
"(",
")",
"arg_params",
",",
"aux_params",
"=",
"mod",
".",
"get_params",
"(",
")",
"net_params",
"=",
"net",
".",
"collect_params",
"(",
")",
"new_arg_params",
"=",
"{",
"}",
"for",
"k",
",",
"param",
"in",
"arg_params",
".",
"items",
"(",
")",
":",
"new_arg_params",
"[",
"k",
"]",
"=",
"net_params",
"[",
"k",
"]",
".",
"data",
"(",
"net_params",
"[",
"k",
"]",
".",
"list_ctx",
"(",
")",
"[",
"0",
"]",
")",
"new_aux_params",
"=",
"{",
"}",
"for",
"k",
",",
"param",
"in",
"aux_params",
".",
"items",
"(",
")",
":",
"new_aux_params",
"[",
"k",
"]",
"=",
"net_params",
"[",
"k",
"]",
".",
"data",
"(",
"net_params",
"[",
"k",
"]",
".",
"list_ctx",
"(",
")",
"[",
"0",
"]",
")",
"mod",
".",
"set_params",
"(",
"new_arg_params",
",",
"new_aux_params",
")",
"input_names",
"=",
"[",
"self",
".",
"feature",
"]",
"input_dims",
"=",
"[",
"list",
"(",
"self",
".",
"input_image_shape",
")",
"]",
"input_types",
"=",
"[",
"datatypes",
".",
"Array",
"(",
"*",
"dim",
")",
"for",
"dim",
"in",
"input_dims",
"]",
"input_features",
"=",
"list",
"(",
"zip",
"(",
"input_names",
",",
"input_types",
")",
")",
"num_spatial",
"=",
"self",
".",
"_grid_shape",
"[",
"0",
"]",
"*",
"self",
".",
"_grid_shape",
"[",
"1",
"]",
"num_bounding_boxes",
"=",
"num_anchors",
"*",
"num_spatial",
"CONFIDENCE_STR",
"=",
"(",
"\"raw_confidence\"",
"if",
"include_non_maximum_suppression",
"else",
"\"confidence\"",
")",
"COORDINATES_STR",
"=",
"(",
"\"raw_coordinates\"",
"if",
"include_non_maximum_suppression",
"else",
"\"coordinates\"",
")",
"output_names",
"=",
"[",
"CONFIDENCE_STR",
",",
"COORDINATES_STR",
"]",
"output_dims",
"=",
"[",
"(",
"num_bounding_boxes",
",",
"num_classes",
")",
",",
"(",
"num_bounding_boxes",
",",
"4",
")",
",",
"]",
"output_types",
"=",
"[",
"datatypes",
".",
"Array",
"(",
"*",
"dim",
")",
"for",
"dim",
"in",
"output_dims",
"]",
"output_features",
"=",
"list",
"(",
"zip",
"(",
"output_names",
",",
"output_types",
")",
")",
"mode",
"=",
"None",
"builder",
"=",
"neural_network",
".",
"NeuralNetworkBuilder",
"(",
"input_features",
",",
"output_features",
",",
"mode",
")",
"_mxnet_converter",
".",
"convert",
"(",
"mod",
",",
"mode",
"=",
"None",
",",
"input_shape",
"=",
"[",
"(",
"self",
".",
"feature",
",",
"image_shape",
")",
"]",
",",
"builder",
"=",
"builder",
",",
"verbose",
"=",
"False",
")",
"prefix",
"=",
"'__tc__internal__'",
"# (1, B, C+5, S*S)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'ymap_sp_pre'",
",",
"target_shape",
"=",
"[",
"batch_size",
",",
"num_anchors",
",",
"preds_per_box",
",",
"num_spatial",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"'conv8_fwd_output'",
",",
"output_name",
"=",
"prefix",
"+",
"'ymap_sp_pre'",
")",
"# (1, C+5, B, S*S)",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"dim",
"=",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp_pre'",
",",
"output_name",
"=",
"prefix",
"+",
"'ymap_sp'",
")",
"# POSITION: X/Y",
"# (1, 2, B, S*S)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'raw_rel_xy_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"0",
",",
"end_index",
"=",
"2",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'raw_rel_xy_sp'",
")",
"# (1, 2, B, S*S)",
"builder",
".",
"add_activation",
"(",
"name",
"=",
"prefix",
"+",
"'rel_xy_sp'",
",",
"non_linearity",
"=",
"'SIGMOID'",
",",
"input_name",
"=",
"prefix",
"+",
"'raw_rel_xy_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_xy_sp'",
")",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'rel_xy'",
",",
"target_shape",
"=",
"[",
"batch_size",
",",
"2",
",",
"num_bounding_boxes",
",",
"1",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'rel_xy_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_xy'",
")",
"c_xy",
"=",
"_np",
".",
"array",
"(",
"_np",
".",
"meshgrid",
"(",
"_np",
".",
"arange",
"(",
"self",
".",
"_grid_shape",
"[",
"1",
"]",
")",
",",
"_np",
".",
"arange",
"(",
"self",
".",
"_grid_shape",
"[",
"0",
"]",
")",
")",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
"c_xy_reshaped",
"=",
"(",
"_np",
".",
"tile",
"(",
"c_xy",
"[",
":",
",",
"_np",
".",
"newaxis",
"]",
",",
"(",
"num_anchors",
",",
"1",
",",
"1",
")",
")",
".",
"reshape",
"(",
"2",
",",
"-",
"1",
")",
")",
"[",
"_np",
".",
"newaxis",
",",
"...",
",",
"_np",
".",
"newaxis",
"]",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_load_constant",
"(",
"prefix",
"+",
"'constant_xy'",
",",
"constant_value",
"=",
"c_xy_reshaped",
",",
"shape",
"=",
"c_xy_reshaped",
".",
"shape",
"[",
"1",
":",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'constant_xy'",
")",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'xy'",
",",
"mode",
"=",
"'ADD'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'constant_xy'",
",",
"prefix",
"+",
"'rel_xy'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'xy'",
")",
"# SHAPE: WIDTH/HEIGHT",
"# (1, 2, B, S*S)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'raw_rel_wh_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"2",
",",
"end_index",
"=",
"4",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'raw_rel_wh_sp'",
")",
"# (1, 2, B, S*S)",
"builder",
".",
"add_unary",
"(",
"name",
"=",
"prefix",
"+",
"'rel_wh_sp'",
",",
"mode",
"=",
"'exp'",
",",
"input_name",
"=",
"prefix",
"+",
"'raw_rel_wh_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_wh_sp'",
")",
"# (1, 2*B, S, S)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'rel_wh'",
",",
"target_shape",
"=",
"[",
"batch_size",
",",
"2",
"*",
"num_anchors",
"]",
"+",
"list",
"(",
"self",
".",
"_grid_shape",
")",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'rel_wh_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'rel_wh'",
")",
"np_anchors",
"=",
"_np",
".",
"asarray",
"(",
"self",
".",
"anchors",
",",
"dtype",
"=",
"_np",
".",
"float32",
")",
".",
"T",
"anchors_0",
"=",
"_np",
".",
"tile",
"(",
"np_anchors",
".",
"reshape",
"(",
"[",
"2",
"*",
"num_anchors",
",",
"1",
",",
"1",
"]",
")",
",",
"self",
".",
"_grid_shape",
")",
"# (1, 2*B, S, S)",
"builder",
".",
"add_load_constant",
"(",
"name",
"=",
"prefix",
"+",
"'c_anchors'",
",",
"constant_value",
"=",
"anchors_0",
",",
"shape",
"=",
"anchors_0",
".",
"shape",
",",
"output_name",
"=",
"prefix",
"+",
"'c_anchors'",
")",
"# (1, 2*B, S, S)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'wh_pre'",
",",
"mode",
"=",
"'MULTIPLY'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'c_anchors'",
",",
"prefix",
"+",
"'rel_wh'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'wh_pre'",
")",
"# (1, 2, B*H*W, 1)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'wh'",
",",
"target_shape",
"=",
"[",
"1",
",",
"2",
",",
"num_bounding_boxes",
",",
"1",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'wh_pre'",
",",
"output_name",
"=",
"prefix",
"+",
"'wh'",
")",
"# (1, 4, B*H*W, 1)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'boxes_out_transposed'",
",",
"mode",
"=",
"'CONCAT'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'xy'",
",",
"prefix",
"+",
"'wh'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'boxes_out_transposed'",
")",
"# (1, B*H*W, 4, 1)",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"prefix",
"+",
"'boxes_out'",
",",
"dim",
"=",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
",",
"input_name",
"=",
"prefix",
"+",
"'boxes_out_transposed'",
",",
"output_name",
"=",
"prefix",
"+",
"'boxes_out'",
")",
"scale",
"=",
"_np",
".",
"zeros",
"(",
"(",
"num_bounding_boxes",
",",
"4",
",",
"1",
")",
")",
"scale",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
"=",
"1.0",
"/",
"self",
".",
"_grid_shape",
"[",
"1",
"]",
"scale",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
"=",
"1.0",
"/",
"self",
".",
"_grid_shape",
"[",
"0",
"]",
"# (1, B*H*W, 4, 1)",
"builder",
".",
"add_scale",
"(",
"name",
"=",
"COORDINATES_STR",
",",
"W",
"=",
"scale",
",",
"b",
"=",
"0",
",",
"has_bias",
"=",
"False",
",",
"shape_scale",
"=",
"(",
"num_bounding_boxes",
",",
"4",
",",
"1",
")",
",",
"input_name",
"=",
"prefix",
"+",
"'boxes_out'",
",",
"output_name",
"=",
"COORDINATES_STR",
")",
"# CLASS PROBABILITIES AND OBJECT CONFIDENCE",
"# (1, C, B, H*W)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'scores_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"5",
",",
"end_index",
"=",
"preds_per_box",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'scores_sp'",
")",
"# (1, C, B, H*W)",
"builder",
".",
"add_softmax",
"(",
"name",
"=",
"prefix",
"+",
"'probs_sp'",
",",
"input_name",
"=",
"prefix",
"+",
"'scores_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'probs_sp'",
")",
"# (1, 1, B, H*W)",
"builder",
".",
"add_slice",
"(",
"name",
"=",
"prefix",
"+",
"'logit_conf_sp'",
",",
"axis",
"=",
"'channel'",
",",
"start_index",
"=",
"4",
",",
"end_index",
"=",
"5",
",",
"stride",
"=",
"1",
",",
"input_name",
"=",
"prefix",
"+",
"'ymap_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'logit_conf_sp'",
")",
"# (1, 1, B, H*W)",
"builder",
".",
"add_activation",
"(",
"name",
"=",
"prefix",
"+",
"'conf_sp'",
",",
"non_linearity",
"=",
"'SIGMOID'",
",",
"input_name",
"=",
"prefix",
"+",
"'logit_conf_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'conf_sp'",
")",
"# (1, C, B, H*W)",
"if",
"num_classes",
">",
"1",
":",
"conf",
"=",
"prefix",
"+",
"'conf_tiled_sp'",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'conf_tiled_sp'",
",",
"mode",
"=",
"'CONCAT'",
",",
"input_names",
"=",
"[",
"prefix",
"+",
"'conf_sp'",
"]",
"*",
"num_classes",
",",
"output_name",
"=",
"conf",
")",
"else",
":",
"conf",
"=",
"prefix",
"+",
"'conf_sp'",
"# (1, C, B, H*W)",
"builder",
".",
"add_elementwise",
"(",
"name",
"=",
"prefix",
"+",
"'confprobs_sp'",
",",
"mode",
"=",
"'MULTIPLY'",
",",
"input_names",
"=",
"[",
"conf",
",",
"prefix",
"+",
"'probs_sp'",
"]",
",",
"output_name",
"=",
"prefix",
"+",
"'confprobs_sp'",
")",
"# (1, C, B*H*W, 1)",
"builder",
".",
"add_reshape",
"(",
"name",
"=",
"prefix",
"+",
"'confprobs_transposed'",
",",
"target_shape",
"=",
"[",
"1",
",",
"num_classes",
",",
"num_bounding_boxes",
",",
"1",
"]",
",",
"mode",
"=",
"0",
",",
"input_name",
"=",
"prefix",
"+",
"'confprobs_sp'",
",",
"output_name",
"=",
"prefix",
"+",
"'confprobs_transposed'",
")",
"# (1, B*H*W, C, 1)",
"builder",
".",
"add_permute",
"(",
"name",
"=",
"CONFIDENCE_STR",
",",
"dim",
"=",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
",",
"input_name",
"=",
"prefix",
"+",
"'confprobs_transposed'",
",",
"output_name",
"=",
"CONFIDENCE_STR",
")",
"_mxnet_converter",
".",
"_set_input_output_layers",
"(",
"builder",
",",
"input_names",
",",
"output_names",
")",
"builder",
".",
"set_input",
"(",
"input_names",
",",
"input_dims",
")",
"builder",
".",
"set_output",
"(",
"output_names",
",",
"output_dims",
")",
"builder",
".",
"set_pre_processing_parameters",
"(",
"image_input_names",
"=",
"self",
".",
"feature",
")",
"model",
"=",
"builder",
".",
"spec",
"if",
"include_non_maximum_suppression",
":",
"# Non-Maximum Suppression is a post-processing algorithm",
"# responsible for merging all detections that belong to the",
"# same object.",
"# Core ML schematic ",
"# +------------------------------------+",
"# | Pipeline |",
"# | |",
"# | +------------+ +-------------+ |",
"# | | Neural | | Non-maximum | |",
"# | | network +---> suppression +-----> confidences",
"# Image +----> | | | |",
"# | | +---> +-----> coordinates",
"# | | | | | |",
"# Optional inputs: | +------------+ +-^---^-------+ |",
"# | | | |",
"# IOU threshold +-----------------------+ | |",
"# | | |",
"# Confidence threshold +---------------------------+ |",
"# +------------------------------------+",
"model_neural_network",
"=",
"model",
".",
"neuralNetwork",
"model",
".",
"specificationVersion",
"=",
"3",
"model",
".",
"pipeline",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"pipeline",
".",
"models",
".",
"add",
"(",
")",
"model",
".",
"pipeline",
".",
"models",
"[",
"0",
"]",
".",
"neuralNetwork",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"pipeline",
".",
"models",
".",
"add",
"(",
")",
"model",
".",
"pipeline",
".",
"models",
"[",
"1",
"]",
".",
"nonMaximumSuppression",
".",
"ParseFromString",
"(",
"b''",
")",
"# begin: Neural network model",
"nn_model",
"=",
"model",
".",
"pipeline",
".",
"models",
"[",
"0",
"]",
"nn_model",
".",
"description",
".",
"ParseFromString",
"(",
"b''",
")",
"input_image",
"=",
"model",
".",
"description",
".",
"input",
"[",
"0",
"]",
"input_image",
".",
"type",
".",
"imageType",
".",
"width",
"=",
"self",
".",
"input_image_shape",
"[",
"1",
"]",
"input_image",
".",
"type",
".",
"imageType",
".",
"height",
"=",
"self",
".",
"input_image_shape",
"[",
"2",
"]",
"nn_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nn_model",
".",
"description",
".",
"input",
"[",
"0",
"]",
".",
"ParseFromString",
"(",
"input_image",
".",
"SerializeToString",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"del",
"model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"type",
".",
"multiArrayType",
".",
"shape",
"[",
":",
"]",
"names",
"=",
"[",
"\"raw_confidence\"",
",",
"\"raw_coordinates\"",
"]",
"bounds",
"=",
"[",
"self",
".",
"num_classes",
",",
"4",
"]",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"output_i",
"=",
"model",
".",
"description",
".",
"output",
"[",
"i",
"]",
"output_i",
".",
"name",
"=",
"names",
"[",
"i",
"]",
"for",
"j",
"in",
"range",
"(",
"2",
")",
":",
"ma_type",
"=",
"output_i",
".",
"type",
".",
"multiArrayType",
"ma_type",
".",
"shapeRange",
".",
"sizeRanges",
".",
"add",
"(",
")",
"ma_type",
".",
"shapeRange",
".",
"sizeRanges",
"[",
"j",
"]",
".",
"lowerBound",
"=",
"(",
"bounds",
"[",
"i",
"]",
"if",
"j",
"==",
"1",
"else",
"0",
")",
"ma_type",
".",
"shapeRange",
".",
"sizeRanges",
"[",
"j",
"]",
".",
"upperBound",
"=",
"(",
"bounds",
"[",
"i",
"]",
"if",
"j",
"==",
"1",
"else",
"-",
"1",
")",
"nn_model",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"nn_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"output_i",
".",
"SerializeToString",
"(",
")",
")",
"ma_type",
"=",
"nn_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"type",
".",
"multiArrayType",
"ma_type",
".",
"shape",
".",
"append",
"(",
"num_bounding_boxes",
")",
"ma_type",
".",
"shape",
".",
"append",
"(",
"bounds",
"[",
"i",
"]",
")",
"# Think more about this line",
"nn_model",
".",
"neuralNetwork",
".",
"ParseFromString",
"(",
"model_neural_network",
".",
"SerializeToString",
"(",
")",
")",
"nn_model",
".",
"specificationVersion",
"=",
"model",
".",
"specificationVersion",
"# end: Neural network model",
"# begin: Non maximum suppression model",
"nms_model",
"=",
"model",
".",
"pipeline",
".",
"models",
"[",
"1",
"]",
"nms_model_nonMaxSup",
"=",
"nms_model",
".",
"nonMaximumSuppression",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"output_i",
"=",
"model",
".",
"description",
".",
"output",
"[",
"i",
"]",
"nms_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"input",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"output_i",
".",
"SerializeToString",
"(",
")",
")",
"nms_model",
".",
"description",
".",
"output",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"output_i",
".",
"SerializeToString",
"(",
")",
")",
"nms_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"name",
"=",
"(",
"'confidence'",
"if",
"i",
"==",
"0",
"else",
"'coordinates'",
")",
"nms_model_nonMaxSup",
".",
"iouThreshold",
"=",
"iou_threshold",
"nms_model_nonMaxSup",
".",
"confidenceThreshold",
"=",
"confidence_threshold",
"nms_model_nonMaxSup",
".",
"confidenceInputFeatureName",
"=",
"'raw_confidence'",
"nms_model_nonMaxSup",
".",
"coordinatesInputFeatureName",
"=",
"'raw_coordinates'",
"nms_model_nonMaxSup",
".",
"confidenceOutputFeatureName",
"=",
"'confidence'",
"nms_model_nonMaxSup",
".",
"coordinatesOutputFeatureName",
"=",
"'coordinates'",
"nms_model",
".",
"specificationVersion",
"=",
"model",
".",
"specificationVersion",
"nms_model_nonMaxSup",
".",
"stringClassLabels",
".",
"vector",
".",
"extend",
"(",
"self",
".",
"classes",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"nms_model",
".",
"description",
".",
"input",
"[",
"i",
"]",
".",
"ParseFromString",
"(",
"nn_model",
".",
"description",
".",
"output",
"[",
"i",
"]",
".",
"SerializeToString",
"(",
")",
")",
"if",
"include_non_maximum_suppression",
":",
"# Iou Threshold",
"IOU_THRESHOLD_STRING",
"=",
"'iouThreshold'",
"model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"type",
".",
"doubleType",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"name",
"=",
"IOU_THRESHOLD_STRING",
"nms_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"ParseFromString",
"(",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"SerializeToString",
"(",
")",
")",
"nms_model_nonMaxSup",
".",
"iouThresholdInputFeatureName",
"=",
"IOU_THRESHOLD_STRING",
"# Confidence Threshold",
"CONFIDENCE_THRESHOLD_STRING",
"=",
"'confidenceThreshold'",
"model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"type",
".",
"doubleType",
".",
"ParseFromString",
"(",
"b''",
")",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"name",
"=",
"CONFIDENCE_THRESHOLD_STRING",
"nms_model",
".",
"description",
".",
"input",
".",
"add",
"(",
")",
"nms_model",
".",
"description",
".",
"input",
"[",
"3",
"]",
".",
"ParseFromString",
"(",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"SerializeToString",
"(",
")",
")",
"nms_model_nonMaxSup",
".",
"confidenceThresholdInputFeatureName",
"=",
"CONFIDENCE_THRESHOLD_STRING",
"# end: Non maximum suppression model",
"model",
".",
"description",
".",
"output",
"[",
"0",
"]",
".",
"name",
"=",
"'confidence'",
"model",
".",
"description",
".",
"output",
"[",
"1",
"]",
".",
"name",
"=",
"'coordinates'",
"iouThresholdString",
"=",
"'(optional) IOU Threshold override (default: {})'",
"confidenceThresholdString",
"=",
"(",
"'(optional)'",
"+",
"' Confidence Threshold override (default: {})'",
")",
"model_type",
"=",
"'object detector (%s)'",
"%",
"self",
".",
"model",
"if",
"include_non_maximum_suppression",
":",
"model_type",
"+=",
"' with non-maximum suppression'",
"model",
".",
"description",
".",
"metadata",
".",
"shortDescription",
"=",
"_coreml_utils",
".",
"_mlmodel_short_description",
"(",
"model_type",
")",
"model",
".",
"description",
".",
"input",
"[",
"0",
"]",
".",
"shortDescription",
"=",
"'Input image'",
"if",
"include_non_maximum_suppression",
":",
"iouThresholdString",
"=",
"'(optional) IOU Threshold override (default: {})'",
"model",
".",
"description",
".",
"input",
"[",
"1",
"]",
".",
"shortDescription",
"=",
"iouThresholdString",
".",
"format",
"(",
"iou_threshold",
")",
"confidenceThresholdString",
"=",
"(",
"'(optional)'",
"+",
"' Confidence Threshold override (default: {})'",
")",
"model",
".",
"description",
".",
"input",
"[",
"2",
"]",
".",
"shortDescription",
"=",
"confidenceThresholdString",
".",
"format",
"(",
"confidence_threshold",
")",
"model",
".",
"description",
".",
"output",
"[",
"0",
"]",
".",
"shortDescription",
"=",
"u'Boxes \\xd7 Class confidence (see user-defined metadata \"classes\")'",
"model",
".",
"description",
".",
"output",
"[",
"1",
"]",
".",
"shortDescription",
"=",
"u'Boxes \\xd7 [x, y, width, height] (relative to image size)'",
"version",
"=",
"ObjectDetector",
".",
"_PYTHON_OBJECT_DETECTOR_VERSION",
"partial_user_defined_metadata",
"=",
"{",
"'model'",
":",
"self",
".",
"model",
",",
"'max_iterations'",
":",
"str",
"(",
"self",
".",
"max_iterations",
")",
",",
"'training_iterations'",
":",
"str",
"(",
"self",
".",
"training_iterations",
")",
",",
"'include_non_maximum_suppression'",
":",
"str",
"(",
"include_non_maximum_suppression",
")",
",",
"'non_maximum_suppression_threshold'",
":",
"str",
"(",
"iou_threshold",
")",
",",
"'confidence_threshold'",
":",
"str",
"(",
"confidence_threshold",
")",
",",
"'iou_threshold'",
":",
"str",
"(",
"iou_threshold",
")",
",",
"'feature'",
":",
"self",
".",
"feature",
",",
"'annotations'",
":",
"self",
".",
"annotations",
",",
"'classes'",
":",
"','",
".",
"join",
"(",
"self",
".",
"classes",
")",
"}",
"user_defined_metadata",
"=",
"_coreml_utils",
".",
"_get_model_metadata",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"partial_user_defined_metadata",
",",
"version",
")",
"model",
".",
"description",
".",
"metadata",
".",
"userDefined",
".",
"update",
"(",
"user_defined_metadata",
")",
"from",
"coremltools",
".",
"models",
".",
"utils",
"import",
"save_spec",
"as",
"_save_spec",
"_save_spec",
"(",
"model",
",",
"filename",
")"
] | 46.469758 | 21.743952 |
def transitive_invalidation_hash(self, fingerprint_strategy=None, depth=0):
"""
:API: public
:param FingerprintStrategy fingerprint_strategy: optional fingerprint strategy to use to compute
the fingerprint of a target
:return: A fingerprint representing this target and all of its dependencies.
The return value can be `None`, indicating that this target and all of its transitive dependencies
did not contribute to the fingerprint, according to the provided FingerprintStrategy.
:rtype: string
"""
if depth > self._MAX_RECURSION_DEPTH:
# NB(zundel) without this catch, we'll eventually hit the python stack limit
# RuntimeError: maximum recursion depth exceeded while calling a Python object
raise self.RecursiveDepthError("Max depth of {} exceeded.".format(self._MAX_RECURSION_DEPTH))
fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy()
direct = (depth == 0 and fingerprint_strategy.direct(self))
if direct:
fingerprint_map = self._cached_direct_transitive_fingerprint_map
else:
fingerprint_map = self._cached_all_transitive_fingerprint_map
if fingerprint_strategy not in fingerprint_map:
hasher = sha1()
def dep_hash_iter():
dep_list = fingerprint_strategy.dependencies(self) if direct else self.dependencies
for dep in dep_list:
try:
if direct:
dep_hash = dep.invalidation_hash(fingerprint_strategy)
else:
dep_hash = dep.transitive_invalidation_hash(fingerprint_strategy, depth=depth+1)
if dep_hash is not None:
yield dep_hash
except self.RecursiveDepthError as e:
raise self.RecursiveDepthError("{message}\n referenced from {spec}"
.format(message=e, spec=dep.address.spec))
dep_hashes = sorted(list(dep_hash_iter()))
for dep_hash in dep_hashes:
hasher.update(dep_hash.encode('utf-8'))
target_hash = self.invalidation_hash(fingerprint_strategy)
if target_hash is None and not dep_hashes:
return None
dependencies_hash = hasher.hexdigest()[:12]
combined_hash = '{target_hash}.{deps_hash}'.format(target_hash=target_hash,
deps_hash=dependencies_hash)
fingerprint_map[fingerprint_strategy] = combined_hash
return fingerprint_map[fingerprint_strategy]
|
[
"def",
"transitive_invalidation_hash",
"(",
"self",
",",
"fingerprint_strategy",
"=",
"None",
",",
"depth",
"=",
"0",
")",
":",
"if",
"depth",
">",
"self",
".",
"_MAX_RECURSION_DEPTH",
":",
"# NB(zundel) without this catch, we'll eventually hit the python stack limit",
"# RuntimeError: maximum recursion depth exceeded while calling a Python object",
"raise",
"self",
".",
"RecursiveDepthError",
"(",
"\"Max depth of {} exceeded.\"",
".",
"format",
"(",
"self",
".",
"_MAX_RECURSION_DEPTH",
")",
")",
"fingerprint_strategy",
"=",
"fingerprint_strategy",
"or",
"DefaultFingerprintStrategy",
"(",
")",
"direct",
"=",
"(",
"depth",
"==",
"0",
"and",
"fingerprint_strategy",
".",
"direct",
"(",
"self",
")",
")",
"if",
"direct",
":",
"fingerprint_map",
"=",
"self",
".",
"_cached_direct_transitive_fingerprint_map",
"else",
":",
"fingerprint_map",
"=",
"self",
".",
"_cached_all_transitive_fingerprint_map",
"if",
"fingerprint_strategy",
"not",
"in",
"fingerprint_map",
":",
"hasher",
"=",
"sha1",
"(",
")",
"def",
"dep_hash_iter",
"(",
")",
":",
"dep_list",
"=",
"fingerprint_strategy",
".",
"dependencies",
"(",
"self",
")",
"if",
"direct",
"else",
"self",
".",
"dependencies",
"for",
"dep",
"in",
"dep_list",
":",
"try",
":",
"if",
"direct",
":",
"dep_hash",
"=",
"dep",
".",
"invalidation_hash",
"(",
"fingerprint_strategy",
")",
"else",
":",
"dep_hash",
"=",
"dep",
".",
"transitive_invalidation_hash",
"(",
"fingerprint_strategy",
",",
"depth",
"=",
"depth",
"+",
"1",
")",
"if",
"dep_hash",
"is",
"not",
"None",
":",
"yield",
"dep_hash",
"except",
"self",
".",
"RecursiveDepthError",
"as",
"e",
":",
"raise",
"self",
".",
"RecursiveDepthError",
"(",
"\"{message}\\n referenced from {spec}\"",
".",
"format",
"(",
"message",
"=",
"e",
",",
"spec",
"=",
"dep",
".",
"address",
".",
"spec",
")",
")",
"dep_hashes",
"=",
"sorted",
"(",
"list",
"(",
"dep_hash_iter",
"(",
")",
")",
")",
"for",
"dep_hash",
"in",
"dep_hashes",
":",
"hasher",
".",
"update",
"(",
"dep_hash",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"target_hash",
"=",
"self",
".",
"invalidation_hash",
"(",
"fingerprint_strategy",
")",
"if",
"target_hash",
"is",
"None",
"and",
"not",
"dep_hashes",
":",
"return",
"None",
"dependencies_hash",
"=",
"hasher",
".",
"hexdigest",
"(",
")",
"[",
":",
"12",
"]",
"combined_hash",
"=",
"'{target_hash}.{deps_hash}'",
".",
"format",
"(",
"target_hash",
"=",
"target_hash",
",",
"deps_hash",
"=",
"dependencies_hash",
")",
"fingerprint_map",
"[",
"fingerprint_strategy",
"]",
"=",
"combined_hash",
"return",
"fingerprint_map",
"[",
"fingerprint_strategy",
"]"
] | 46.653846 | 27.461538 |
def compute_potentials_analytical_hs(grid, configs_raw, rho):
"""Compute the potential superpositions of each current dipole in the
configurations, using the provided resistivity
Parameters
----------
grid:
crt_grid object with loaded FE grid. Used for the electrode positions
configs_raw: numpy.ndarray
Nx4 array containing N four-point spreads
rho: float
resistivity of half-space
Returns
-------
potentials: list
List containing N arrays, each of size M (nr of grid nodes)
"""
potentials = []
nodes_sorted = grid.nodes['sorted']
nodes_raw = grid.nodes['sorted']
for config in configs_raw:
print('potential configs', config)
# determine distance of all nodes to both electrodes
e1_node = grid.get_electrode_node(config[0])
print('e1_node', e1_node)
electrode1 = nodes_sorted[e1_node][1:3]
# electrode1 = nodes_sorted[config[0]][1:3]
r1 = np.sqrt(
(nodes_raw[:, 1] - electrode1[0]) ** 2 +
(nodes_raw[:, 2] - electrode1[1]) ** 2
)
# electrode2 = nodes_sorted[config[1]][1:3]
e2_node = grid.get_electrode_node(config[1])
print('e2_node', e2_node)
electrode2 = nodes_sorted[e2_node][1:3]
r2 = np.sqrt(
(nodes_raw[:, 1] - electrode2[0]) ** 2 +
(nodes_raw[:, 2] - electrode2[1]) ** 2
)
pot1 = pot_ana(r1, rho)
pot2 = - pot_ana(r2, rho)
pot12 = pot1 + pot2
potentials.append(pot12)
return potentials
|
[
"def",
"compute_potentials_analytical_hs",
"(",
"grid",
",",
"configs_raw",
",",
"rho",
")",
":",
"potentials",
"=",
"[",
"]",
"nodes_sorted",
"=",
"grid",
".",
"nodes",
"[",
"'sorted'",
"]",
"nodes_raw",
"=",
"grid",
".",
"nodes",
"[",
"'sorted'",
"]",
"for",
"config",
"in",
"configs_raw",
":",
"print",
"(",
"'potential configs'",
",",
"config",
")",
"# determine distance of all nodes to both electrodes",
"e1_node",
"=",
"grid",
".",
"get_electrode_node",
"(",
"config",
"[",
"0",
"]",
")",
"print",
"(",
"'e1_node'",
",",
"e1_node",
")",
"electrode1",
"=",
"nodes_sorted",
"[",
"e1_node",
"]",
"[",
"1",
":",
"3",
"]",
"# electrode1 = nodes_sorted[config[0]][1:3]",
"r1",
"=",
"np",
".",
"sqrt",
"(",
"(",
"nodes_raw",
"[",
":",
",",
"1",
"]",
"-",
"electrode1",
"[",
"0",
"]",
")",
"**",
"2",
"+",
"(",
"nodes_raw",
"[",
":",
",",
"2",
"]",
"-",
"electrode1",
"[",
"1",
"]",
")",
"**",
"2",
")",
"# electrode2 = nodes_sorted[config[1]][1:3]",
"e2_node",
"=",
"grid",
".",
"get_electrode_node",
"(",
"config",
"[",
"1",
"]",
")",
"print",
"(",
"'e2_node'",
",",
"e2_node",
")",
"electrode2",
"=",
"nodes_sorted",
"[",
"e2_node",
"]",
"[",
"1",
":",
"3",
"]",
"r2",
"=",
"np",
".",
"sqrt",
"(",
"(",
"nodes_raw",
"[",
":",
",",
"1",
"]",
"-",
"electrode2",
"[",
"0",
"]",
")",
"**",
"2",
"+",
"(",
"nodes_raw",
"[",
":",
",",
"2",
"]",
"-",
"electrode2",
"[",
"1",
"]",
")",
"**",
"2",
")",
"pot1",
"=",
"pot_ana",
"(",
"r1",
",",
"rho",
")",
"pot2",
"=",
"-",
"pot_ana",
"(",
"r2",
",",
"rho",
")",
"pot12",
"=",
"pot1",
"+",
"pot2",
"potentials",
".",
"append",
"(",
"pot12",
")",
"return",
"potentials"
] | 32.765957 | 17.021277 |
def save_user(user, name, save=None): # noqa: E501
"""Save a script
Save a script # noqa: E501
:param user: Get user with this name
:type user: str
:param name: Get status of a driver with this name
:type name: str
:param save: The data needed to save this user
:type save: dict | bytes
:rtype: Response
"""
if connexion.request.is_json:
save = Save.from_dict(connexion.request.get_json()) # noqa: E501
response = errorIfUnauthorized(role='admin')
if response:
return response
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(name)
user: User = mapUserToUser(save.script)
if driver.saveApitaxUser(user):
return Response(status=200, body=response.getResponseBody())
return ErrorResponse(status=500, message='Failed to create user')
|
[
"def",
"save_user",
"(",
"user",
",",
"name",
",",
"save",
"=",
"None",
")",
":",
"# noqa: E501",
"if",
"connexion",
".",
"request",
".",
"is_json",
":",
"save",
"=",
"Save",
".",
"from_dict",
"(",
"connexion",
".",
"request",
".",
"get_json",
"(",
")",
")",
"# noqa: E501",
"response",
"=",
"errorIfUnauthorized",
"(",
"role",
"=",
"'admin'",
")",
"if",
"response",
":",
"return",
"response",
"else",
":",
"response",
"=",
"ApitaxResponse",
"(",
")",
"driver",
":",
"Driver",
"=",
"LoadedDrivers",
".",
"getDriver",
"(",
"name",
")",
"user",
":",
"User",
"=",
"mapUserToUser",
"(",
"save",
".",
"script",
")",
"if",
"driver",
".",
"saveApitaxUser",
"(",
"user",
")",
":",
"return",
"Response",
"(",
"status",
"=",
"200",
",",
"body",
"=",
"response",
".",
"getResponseBody",
"(",
")",
")",
"return",
"ErrorResponse",
"(",
"status",
"=",
"500",
",",
"message",
"=",
"'Failed to create user'",
")"
] | 28 | 19.966667 |
def info(gandi, email):
"""Display information about a mailbox."""
login, domain = email
output_keys = ['login', 'aliases', 'fallback', 'quota', 'responder']
mailbox = gandi.mail.info(domain, login)
output_mailbox(gandi, mailbox, output_keys)
return mailbox
|
[
"def",
"info",
"(",
"gandi",
",",
"email",
")",
":",
"login",
",",
"domain",
"=",
"email",
"output_keys",
"=",
"[",
"'login'",
",",
"'aliases'",
",",
"'fallback'",
",",
"'quota'",
",",
"'responder'",
"]",
"mailbox",
"=",
"gandi",
".",
"mail",
".",
"info",
"(",
"domain",
",",
"login",
")",
"output_mailbox",
"(",
"gandi",
",",
"mailbox",
",",
"output_keys",
")",
"return",
"mailbox"
] | 30.555556 | 19.666667 |
def is_year(self):
"""Determine if a data record is of type YEAR."""
dt = DATA_TYPES['year']
if dt['min'] and dt['max']:
if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']:
self.type = 'year'.upper()
self.len = None
return True
|
[
"def",
"is_year",
"(",
"self",
")",
":",
"dt",
"=",
"DATA_TYPES",
"[",
"'year'",
"]",
"if",
"dt",
"[",
"'min'",
"]",
"and",
"dt",
"[",
"'max'",
"]",
":",
"if",
"type",
"(",
"self",
".",
"data",
")",
"is",
"dt",
"[",
"'type'",
"]",
"and",
"dt",
"[",
"'min'",
"]",
"<",
"self",
".",
"data",
"<",
"dt",
"[",
"'max'",
"]",
":",
"self",
".",
"type",
"=",
"'year'",
".",
"upper",
"(",
")",
"self",
".",
"len",
"=",
"None",
"return",
"True"
] | 40.5 | 12.875 |
def process_tree_files(tree):
""" process_tree_files: Download files from nodes
Args:
tree (ChannelManager): manager to handle communication to Kolibri Studio
Returns: None
"""
# Fill in values necessary for next steps
config.LOGGER.info("Processing content...")
files_to_diff = tree.process_tree(tree.channel)
config.SUSHI_BAR_CLIENT.report_statistics(files_to_diff, topic_count=tree.channel.get_topic_count())
tree.check_for_files_failed()
return files_to_diff, config.FAILED_FILES
|
[
"def",
"process_tree_files",
"(",
"tree",
")",
":",
"# Fill in values necessary for next steps",
"config",
".",
"LOGGER",
".",
"info",
"(",
"\"Processing content...\"",
")",
"files_to_diff",
"=",
"tree",
".",
"process_tree",
"(",
"tree",
".",
"channel",
")",
"config",
".",
"SUSHI_BAR_CLIENT",
".",
"report_statistics",
"(",
"files_to_diff",
",",
"topic_count",
"=",
"tree",
".",
"channel",
".",
"get_topic_count",
"(",
")",
")",
"tree",
".",
"check_for_files_failed",
"(",
")",
"return",
"files_to_diff",
",",
"config",
".",
"FAILED_FILES"
] | 44.333333 | 16.666667 |
def hue(self, img1, img2):
"""Applies the hue blend mode.
Hues image img1 with image img2.
The hue filter replaces the hues of pixels in img1
with the hues of pixels in img2.
Returns a composite image with the alpha channel retained.
"""
import colorsys
p1 = list(img1.getdata())
p2 = list(img2.getdata())
for i in range(len(p1)):
r1, g1, b1, a1 = p1[i]
r1 = r1 / 255.0
g1 = g1 / 255.0
b1 = b1 / 255.0
h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)
r2, g2, b2, a2 = p2[i]
r2 = r2 / 255.0
g2 = g2 / 255.0
b2 = b2 / 255.0
h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)
r3, g3, b3 = colorsys.hsv_to_rgb(h2, s1, v1)
r3 = int(r3*255)
g3 = int(g3*255)
b3 = int(b3*255)
p1[i] = (r3, g3, b3, a1)
img = Image.new("RGBA", img1.size, 255)
img.putdata(p1)
return img
|
[
"def",
"hue",
"(",
"self",
",",
"img1",
",",
"img2",
")",
":",
"import",
"colorsys",
"p1",
"=",
"list",
"(",
"img1",
".",
"getdata",
"(",
")",
")",
"p2",
"=",
"list",
"(",
"img2",
".",
"getdata",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"p1",
")",
")",
":",
"r1",
",",
"g1",
",",
"b1",
",",
"a1",
"=",
"p1",
"[",
"i",
"]",
"r1",
"=",
"r1",
"/",
"255.0",
"g1",
"=",
"g1",
"/",
"255.0",
"b1",
"=",
"b1",
"/",
"255.0",
"h1",
",",
"s1",
",",
"v1",
"=",
"colorsys",
".",
"rgb_to_hsv",
"(",
"r1",
",",
"g1",
",",
"b1",
")",
"r2",
",",
"g2",
",",
"b2",
",",
"a2",
"=",
"p2",
"[",
"i",
"]",
"r2",
"=",
"r2",
"/",
"255.0",
"g2",
"=",
"g2",
"/",
"255.0",
"b2",
"=",
"b2",
"/",
"255.0",
"h2",
",",
"s2",
",",
"v2",
"=",
"colorsys",
".",
"rgb_to_hsv",
"(",
"r2",
",",
"g2",
",",
"b2",
")",
"r3",
",",
"g3",
",",
"b3",
"=",
"colorsys",
".",
"hsv_to_rgb",
"(",
"h2",
",",
"s1",
",",
"v1",
")",
"r3",
"=",
"int",
"(",
"r3",
"*",
"255",
")",
"g3",
"=",
"int",
"(",
"g3",
"*",
"255",
")",
"b3",
"=",
"int",
"(",
"b3",
"*",
"255",
")",
"p1",
"[",
"i",
"]",
"=",
"(",
"r3",
",",
"g3",
",",
"b3",
",",
"a1",
")",
"img",
"=",
"Image",
".",
"new",
"(",
"\"RGBA\"",
",",
"img1",
".",
"size",
",",
"255",
")",
"img",
".",
"putdata",
"(",
"p1",
")",
"return",
"img"
] | 26.55 | 17.625 |
def _get_corpus_properties(self, corpus_name):
"""Check whether a corpus is available for import.
:type corpus_name: str
:param corpus_name: Name of available corpus.
:rtype : str
"""
try:
# corpora = LANGUAGE_CORPORA[self.language]
corpora = self.all_corpora
except NameError as name_error:
msg = 'Corpus not available for language ' \
'"%s": %s' % (self.language, name_error)
logger.error(msg)
raise CorpusImportError(msg)
for corpus_properties in corpora:
if corpus_properties['name'] == corpus_name:
return corpus_properties
msg = 'Corpus "%s" not available for the ' \
'"%s" language.' % (corpus_name, self.language)
logger.error(msg)
raise CorpusImportError(msg)
|
[
"def",
"_get_corpus_properties",
"(",
"self",
",",
"corpus_name",
")",
":",
"try",
":",
"# corpora = LANGUAGE_CORPORA[self.language]",
"corpora",
"=",
"self",
".",
"all_corpora",
"except",
"NameError",
"as",
"name_error",
":",
"msg",
"=",
"'Corpus not available for language '",
"'\"%s\": %s'",
"%",
"(",
"self",
".",
"language",
",",
"name_error",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"CorpusImportError",
"(",
"msg",
")",
"for",
"corpus_properties",
"in",
"corpora",
":",
"if",
"corpus_properties",
"[",
"'name'",
"]",
"==",
"corpus_name",
":",
"return",
"corpus_properties",
"msg",
"=",
"'Corpus \"%s\" not available for the '",
"'\"%s\" language.'",
"%",
"(",
"corpus_name",
",",
"self",
".",
"language",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"CorpusImportError",
"(",
"msg",
")"
] | 40.761905 | 9.952381 |
def addItem(self, itemType, itemContents, itemID=None):
"""
:param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it
"""
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID
|
[
"def",
"addItem",
"(",
"self",
",",
"itemType",
",",
"itemContents",
",",
"itemID",
"=",
"None",
")",
":",
"if",
"itemType",
"not",
"in",
"self",
".",
"noteDB",
".",
"collection_names",
"(",
")",
":",
"fields",
"=",
"[",
"(",
"ii",
",",
"pymongo",
".",
"TEXT",
")",
"for",
"ii",
"in",
"itemContents",
"]",
"self",
".",
"noteDB",
"[",
"itemType",
"]",
".",
"ensure_index",
"(",
"fields",
")",
"collection",
"=",
"self",
".",
"noteDB",
"[",
"itemType",
"]",
"if",
"itemID",
"is",
"None",
":",
"itemContents",
"[",
"'timestamps'",
"]",
"=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"itemID",
"=",
"self",
".",
"getNewID",
"(",
")",
"itemContents",
"[",
"\"ID\"",
"]",
"=",
"itemID",
"collection",
".",
"insert",
"(",
"itemContents",
")",
"else",
":",
"_id",
"=",
"collection",
".",
"find_one",
"(",
"{",
"\"ID\"",
":",
"itemID",
"}",
")",
"[",
"\"_id\"",
"]",
"timestamps",
"=",
"collection",
".",
"find_one",
"(",
"{",
"\"ID\"",
":",
"itemID",
"}",
")",
"[",
"\"timestamps\"",
"]",
"timestamps",
".",
"append",
"(",
"time",
".",
"time",
"(",
")",
")",
"itemContents",
"[",
"\"timestamps\"",
"]",
"=",
"timestamps",
"itemContents",
"[",
"\"ID\"",
"]",
"=",
"itemID",
"collection",
".",
"update",
"(",
"{",
"\"_id\"",
":",
"_id",
"}",
",",
"itemContents",
")",
"return",
"itemID"
] | 39.62963 | 18.518519 |