def get_mfd(self, slip, area, shear_modulus=30.0):
    '''
    Calculates activity rate on the fault

    :param float slip:
        Slip rate in mm/yr
    :param float area:
        Area of the fault surface (km ** 2)
    :param float shear_modulus:
        Shear modulus of the fault (GPa)
    :returns:
        * Minimum Magnitude (float)
        * Bin width (float)
        * Occurrence Rates (numpy.ndarray)
    '''
    # Working in Nm so convert: shear_modulus - GPa -> Nm
    #                           area - km ** 2. -> m ** 2.
    #                           slip - mm/yr -> m/yr
    moment_rate = (shear_modulus * 1.E9) * (area * 1.E6) * (slip / 1000.)
    moment_mag = _scale_moment(self.mmax, in_nm=True)
    characteristic_rate = moment_rate / moment_mag
    if self.sigma and (fabs(self.sigma) > 1E-5):
        self.mmin = self.mmax + (self.lower_bound * self.sigma)
        mag_upper = self.mmax + (self.upper_bound * self.sigma)
        mag_range = np.arange(self.mmin,
                              mag_upper + self.bin_width,
                              self.bin_width)
        self.occurrence_rate = characteristic_rate * (
            truncnorm.cdf(mag_range + (self.bin_width / 2.),
                          self.lower_bound, self.upper_bound,
                          loc=self.mmax, scale=self.sigma) -
            truncnorm.cdf(mag_range - (self.bin_width / 2.),
                          self.lower_bound, self.upper_bound,
                          loc=self.mmax, scale=self.sigma))
    else:
        # Returns only a single rate
        self.mmin = self.mmax
        self.occurrence_rate = np.array([characteristic_rate], dtype=float)
    return self.mmin, self.bin_width, self.occurrence_rate
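A minimal usage sketch. The host class name and constructor below (CharacteristicMFD with mmax, sigma, lower_bound, upper_bound and bin_width attributes) are hypothetical stand-ins inferred from the attributes the method reads; only slip, area and shear_modulus come from the signature above.

# Hypothetical recurrence-model instance; attribute names match those read by get_mfd
mfd = CharacteristicMFD(mmax=7.0, sigma=0.12, lower_bound=-3.0,
                        upper_bound=3.0, bin_width=0.01)
# 5 mm/yr of slip on a 1200 km ** 2 fault surface, default 30 GPa shear modulus
mmin, bin_width, rates = mfd.get_mfd(slip=5.0, area=1200.0)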
def _select_concept(self, line):
    """try to match a class and load it"""
    g = self.current['graph']
    if not line:
        out = g.all_skos_concepts
        using_pattern = False
    else:
        using_pattern = True
        if line.isdigit():
            line = int(line)
        out = g.get_skos(line)
    if out:
        if isinstance(out, list):
            choice = self._selectFromList(out, using_pattern, "concept")
            if choice:
                self.currentEntity = {'name': choice.locale or choice.uri,
                                      'object': choice, 'type': 'concept'}
        else:
            self.currentEntity = {'name': out.locale or out.uri,
                                  'object': out, 'type': 'concept'}
    # ..finally:
    if self.currentEntity:
        self._print_entity_intro(entity=self.currentEntity)
    else:
        print("not found")
def process_commenters(self):
    """Group comments by author."""
    for index, submission in enumerate(self.submissions.values()):
        if submission.num_comments == 0:
            continue
        real_submission = self.reddit.submission(id=submission.id)
        real_submission.comment_sort = 'top'
        for i in range(3):
            try:
                real_submission.comments.replace_more(limit=0)
                break
            except RequestException:
                if i >= 2:
                    raise
                logger.debug('Failed to fetch submission {}, retrying'
                             .format(submission.id))
        self.comments.extend(MiniComment(comment, submission)
                             for comment in real_submission.comments.list()
                             if self.distinguished
                             or comment.distinguished is None)
        if index % 50 == 49:
            logger.debug('Completed: {:4d}/{} submissions'
                         .format(index + 1, len(self.submissions)))
            # Clean up to reduce memory usage
            submission = None
            gc.collect()
    self.comments.sort(key=lambda x: x.created_utc)
    for comment in self.comments:
        if comment.author:
            self.commenters[comment.author].append(comment)
def get_associations(self, env):
    """
    Get all the associations for this env.
    Root cannot have associations, so return None for root.
    Returns a list of the association names that map to this environment.
    """
    if env.is_root:
        return None
    associations = self.associations.get_all()
    return [assoc for assoc in associations
            if associations[assoc] == self._get_view_path(env)]
def b64_hmac_md5(key, data):
    """
    return base64-encoded HMAC-MD5 for key and data, with trailing '='
    stripped.
    """
    bdigest = base64.b64encode(hmac.new(key, data, _md5).digest()).strip().decode("utf-8")
    return re.sub('=+$', '', bdigest)
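A short usage sketch, assuming _md5 is bound to hashlib.md5 in the enclosing module; on Python 3 both key and data must be bytes for hmac.new:

sig = b64_hmac_md5(b'secret-key', b'message body')
# MD5 digests are 16 bytes, so sig is the 22-character base64 form
# with the two trailing '=' padding characters removed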
def _depaginate_all(self, url):
    """GETs the url provided and traverses the 'next' url that's
    returned while storing the data in a list. Returns a single list of all
    items.
    """
    items = []
    for x in self._depagination_generator(url):
        items += x
    return items
def get_build_metadata(key):
    '''get_build_metadata will return metadata about an instance from within it.

    :param key: the key to look up
    '''
    headers = {"Metadata-Flavor": "Google"}
    url = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/%s" % (key)
    response = requests.get(url=url, headers=headers)
    if response.status_code == 200:
        return response.text
    return None
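A brief usage sketch. This only works from inside a Google Compute Engine instance, since metadata.google.internal resolves only there; the attribute key below is illustrative:

build_id = get_build_metadata("build-id")  # hypothetical attribute key
if build_id is None:
    print("attribute not set (non-200 response)")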
def list_assignment_submissions_courses(self, course_id, assignment_id, grouped=None, include=None):
    """
    List assignment submissions.

    Get all existing submissions for an assignment.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # REQUIRED - PATH - assignment_id
    """ID"""
    path["assignment_id"] = assignment_id

    # OPTIONAL - include
    """Associations to include with the group. "group" will add group_id and group_name."""
    if include is not None:
        self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "assignment", "visibility", "course", "user", "group"])
        params["include"] = include

    # OPTIONAL - grouped
    """If this argument is true, the response will be grouped by student groups."""
    if grouped is not None:
        params["grouped"] = grouped

    self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions".format(**path), data=data, params=params, all_pages=True)
def path_has(self, **kwargs):
    """
    Checks if the current path has a node spec'd by kwargs
    """
    str_node = "/"  # This node will always be "False"
    for key, val in kwargs.items():
        if key == 'node':
            str_node = val
    if str_node in self.l_cwd:
        return {'found': True,
                'indices': [i for i, x in enumerate(self.l_cwd) if x == str_node]}
    else:
        return {'found': False,
                'indices': [-1]}
def label_correcting_get_cycle(self, j, pred):
    '''
    API:
        label_correcting_get_cycle(self, j, pred)
    Description:
        In label correcting check cycle it is decided that pred contains a
        cycle and the nodes in the cycle are labelled. We create a list of
        the nodes in the cycle using the j and pred inputs.
    Pre:
        This method should be called from label_correcting_check_cycle(),
        unless you are sure about what you are doing.
    Input:
        j: Node whose predecessor was recently updated. We know that it is
           in the cycle.
        pred: Predecessor dictionary that contains a cycle.
    Post:
        Returns a list of nodes that represents the cycle. It is in
        [n_1, n_2, ..., n_k] form where the cycle has k nodes.
    '''
    cycle = []
    cycle.append(j)
    current = pred[j]
    while current != j:
        cycle.append(current)
        current = pred[current]
    cycle.reverse()
    return cycle
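A quick trace with a hypothetical predecessor dictionary encoding the cycle 2 -> 3 -> 1 -> 2 (pred[n] is the predecessor of node n):

pred = {2: 1, 3: 2, 1: 3}
# Starting from j=1 the loop walks predecessors, collecting [1, 3, 2];
# the final reverse() yields the cycle in forward order: [2, 3, 1].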
def validate(self, reason=True):
    # type: (bool) -> list
    """Return the validation results and include an (optional) reason.

    If the reason keyword is true, a (result, reason) tuple is returned for
    each validator: [(<result: bool>, <reason: str>), ...]. If reason is
    False, only a single list of validation results for each configured
    validator is returned.

    :param reason: (optional) switch to indicate if the reason of the validation should be provided
    :type reason: bool
    :return: list of validation results [bool, bool, ...] or
             a list of validation results, reasons [(bool, str), ...]
    :rtype: list(bool) or list((bool, str))
    :raises Exception: for incorrect validators or incompatible values
    """
    self._validation_results = [validator.is_valid(self._value) for validator in getattr(self, '_validators')]
    self._validation_reasons = [validator.get_reason() for validator in getattr(self, '_validators')]
    if reason:
        return list(zip(self._validation_results, self._validation_reasons))
    else:
        return self._validation_results
def uuid_to_slug(uuid):
    """
    Return IOTile Cloud compatible Device Slug

    :param uuid: integer device id
    :return: string in the form of d--0000-0000-0000-0001
    """
    if not isinstance(uuid, int):
        raise ArgumentError("Invalid id that is not an integer", id=uuid)
    if uuid < 0 or uuid > 0x7fffffff:
        # For now, limiting support to a signed integer (which on some platforms, can be 32bits)
        raise ArgumentError("Integer should be a positive number and smaller than 0x7fffffff", id=uuid)
    return '--'.join(['d', int64gid(uuid)])
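A usage sketch, assuming int64gid renders the id as the dashed, zero-padded hex groups shown in the docstring:

uuid_to_slug(1)     # -> 'd--0000-0000-0000-0001'
uuid_to_slug(0x2a)  # presumably 'd--0000-0000-0000-002a'
uuid_to_slug(-1)    # raises ArgumentError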
def edge_history(self, source, target, attribute):
    """
    Returns a dictionary of attribute values for each Graph in the
    :class:`.GraphCollection` for a single edge.

    Parameters
    ----------
    source : str
        Identifier for source node.
    target : str
        Identifier for target node.
    attribute : str
        The attribute of interest; e.g. 'betweenness_centrality'

    Returns
    -------
    history : dict
    """
    return {attr['graph']: attr[attribute] for i, attr
            in self.master_graph.edge[source][target].items()}
def _symlink_check(name, target, force, user, group, win_owner):
    '''
    Check the symlink function
    '''
    changes = {}
    if not os.path.exists(name) and not __salt__['file.is_link'](name):
        changes['new'] = name
        return None, 'Symlink {0} to {1} is set for creation'.format(
            name, target
        ), changes
    if __salt__['file.is_link'](name):
        if __salt__['file.readlink'](name) != target:
            changes['change'] = name
            return None, 'Link {0} target is set to be changed to {1}'.format(
                name, target
            ), changes
        else:
            result = True
            msg = 'The symlink {0} is present'.format(name)
            if not _check_symlink_ownership(name, user, group, win_owner):
                result = None
                changes['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name))
                msg += (
                    ', but the ownership of the symlink would be changed '
                    'from {2}:{3} to {0}:{1}'
                ).format(user, group, *_get_symlink_ownership(name))
            return result, msg, changes
    else:
        if force:
            return None, ('The file or directory {0} is set for removal to '
                          'make way for a new symlink targeting {1}'
                          .format(name, target)), changes
        return False, ('File or directory exists where the symlink {0} '
                       'should be. Did you mean to use force?'.format(name)), changes
def is_readable(path):
    '''
    Check if a given path is readable by the current user.

    :param path: The path to check
    :returns: True or False
    '''
    if os.access(path, os.F_OK) and os.access(path, os.R_OK):
        # The path exists and is readable
        return True
    # The path is missing or is not readable
    return False
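A short usage sketch (the path is illustrative):

if is_readable('/etc/hosts'):
    with open('/etc/hosts') as handle:
        print(handle.readline())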
def rpc(_cloud_rpc_name, **params):
    """
    Call remote code on LeanEngine.

    Similar to cloud.run, but allows passing a leancloud.Object as a
    parameter and receiving a leancloud.Object as the result.

    :param _cloud_rpc_name: name of the remote Cloud Code function to call
    :type _cloud_rpc_name: basestring
    :param params: call parameters
    :return: call result
    """
    encoded_params = {}
    for key, value in params.items():
        # test the individual value, not the whole params dict
        if isinstance(value, leancloud.Object):
            encoded_params[key] = utils.encode(value._dump())
        else:
            encoded_params[key] = utils.encode(value)
    response = leancloud.client.post('/call/{}'.format(_cloud_rpc_name), params=encoded_params)
    content = response.json()
    return utils.decode(None, content['result'])
def publish(self, topic, schema=None, name=None):
    """
    Publish this stream on a topic for other Streams applications to subscribe to.
    A Streams application may publish a stream to allow other
    Streams applications to subscribe to it. A subscriber
    matches a publisher if the topic and schema match.

    By default a stream is published using its schema.
    A stream of :py:const:`Python objects <streamsx.topology.schema.CommonSchema.Python>` can be subscribed to by other Streams Python applications.

    If a stream is published setting `schema` to
    :py:const:`~streamsx.topology.schema.CommonSchema.Json`
    then it is published as a stream of JSON objects.
    Other Streams applications may subscribe to it regardless
    of their implementation language.

    If a stream is published setting `schema` to
    :py:const:`~streamsx.topology.schema.CommonSchema.String`
    then it is published as a stream of strings.
    Other Streams applications may subscribe to it regardless
    of their implementation language.

    Supported values of `schema` are only
    :py:const:`~streamsx.topology.schema.CommonSchema.Json`
    and
    :py:const:`~streamsx.topology.schema.CommonSchema.String`.

    Args:
        topic(str): Topic to publish this stream to.
        schema: Schema to publish. Defaults to the schema of this stream.
        name(str): Name of the publish operator, defaults to a generated name.

    Returns:
        streamsx.topology.topology.Sink: Stream termination.

    .. versionadded:: 1.6.1 `name` parameter.
    .. versionchanged:: 1.7
        Now returns a :py:class:`Sink` instance.
    """
    sl = _SourceLocation(_source_info(), 'publish')
    schema = streamsx.topology.schema._normalize(schema)
    if schema is not None and self.oport.schema.schema() != schema.schema():
        if schema == streamsx.topology.schema.CommonSchema.Json:
            schema_change = self.as_json()
        elif schema == streamsx.topology.schema.CommonSchema.String:
            schema_change = self.as_string()
        else:
            raise ValueError(schema)
        if self._placeable:
            self._colocate(schema_change, 'publish')
        sp = schema_change.publish(topic, schema=schema, name=name)
        sp._op().sl = sl
        return sp
    _name = self.topology.graph._requested_name(name, action="publish")
    # publish is never stateful
    op = self.topology.graph.addOperator("com.ibm.streamsx.topology.topic::Publish", params={'topic': topic}, sl=sl, name=_name, stateful=False)
    op.addInputPort(outputPort=self.oport)
    op._layout_group('Publish', name if name else _name)
    sink = Sink(op)
    if self._placeable:
        self._colocate(sink, 'publish')
    return sink
def authenticate(self):
    """
    Authenticates with the PA OAuth system
    """
    if self._auth_token is None or self._token_expiry < time.time():
        self._perform_auth()
    yield self._auth_token
def parse_pseudo_lang(self, sel, m, has_selector):
    """Parse pseudo language."""
    values = m.group('values')
    patterns = []
    for token in RE_VALUES.finditer(values):
        if token.group('split'):
            continue
        value = token.group('value')
        if value.startswith(('"', "'")):
            parts = css_unescape(value[1:-1], True).split('-')
        else:
            parts = css_unescape(value).split('-')
        new_parts = []
        first = True
        for part in parts:
            if part == '*' and first:
                # escape the backslash so `\b` reaches the regex engine
                # (a bare '\b' in a non-raw string is a backspace character)
                new_parts.append('(?!x\\b)[a-z0-9]+?')
            elif part != '*':
                new_parts.append(('' if first else '(-(?!x\\b)[a-z0-9]+)*?\\-') + re.escape(part))
            if first:
                first = False
        patterns.append(re.compile(r'^{}(?:-.*)?$'.format(''.join(new_parts)), re.I))
    sel.lang.append(ct.SelectorLang(patterns))
    has_selector = True
    return has_selector
def createTable(self, tableName, path=None, source=None, schema=None, **options):
    """Creates a table based on the dataset in a data source.

    It returns the DataFrame associated with the table.

    The data source is specified by the ``source`` and a set of ``options``.
    If ``source`` is not specified, the default data source configured by
    ``spark.sql.sources.default`` will be used. When ``path`` is specified, an external table is
    created from the data at the given path. Otherwise a managed table is created.

    Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
    created table.

    :return: :class:`DataFrame`
    """
    if path is not None:
        options["path"] = path
    if source is None:
        source = self._sparkSession._wrapped._conf.defaultDataSourceName()
    if schema is None:
        df = self._jcatalog.createTable(tableName, source, options)
    else:
        if not isinstance(schema, StructType):
            raise TypeError("schema should be StructType")
        scala_datatype = self._jsparkSession.parseDataType(schema.json())
        df = self._jcatalog.createTable(tableName, source, scala_datatype, options)
    return DataFrame(df, self._sparkSession._wrapped)
def add_full_state_methods(class_with_globalize_methods):
    """
    class decorator to create "_full_state" methods/properties on the class (so they
    are valid for all instances created from this class).

    Parameters
    ----------
    class_with_globalize_methods
    """
    assert hasattr(class_with_globalize_methods, 'active_set')
    assert hasattr(class_with_globalize_methods, 'nstates_full')

    for name, method in class_with_globalize_methods.__dict__.copy().items():
        if isinstance(method, property) and hasattr(method.fget, '_map_to_full_state_def_arg'):
            default_value = method.fget._map_to_full_state_def_arg
            axis = method.fget._map_to_full_state_along_axis
            new_getter = _wrap_to_full_state(name, default_value, axis)
            alias_to_full_state_inst = property(new_getter)
        elif hasattr(method, '_map_to_full_state_def_arg'):
            default_value = method._map_to_full_state_def_arg
            axis = method._map_to_full_state_along_axis
            alias_to_full_state_inst = _wrap_to_full_state(name, default_value, axis)
        else:
            continue

        name += "_full_state"
        setattr(class_with_globalize_methods, name, alias_to_full_state_inst)
    return class_with_globalize_methods
def register(self, schema):
    """Register input schema class.

    When registering a schema, all inner schemas are registered as well.

    :param Schema schema: schema to register.
    :return: old registered schema.
    :rtype: type
    """
    result = None
    uuid = schema.uuid
    if uuid in self._schbyuuid:
        result = self._schbyuuid[uuid]
    if result != schema:
        self._schbyuuid[uuid] = schema
        name = schema.name
        schemas = self._schbyname.setdefault(name, set())
        schemas.add(schema)
        for innername, innerschema in iteritems(schema.getschemas()):
            if innerschema.uuid not in self._schbyuuid:
                # recurse through the bound method to register inner schemas
                self.register(innerschema)
    return result
def acosh(x):
    """
    Inverse hyperbolic cosine
    """
    if isinstance(x, UncertainFunction):
        mcpts = np.arccosh(x._mcpts)
        return UncertainFunction(mcpts)
    else:
        return np.arccosh(x)
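A usage sketch: plain numbers and arrays fall through to numpy, while UncertainFunction inputs are mapped over their Monte Carlo points:

import numpy as np
acosh(1.0)                   # -> 0.0
acosh(np.array([1.0, 2.0]))  # elementwise np.arccosh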
def consecutive_groups(iterable, ordering=lambda x: x):
    """Yield groups of consecutive items using :func:`itertools.groupby`.
    The *ordering* function determines whether two items are adjacent by
    returning their position.

    By default, the ordering function is the identity function. This is
    suitable for finding runs of numbers:

        >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
        >>> for group in consecutive_groups(iterable):
        ...     print(list(group))
        [1]
        [10, 11, 12]
        [20]
        [30, 31, 32, 33]
        [40]

    For finding runs of adjacent letters, try using the :meth:`index` method
    of a string of letters:

        >>> from string import ascii_lowercase
        >>> iterable = 'abcdfgilmnop'
        >>> ordering = ascii_lowercase.index
        >>> for group in consecutive_groups(iterable, ordering):
        ...     print(list(group))
        ['a', 'b', 'c', 'd']
        ['f', 'g']
        ['i']
        ['l', 'm', 'n', 'o', 'p']
    """
    for k, g in groupby(
        enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
    ):
        yield map(itemgetter(1), g)
def upload_index_file(self, test_address, timestamp):
    """ Create an index.html file with links to all the log files
    that were just uploaded. """
    global already_uploaded_files
    already_uploaded_files = list(set(already_uploaded_files))
    already_uploaded_files.sort()
    file_name = "%s/%s/index.html" % (test_address, timestamp)
    index = self.get_key(file_name)
    index_str = []
    for completed_file in already_uploaded_files:
        index_str.append("<a href='" + self.bucket_url + ""
                         "%s'>%s</a>" % (completed_file, completed_file))
    index.set_contents_from_string(
        "<br>".join(index_str),
        headers={"Content-Type": "text/html"})
    index.make_public()
    return "%s%s" % (self.bucket_url, file_name)
def toURLEncoded(self):
    """Generate an x-www-urlencoded string"""
    args = self.toPostArgs().items()
    args.sort()
    return urllib.urlencode(args)
def _publish_grade(self, score, only_if_higher=None):
    """
    Publish a grade to the runtime.
    """
    grade_dict = {
        'value': score.raw_earned,
        'max_value': score.raw_possible,
        'only_if_higher': only_if_higher,
    }
    self.runtime.publish(self, 'grade', grade_dict)
def get_sdc_by_id(self, id):
    """
    Get ScaleIO SDC object by its id

    :param id: id of SDC
    :return: ScaleIO SDC object
    :raise KeyError: No SDC with specified id found
    :rtype: SDC object
    """
    for sdc in self.sdc:
        if sdc.id == id:
            return sdc
    raise KeyError("SDC with that ID not found")
def validation_curve(train_scores, test_scores, param_range, param_name=None,
                     semilogx=False, ax=None):
    """Plot a validation curve

    Plot a metric vs hyperparameter values for the training and test set

    Parameters
    ----------
    train_scores : array-like
        Scores for the training set
    test_scores : array-like
        Scores for the test set
    param_range : array-like
        Hyperparameter values used to generate the curve
    param_name : str
        Hyperparameter name
    semilogx : bool
        Sets a log scale on the x axis
    ax : matplotlib Axes
        Axes object to draw the plot onto, otherwise uses current Axes

    Returns
    -------
    ax: matplotlib Axes
        Axes containing the plot

    Examples
    --------
    .. plot:: ../../examples/validation_curve.py
    """
    if ax is None:
        ax = plt.gca()
    if semilogx:
        ax.set_xscale('log')
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    ax.set_title("Validation Curve")
    ax.set_ylabel("Score mean")
    if param_name:
        ax.set_xlabel(param_name)
    ax.plot(param_range, train_scores_mean, label="Training score", color="r")
    ax.plot(param_range, test_scores_mean, label="Cross-validation score",
            color="g")
    ax.fill_between(param_range, train_scores_mean - train_scores_std,
                    train_scores_mean + train_scores_std, alpha=0.2, color="r")
    ax.fill_between(param_range, test_scores_mean - test_scores_std,
                    test_scores_mean + test_scores_std, alpha=0.2, color="g")
    ax.legend(loc="best")
    ax.margins(0.05)
    return ax
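A usage sketch wiring this plotting helper to scikit-learn's score generator; the estimator, parameter grid, and the pre-loaded X and y are assumptions:

import numpy as np
from sklearn.model_selection import validation_curve as sk_validation_curve
from sklearn.tree import DecisionTreeRegressor

param_range = np.arange(1, 11)
# train_scores and test_scores come back with one row per parameter value
train_scores, test_scores = sk_validation_curve(
    DecisionTreeRegressor(), X, y,
    param_name='max_depth', param_range=param_range, cv=5)
ax = validation_curve(train_scores, test_scores, param_range,
                      param_name='max_depth', semilogx=False)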
def _set_addr(self, addr):
    """private helper method"""
    if self._addr != addr:
        ioctl(self._fd, SMBUS.I2C_SLAVE, addr)
        self._addr = addr
def add_roles(self, databaseName, roleNames, collectionName=None):
    """Add multiple roles

    Args:
        databaseName (str): Database Name
        roleNames (list of RoleSpecs): roles

    Keyword Args:
        collectionName (str): Collection

    Raises:
        ErrRoleException: role not compatible with the databaseName and/or collectionName
    """
    for roleName in roleNames:
        self.add_role(databaseName, roleName, collectionName)
def generate_entropy(strength, internal_entropy, external_entropy):
    '''
    strength - length of produced seed. One of 128, 192, 256
    internal_entropy - random data from the device's own RNG
    external_entropy - binary stream of random data from external HRNG
    '''
    if strength not in (128, 192, 256):
        raise ValueError("Invalid strength")
    if not internal_entropy:
        raise ValueError("Internal entropy is not provided")
    if len(internal_entropy) < 32:
        raise ValueError("Internal entropy too short")
    if not external_entropy:
        raise ValueError("External entropy is not provided")
    if len(external_entropy) < 32:
        raise ValueError("External entropy too short")
    entropy = hashlib.sha256(internal_entropy + external_entropy).digest()
    entropy_stripped = entropy[:strength // 8]
    if len(entropy_stripped) * 8 != strength:
        raise ValueError("Entropy length mismatch")
    return entropy_stripped
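A usage sketch with os.urandom standing in for both entropy sources:

import os
seed = generate_entropy(128, os.urandom(32), os.urandom(32))
assert len(seed) == 16  # 128 bits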
def deconstruct(self):
    """Deconstruction for migrations.

    Return a simpler object (_SerializedWorkflow), since our Workflows
    are rather hard to serialize: Django doesn't like deconstructing
    metaclass-built classes.
    """
    name, path, args, kwargs = super(StateField, self).deconstruct()
    # We want to display the proper class name, which isn't available
    # at the same point for _SerializedWorkflow and Workflow.
    if isinstance(self.workflow, _SerializedWorkflow):
        workflow_class_name = self.workflow._name
    else:
        workflow_class_name = self.workflow.__class__.__name__
    kwargs['workflow'] = _SerializedWorkflow(
        name=workflow_class_name,
        initial_state=str(self.workflow.initial_state.name),
        states=[str(st.name) for st in self.workflow.states],
    )
    del kwargs['choices']
    del kwargs['default']
    return name, path, args, kwargs
def _getModules(self):
    """
    Import and load application modules.

    :return: <dict>
    """
    modules = {}
    modulesPath = os.path.join("application", "module")
    moduleList = os.listdir(modulesPath)
    for moduleName in moduleList:
        modulePath = os.path.join(modulesPath, moduleName, "module.py")
        if not os.path.isfile(modulePath):
            continue
        # importing module
        moduleSpec = importlib.util.spec_from_file_location(
            moduleName,
            modulePath
        )
        module = importlib.util.module_from_spec(moduleSpec)
        moduleSpec.loader.exec_module(module)
        # initializing module
        moduleInstance = module.Module(self)
        modules[moduleName] = moduleInstance
    return modules
def verify_password(self, password, password_hash):
    """Verify plaintext ``password`` against ``hashed password``.

    Args:
        password(str): Plaintext password that the user types in.
        password_hash(str): Password hash generated by a previous call to ``hash_password()``.

    Returns:
        | True when ``password`` matches ``password_hash``.
        | False otherwise.

    Example:

    ::

        if verify_password('mypassword', user.password):
            login_user(user)
    """
    # Print deprecation warning if called with (password, user) instead of (password, user.password)
    if isinstance(password_hash, self.user_manager.db_manager.UserClass):
        print(
            'Deprecation warning: verify_password(password, user) has been changed'
            ' to: verify_password(password, password_hash). The user param will be deprecated.'
            ' Please change your call with verify_password(password, user) into'
            ' a call with verify_password(password, user.password)'
            ' as soon as possible.')
        password_hash = password_hash.password  # effectively user.password
    # Use passlib's CryptContext to verify a password
    return self.password_crypt_context.verify(password, password_hash)
def line_content_counts_as_uncovered_manual(content: str) -> bool:
    """
    Args:
        content: A line with indentation and tail comments/space removed.

    Returns:
        Whether the line could be included in the coverage report.
    """
    # Omit empty lines.
    if not content:
        return False

    # Omit declarations.
    for keyword in ['def', 'class']:
        if content.startswith(keyword) and content.endswith(':'):
            return False

    # TODO: multiline comments, multiline strings, etc, etc.
    return True
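A few illustrative calls; inputs are assumed to be pre-stripped of indentation and tail comments, per the docstring:

line_content_counts_as_uncovered_manual('')            # False: empty
line_content_counts_as_uncovered_manual('def foo():')  # False: declaration
line_content_counts_as_uncovered_manual('x = foo()')   # True: executable line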
def _viewEntryNumber(self, ientry, select=True):
    """views entry #ientry. Also selects entry in listview if select=True"""
    # pass entry to viewer dialog
    self._viewing_ientry = ientry
    entry = self.purrer.entries[ientry]
    busy = BusyIndicator()
    self.view_entry_dialog.viewEntry(entry,
                                     prev=ientry > 0 and self.purrer.entries[ientry - 1],
                                     next=ientry < len(self.purrer.entries) - 1 and self.purrer.entries[ientry + 1])
    self.view_entry_dialog.show()
    # select entry in listview
    if select:
        self.etw.clearSelection()
        self.etw.setItemSelected(self.etw.topLevelItem(ientry), True)
def main(**options):
    """Spline loc tool."""
    application = Application(**options)
    # fails application when your defined threshold is higher than your ratio of com/loc.
    if not application.run():
        sys.exit(1)
    return application
def baseline_or_audit(self, allow_deletion=False, audit_only=False):
"""Baseline synchonization or audit.
Both functions implemented in this routine because audit is a prerequisite
for a baseline sync. In the case of baseline sync the last timestamp seen
is recorded as client state.
"""
action = ('audit' if (audit_only) else 'baseline sync')
self.logger.debug("Starting " + action)
# 0. Sanity checks
if (len(self.mapper) < 1):
raise ClientFatalError(
"No source to destination mapping specified")
if (not audit_only and self.mapper.unsafe()):
raise ClientFatalError(
"Source to destination mappings unsafe: %s" %
str(self.mapper))
# 1. Get inventories from both src and dst
# 1.a source resource list
src_resource_list = self.find_resource_list()
self.logger.info(
"Read source resource list, %d resources listed" %
(len(src_resource_list)))
if (len(src_resource_list) == 0):
raise ClientFatalError(
"Aborting as there are no resources to sync")
if (len(self.hashes) > 0):
self.prune_hashes(src_resource_list.hashes(), 'resource')
# 1.b destination resource list mapped back to source URIs
rlb = ResourceListBuilder(set_hashes=self.hashes, mapper=self.mapper)
dst_resource_list = rlb.from_disk()
# 2. Compare these resource lists respecting any comparison options
(same, updated, deleted, created) = dst_resource_list.compare(src_resource_list)
# 3. Report status and planned actions
self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
audit=True, same=len(same), created=len(created),
updated=len(updated), deleted=len(deleted))
if (audit_only or len(created) + len(updated) + len(deleted) == 0):
self.logger.debug("Completed " + action)
return
# 4. Check that sitemap has authority over URIs listed
if (not self.noauth):
uauth = UrlAuthority(self.sitemap, strict=self.strictauth)
for resource in src_resource_list:
if (not uauth.has_authority_over(resource.uri)):
raise ClientFatalError(
"Aborting as sitemap (%s) mentions resource at a location it does not have authority over (%s), override with --noauth" %
(self.sitemap, resource.uri))
# 5. Grab files to do sync
delete_msg = (
", and delete %d resources" %
len(deleted)) if (allow_deletion) else ''
self.logger.warning(
"Will GET %d resources%s" %
(len(created) + len(updated), delete_msg))
self.last_timestamp = 0
num_created = 0
num_updated = 0
num_deleted = 0
for resource in created:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
self.logger.info("created: %s -> %s" % (uri, filename))
num_created += self.update_resource(resource, filename, 'created')
for resource in updated:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
self.logger.info("updated: %s -> %s" % (uri, filename))
num_updated += self.update_resource(resource, filename, 'updated')
for resource in deleted:
uri = resource.uri
filename = self.mapper.src_to_dst(uri)
num_deleted += self.delete_resource(resource,
filename, allow_deletion)
# 6. Store last timestamp to allow incremental sync
if (not audit_only and self.last_timestamp > 0):
ClientState().set_state(self.sitemap, self.last_timestamp)
self.logger.info(
"Written last timestamp %s for incremental sync" %
(datetime_to_str(
self.last_timestamp)))
# 7. Done
self.log_status(in_sync=(len(updated) + len(deleted) + len(created) == 0),
same=len(same), created=num_created,
updated=num_updated, deleted=num_deleted, to_delete=len(deleted))
self.logger.debug("Completed %s" % (action))
|
[
"def",
"baseline_or_audit",
"(",
"self",
",",
"allow_deletion",
"=",
"False",
",",
"audit_only",
"=",
"False",
")",
":",
"action",
"=",
"(",
"'audit'",
"if",
"(",
"audit_only",
")",
"else",
"'baseline sync'",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Starting \"",
"+",
"action",
")",
"# 0. Sanity checks",
"if",
"(",
"len",
"(",
"self",
".",
"mapper",
")",
"<",
"1",
")",
":",
"raise",
"ClientFatalError",
"(",
"\"No source to destination mapping specified\"",
")",
"if",
"(",
"not",
"audit_only",
"and",
"self",
".",
"mapper",
".",
"unsafe",
"(",
")",
")",
":",
"raise",
"ClientFatalError",
"(",
"\"Source to destination mappings unsafe: %s\"",
"%",
"str",
"(",
"self",
".",
"mapper",
")",
")",
"# 1. Get inventories from both src and dst",
"# 1.a source resource list",
"src_resource_list",
"=",
"self",
".",
"find_resource_list",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Read source resource list, %d resources listed\"",
"%",
"(",
"len",
"(",
"src_resource_list",
")",
")",
")",
"if",
"(",
"len",
"(",
"src_resource_list",
")",
"==",
"0",
")",
":",
"raise",
"ClientFatalError",
"(",
"\"Aborting as there are no resources to sync\"",
")",
"if",
"(",
"len",
"(",
"self",
".",
"hashes",
")",
">",
"0",
")",
":",
"self",
".",
"prune_hashes",
"(",
"src_resource_list",
".",
"hashes",
"(",
")",
",",
"'resource'",
")",
"# 1.b destination resource list mapped back to source URIs",
"rlb",
"=",
"ResourceListBuilder",
"(",
"set_hashes",
"=",
"self",
".",
"hashes",
",",
"mapper",
"=",
"self",
".",
"mapper",
")",
"dst_resource_list",
"=",
"rlb",
".",
"from_disk",
"(",
")",
"# 2. Compare these resource lists respecting any comparison options",
"(",
"same",
",",
"updated",
",",
"deleted",
",",
"created",
")",
"=",
"dst_resource_list",
".",
"compare",
"(",
"src_resource_list",
")",
"# 3. Report status and planned actions",
"self",
".",
"log_status",
"(",
"in_sync",
"=",
"(",
"len",
"(",
"updated",
")",
"+",
"len",
"(",
"deleted",
")",
"+",
"len",
"(",
"created",
")",
"==",
"0",
")",
",",
"audit",
"=",
"True",
",",
"same",
"=",
"len",
"(",
"same",
")",
",",
"created",
"=",
"len",
"(",
"created",
")",
",",
"updated",
"=",
"len",
"(",
"updated",
")",
",",
"deleted",
"=",
"len",
"(",
"deleted",
")",
")",
"if",
"(",
"audit_only",
"or",
"len",
"(",
"created",
")",
"+",
"len",
"(",
"updated",
")",
"+",
"len",
"(",
"deleted",
")",
"==",
"0",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Completed \"",
"+",
"action",
")",
"return",
"# 4. Check that sitemap has authority over URIs listed",
"if",
"(",
"not",
"self",
".",
"noauth",
")",
":",
"uauth",
"=",
"UrlAuthority",
"(",
"self",
".",
"sitemap",
",",
"strict",
"=",
"self",
".",
"strictauth",
")",
"for",
"resource",
"in",
"src_resource_list",
":",
"if",
"(",
"not",
"uauth",
".",
"has_authority_over",
"(",
"resource",
".",
"uri",
")",
")",
":",
"raise",
"ClientFatalError",
"(",
"\"Aborting as sitemap (%s) mentions resource at a location it does not have authority over (%s), override with --noauth\"",
"%",
"(",
"self",
".",
"sitemap",
",",
"resource",
".",
"uri",
")",
")",
"# 5. Grab files to do sync",
"delete_msg",
"=",
"(",
"\", and delete %d resources\"",
"%",
"len",
"(",
"deleted",
")",
")",
"if",
"(",
"allow_deletion",
")",
"else",
"''",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Will GET %d resources%s\"",
"%",
"(",
"len",
"(",
"created",
")",
"+",
"len",
"(",
"updated",
")",
",",
"delete_msg",
")",
")",
"self",
".",
"last_timestamp",
"=",
"0",
"num_created",
"=",
"0",
"num_updated",
"=",
"0",
"num_deleted",
"=",
"0",
"for",
"resource",
"in",
"created",
":",
"uri",
"=",
"resource",
".",
"uri",
"filename",
"=",
"self",
".",
"mapper",
".",
"src_to_dst",
"(",
"uri",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"created: %s -> %s\"",
"%",
"(",
"uri",
",",
"filename",
")",
")",
"num_created",
"+=",
"self",
".",
"update_resource",
"(",
"resource",
",",
"filename",
",",
"'created'",
")",
"for",
"resource",
"in",
"updated",
":",
"uri",
"=",
"resource",
".",
"uri",
"filename",
"=",
"self",
".",
"mapper",
".",
"src_to_dst",
"(",
"uri",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"updated: %s -> %s\"",
"%",
"(",
"uri",
",",
"filename",
")",
")",
"num_updated",
"+=",
"self",
".",
"update_resource",
"(",
"resource",
",",
"filename",
",",
"'updated'",
")",
"for",
"resource",
"in",
"deleted",
":",
"uri",
"=",
"resource",
".",
"uri",
"filename",
"=",
"self",
".",
"mapper",
".",
"src_to_dst",
"(",
"uri",
")",
"num_deleted",
"+=",
"self",
".",
"delete_resource",
"(",
"resource",
",",
"filename",
",",
"allow_deletion",
")",
"# 6. Store last timestamp to allow incremental sync",
"if",
"(",
"not",
"audit_only",
"and",
"self",
".",
"last_timestamp",
">",
"0",
")",
":",
"ClientState",
"(",
")",
".",
"set_state",
"(",
"self",
".",
"sitemap",
",",
"self",
".",
"last_timestamp",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Written last timestamp %s for incremental sync\"",
"%",
"(",
"datetime_to_str",
"(",
"self",
".",
"last_timestamp",
")",
")",
")",
"# 7. Done",
"self",
".",
"log_status",
"(",
"in_sync",
"=",
"(",
"len",
"(",
"updated",
")",
"+",
"len",
"(",
"deleted",
")",
"+",
"len",
"(",
"created",
")",
"==",
"0",
")",
",",
"same",
"=",
"len",
"(",
"same",
")",
",",
"created",
"=",
"num_created",
",",
"updated",
"=",
"num_updated",
",",
"deleted",
"=",
"num_deleted",
",",
"to_delete",
"=",
"len",
"(",
"deleted",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Completed %s\"",
"%",
"(",
"action",
")",
")"
] | 50.313953 | 18.534884 |
def _utc_to_local(self, data, local_zone="America/Los_Angeles"):
""" Adjust index of dataframe according to timezone that is requested by user.
Parameters
----------
data : pd.DataFrame()
Pandas dataframe of json timeseries response from server.
local_zone : str
pytz.timezone string of specified local timezone to change index to.
Returns
-------
pd.DataFrame()
Pandas dataframe with timestamp index adjusted for local timezone.
"""
# Accounts for localtime shift
data.index = data.index.tz_localize(pytz.utc).tz_convert(local_zone)
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data
|
[
"def",
"_utc_to_local",
"(",
"self",
",",
"data",
",",
"local_zone",
"=",
"\"America/Los_Angeles\"",
")",
":",
"# Accounts for localtime shift",
"data",
".",
"index",
"=",
"data",
".",
"index",
".",
"tz_localize",
"(",
"pytz",
".",
"utc",
")",
".",
"tz_convert",
"(",
"local_zone",
")",
"# Gets rid of extra offset information so can compare with csv data",
"data",
".",
"index",
"=",
"data",
".",
"index",
".",
"tz_localize",
"(",
"None",
")",
"return",
"data"
] | 33.416667 | 24.875 |
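A minimal sketch of the same two-step conversion, assuming pandas and pytz are installed; the index values and timezone below are illustrative:

import pandas as pd
import pytz

# A naive index understood to be UTC, as in the helper above.
idx = pd.date_range('2019-01-01 08:00', periods=3, freq='H')
df = pd.DataFrame({'kW': [1.0, 2.0, 3.0]}, index=idx)

# Localize to UTC, convert to the local zone, then drop the offset info.
df.index = df.index.tz_localize(pytz.utc).tz_convert('America/Los_Angeles')
df.index = df.index.tz_localize(None)
print(df)  # index now starts at 2019-01-01 00:00 local time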
def ParseMany(text):
"""Parses many YAML documents into a list of Python objects.
Args:
text: A YAML source with multiple documents embedded.
Returns:
A list of Python data structures corresponding to the YAML documents.
"""
precondition.AssertType(text, Text)
if compatibility.PY2:
text = text.encode("utf-8")
return list(yaml.safe_load_all(text))
|
[
"def",
"ParseMany",
"(",
"text",
")",
":",
"precondition",
".",
"AssertType",
"(",
"text",
",",
"Text",
")",
"if",
"compatibility",
".",
"PY2",
":",
"text",
"=",
"text",
".",
"encode",
"(",
"\"utf-8\"",
")",
"return",
"list",
"(",
"yaml",
".",
"safe_load_all",
"(",
"text",
")",
")"
] | 24.266667 | 21.533333 |
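The yaml call underneath the helper, shown directly; precondition and compatibility are project-internal guards, so plain PyYAML is used here:

import yaml

text = """\
name: alpha
---
name: beta
"""
print(list(yaml.safe_load_all(text)))  # [{'name': 'alpha'}, {'name': 'beta'}]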
def _getParameters(self):
"""Returns the result of this decorator."""
param = self.query._getParameters()
key = self.__PATTERN_KEY % (str(self._getIndex()))
val = self.__PATTERN_VALUE % (self.__column, self.__value)
#self.__column.getColumnAlias()
param.update({key:val})
return param
|
[
"def",
"_getParameters",
"(",
"self",
")",
":",
"param",
"=",
"self",
".",
"query",
".",
"_getParameters",
"(",
")",
"key",
"=",
"self",
".",
"__PATTERN_KEY",
"%",
"(",
"str",
"(",
"self",
".",
"_getIndex",
"(",
")",
")",
")",
"val",
"=",
"self",
".",
"__PATTERN_VALUE",
"%",
"(",
"self",
".",
"__column",
",",
"self",
".",
"__value",
")",
"#self.__column.getColumnAlias()",
"param",
".",
"update",
"(",
"{",
"key",
":",
"val",
"}",
")",
"return",
"param"
] | 41.625 | 11.5 |
def fetch_csv_dataframe(
download_url,
filename=None,
subdir=None,
**pandas_kwargs):
"""
Download a remote file from `download_url` and save it locally as `filename`.
Load that local file as a CSV into Pandas using extra keyword arguments such as sep='\t'.
"""
path = fetch_file(
download_url=download_url,
filename=filename,
decompress=True,
subdir=subdir)
return pd.read_csv(path, **pandas_kwargs)
|
[
"def",
"fetch_csv_dataframe",
"(",
"download_url",
",",
"filename",
"=",
"None",
",",
"subdir",
"=",
"None",
",",
"*",
"*",
"pandas_kwargs",
")",
":",
"path",
"=",
"fetch_file",
"(",
"download_url",
"=",
"download_url",
",",
"filename",
"=",
"filename",
",",
"decompress",
"=",
"True",
",",
"subdir",
"=",
"subdir",
")",
"return",
"pd",
".",
"read_csv",
"(",
"path",
",",
"*",
"*",
"pandas_kwargs",
")"
] | 31.533333 | 17.266667 |
def get_composition_query_session_for_repository(self, repository_id, proxy):
"""Gets a composition query session for the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
        arg:    proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionQuerySession) - a
CompositionQuerySession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_composition_query() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_composition_query() and
supports_visible_federation() are true.
"""
if repository_id is None:
raise NullArgument()
if not self.supports_composition_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise # OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.CompositionQuerySession(repository_id, proxy, runtime=self._runtime)
except AttributeError:
raise # OperationFailed()
return session
|
[
"def",
"get_composition_query_session_for_repository",
"(",
"self",
",",
"repository_id",
",",
"proxy",
")",
":",
"if",
"repository_id",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"if",
"not",
"self",
".",
"supports_composition_query",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessions",
"except",
"ImportError",
":",
"raise",
"# OperationFailed()",
"proxy",
"=",
"self",
".",
"_convert_proxy",
"(",
"proxy",
")",
"try",
":",
"session",
"=",
"sessions",
".",
"CompositionQuerySession",
"(",
"repository_id",
",",
"proxy",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"except",
"AttributeError",
":",
"raise",
"# OperationFailed()",
"return",
"session"
] | 42.903226 | 16.451613 |
def get(self, request, **kwargs):
"""
Return the customer's valid subscriptions.
Returns with status code 200.
"""
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
serializer = SubscriptionSerializer(customer.subscription)
return Response(serializer.data)
|
[
"def",
"get",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"customer",
",",
"_created",
"=",
"Customer",
".",
"get_or_create",
"(",
"subscriber",
"=",
"subscriber_request_callback",
"(",
"self",
".",
"request",
")",
")",
"serializer",
"=",
"SubscriptionSerializer",
"(",
"customer",
".",
"subscription",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
")"
] | 26.333333 | 15.333333 |
def relpath(self):
"""
Determine the relative path to this repository
Returns:
str: relative path to this repository
"""
here = os.path.abspath(os.path.curdir)
relpath = os.path.relpath(self.fpath, here)
return relpath
|
[
"def",
"relpath",
"(",
"self",
")",
":",
"here",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"curdir",
")",
"relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"self",
".",
"fpath",
",",
"here",
")",
"return",
"relpath"
] | 27.8 | 14.4 |
def update_widget_attrs(self, bound_field, attrs):
"""
Update the dictionary of attributes used while rendering the input widget
"""
bound_field.form.update_widget_attrs(bound_field, attrs)
widget_classes = self.widget.attrs.get('class', None)
if widget_classes:
if 'class' in attrs:
attrs['class'] += ' ' + widget_classes
else:
attrs.update({'class': widget_classes})
return attrs
|
[
"def",
"update_widget_attrs",
"(",
"self",
",",
"bound_field",
",",
"attrs",
")",
":",
"bound_field",
".",
"form",
".",
"update_widget_attrs",
"(",
"bound_field",
",",
"attrs",
")",
"widget_classes",
"=",
"self",
".",
"widget",
".",
"attrs",
".",
"get",
"(",
"'class'",
",",
"None",
")",
"if",
"widget_classes",
":",
"if",
"'class'",
"in",
"attrs",
":",
"attrs",
"[",
"'class'",
"]",
"+=",
"' '",
"+",
"widget_classes",
"else",
":",
"attrs",
".",
"update",
"(",
"{",
"'class'",
":",
"widget_classes",
"}",
")",
"return",
"attrs"
] | 40.25 | 15.916667 |
def read_interactions(path, comments="#", directed=False, delimiter=None,
nodetype=None, timestamptype=None, encoding='utf-8', keys=False):
"""Read a DyNetx graph from interaction list format.
Parameters
----------
    path : file handle
       The file handle to read the interaction list from
delimiter : character
Column delimiter
"""
ids = None
lines = (line.decode(encoding) for line in path)
if keys:
ids = read_ids(path.name, delimiter=delimiter, timestamptype=timestamptype)
return parse_interactions(lines, comments=comments, directed=directed, delimiter=delimiter, nodetype=nodetype,
timestamptype=timestamptype, keys=ids)
|
[
"def",
"read_interactions",
"(",
"path",
",",
"comments",
"=",
"\"#\"",
",",
"directed",
"=",
"False",
",",
"delimiter",
"=",
"None",
",",
"nodetype",
"=",
"None",
",",
"timestamptype",
"=",
"None",
",",
"encoding",
"=",
"'utf-8'",
",",
"keys",
"=",
"False",
")",
":",
"ids",
"=",
"None",
"lines",
"=",
"(",
"line",
".",
"decode",
"(",
"encoding",
")",
"for",
"line",
"in",
"path",
")",
"if",
"keys",
":",
"ids",
"=",
"read_ids",
"(",
"path",
".",
"name",
",",
"delimiter",
"=",
"delimiter",
",",
"timestamptype",
"=",
"timestamptype",
")",
"return",
"parse_interactions",
"(",
"lines",
",",
"comments",
"=",
"comments",
",",
"directed",
"=",
"directed",
",",
"delimiter",
"=",
"delimiter",
",",
"nodetype",
"=",
"nodetype",
",",
"timestamptype",
"=",
"timestamptype",
",",
"keys",
"=",
"ids",
")"
] | 34.190476 | 27.095238 |
def over(self, window):
"""
Define a windowing column.
:param window: a :class:`WindowSpec`
:return: a Column
>>> from pyspark.sql import Window
>>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1)
>>> from pyspark.sql.functions import rank, min
>>> # df.select(rank().over(window), min('age').over(window))
"""
from pyspark.sql.window import WindowSpec
if not isinstance(window, WindowSpec):
raise TypeError("window should be WindowSpec")
jc = self._jc.over(window._jspec)
return Column(jc)
|
[
"def",
"over",
"(",
"self",
",",
"window",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"window",
"import",
"WindowSpec",
"if",
"not",
"isinstance",
"(",
"window",
",",
"WindowSpec",
")",
":",
"raise",
"TypeError",
"(",
"\"window should be WindowSpec\"",
")",
"jc",
"=",
"self",
".",
"_jc",
".",
"over",
"(",
"window",
".",
"_jspec",
")",
"return",
"Column",
"(",
"jc",
")"
] | 36.117647 | 15.176471 |
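A runnable sketch of the method, assuming a local pyspark installation; it mirrors the doctest hinted at in the docstring:

from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import rank

spark = SparkSession.builder.master('local[1]').getOrCreate()
df = spark.createDataFrame(
    [('alice', 25), ('alice', 30), ('bob', 40)], ['name', 'age'])
w = Window.partitionBy('name').orderBy('age')
df.select('name', 'age', rank().over(w).alias('rk')).show()
spark.stop()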
def next_flightmode_colour(self):
'''allocate a colour to be used for a flight mode'''
        if self.flightmode_colour_index >= len(flightmode_colours):
print("Out of colours; reusing")
self.flightmode_colour_index = 0
ret = flightmode_colours[self.flightmode_colour_index]
self.flightmode_colour_index += 1
return ret
|
[
"def",
"next_flightmode_colour",
"(",
"self",
")",
":",
"if",
"self",
".",
"flightmode_colour_index",
">",
"len",
"(",
"flightmode_colours",
")",
":",
"print",
"(",
"\"Out of colours; reusing\"",
")",
"self",
".",
"flightmode_colour_index",
"=",
"0",
"ret",
"=",
"flightmode_colours",
"[",
"self",
".",
"flightmode_colour_index",
"]",
"self",
".",
"flightmode_colour_index",
"+=",
"1",
"return",
"ret"
] | 46 | 13.25 |
def qteMakeWidgetActive(self, widgetObj: QtGui.QWidget):
"""
Give keyboard focus to ``widgetObj``.
If ``widgetObj`` is **None** then the internal focus state
is reset, but the focus manger will automatically
activate the first available widget again.
|Args|
* ``widgetObj`` (**QWidget**): the widget to focus on.
|Returns|
* **None**
|Raises|
* **QtmacsOtherError** if ``widgetObj`` was not added with
``qteAddWidget``.
"""
# Void the active widget information.
if widgetObj is None:
self._qteActiveWidget = None
return
# Ensure that this applet is an ancestor of ``widgetObj``
# inside the Qt hierarchy.
if qteGetAppletFromWidget(widgetObj) is not self:
msg = 'The specified widget is not inside the current applet.'
raise QtmacsOtherError(msg)
# If widgetObj is not registered with Qtmacs then simply declare
# it active and return.
if not hasattr(widgetObj, '_qteAdmin'):
self._qteActiveWidget = widgetObj
return
# Do nothing if widgetObj refers to an applet.
if widgetObj._qteAdmin.isQtmacsApplet:
self._qteActiveWidget = None
return
# Housekeeping: remove non-existing widgets from the admin structure.
self.qteAutoremoveDeletedWidgets()
# Verify the widget is registered for this applet.
if widgetObj not in self._qteAdmin.widgetList:
msg = 'Widget is not registered for this applet.'
self.qteLogger.error(msg, stack_info=True)
self._qteActiveWidget = None
return
# The focus manager in QtmacsMain will hand the focus to
# whatever the _qteActiveWidget variable of the active applet
# points to.
self.qteSetWidgetFocusOrder((self._qteActiveWidget, widgetObj))
self._qteActiveWidget = widgetObj
|
[
"def",
"qteMakeWidgetActive",
"(",
"self",
",",
"widgetObj",
":",
"QtGui",
".",
"QWidget",
")",
":",
"# Void the active widget information.",
"if",
"widgetObj",
"is",
"None",
":",
"self",
".",
"_qteActiveWidget",
"=",
"None",
"return",
"# Ensure that this applet is an ancestor of ``widgetObj``",
"# inside the Qt hierarchy.",
"if",
"qteGetAppletFromWidget",
"(",
"widgetObj",
")",
"is",
"not",
"self",
":",
"msg",
"=",
"'The specified widget is not inside the current applet.'",
"raise",
"QtmacsOtherError",
"(",
"msg",
")",
"# If widgetObj is not registered with Qtmacs then simply declare",
"# it active and return.",
"if",
"not",
"hasattr",
"(",
"widgetObj",
",",
"'_qteAdmin'",
")",
":",
"self",
".",
"_qteActiveWidget",
"=",
"widgetObj",
"return",
"# Do nothing if widgetObj refers to an applet.",
"if",
"widgetObj",
".",
"_qteAdmin",
".",
"isQtmacsApplet",
":",
"self",
".",
"_qteActiveWidget",
"=",
"None",
"return",
"# Housekeeping: remove non-existing widgets from the admin structure.",
"self",
".",
"qteAutoremoveDeletedWidgets",
"(",
")",
"# Verify the widget is registered for this applet.",
"if",
"widgetObj",
"not",
"in",
"self",
".",
"_qteAdmin",
".",
"widgetList",
":",
"msg",
"=",
"'Widget is not registered for this applet.'",
"self",
".",
"qteLogger",
".",
"error",
"(",
"msg",
",",
"stack_info",
"=",
"True",
")",
"self",
".",
"_qteActiveWidget",
"=",
"None",
"return",
"# The focus manager in QtmacsMain will hand the focus to",
"# whatever the _qteActiveWidget variable of the active applet",
"# points to.",
"self",
".",
"qteSetWidgetFocusOrder",
"(",
"(",
"self",
".",
"_qteActiveWidget",
",",
"widgetObj",
")",
")",
"self",
".",
"_qteActiveWidget",
"=",
"widgetObj"
] | 33.775862 | 21.017241 |
def get_or_create_iobject(identifier_uid,
identifier_namespace_uri,
iobject_type_name,
iobject_type_namespace_uri,
iobject_type_revision_name,
iobject_family_name,
iobject_family_revision_name="",
identifier_namespace_name="",
timestamp=None,
create_timestamp=None,
overwrite=False,
dingos_class_map=dingos_class_map):
"""
Get or create an information object.
"""
# create or retrieve the iobject type and revision
# create or retrieve identifier
if not timestamp:
raise StandardError("You must supply a timestamp.")
id_namespace, created = dingos_class_map['IdentifierNameSpace'].objects.get_or_create(uri=identifier_namespace_uri)
if created and identifier_namespace_name:
id_namespace.name = identifier_namespace_name
id_namespace.save()
identifier, created = dingos_class_map['Identifier'].objects.get_or_create(uid=identifier_uid,
namespace=id_namespace,
defaults={'latest': None})
iobject_type_namespace, created = dingos_class_map['DataTypeNameSpace'].objects.get_or_create(uri=iobject_type_namespace_uri)
iobject_family, created = dingos_class_map['InfoObjectFamily'].objects.get_or_create(name=iobject_family_name)
iobject_family_revision, created = dingos_class_map['Revision'].objects.get_or_create(
name=iobject_family_revision_name)
# create or retrieve the iobject type
iobject_type, created = dingos_class_map['InfoObjectType'].objects.get_or_create(name=iobject_type_name,
iobject_family=iobject_family,
namespace=iobject_type_namespace)
iobject_type_revision, created = dingos_class_map['Revision'].objects.get_or_create(name=iobject_type_revision_name)
if not create_timestamp:
create_timestamp = timezone.now()
#if not timestamp:
# timestamp = create_timestamp
# iobject = overwrite
# created = False
iobject, created = dingos_class_map["InfoObject"].objects.get_or_create(identifier=identifier,
timestamp=timestamp,
defaults={'iobject_family': iobject_family,
'iobject_family_revision': iobject_family_revision,
'iobject_type': iobject_type,
'iobject_type_revision': iobject_type_revision,
'create_timestamp': create_timestamp})
if created:
iobject.set_name()
iobject.save()
identifier.latest = iobject
identifier.save()
elif overwrite:
iobject.timestamp = timestamp
iobject.create_timestamp = create_timestamp
iobject.iobject_family = iobject_family
iobject.iobject_family_revision = iobject_family_revision
iobject.iobject_type = iobject_type
iobject.iobject_type_revision = iobject_type_revision
iobject.set_name()
iobject.save()
logger.debug(
"Created iobject id with %s , ts %s (created was %s) and overwrite as %s" % (iobject.identifier, timestamp, created, overwrite))
return iobject, created
|
[
"def",
"get_or_create_iobject",
"(",
"identifier_uid",
",",
"identifier_namespace_uri",
",",
"iobject_type_name",
",",
"iobject_type_namespace_uri",
",",
"iobject_type_revision_name",
",",
"iobject_family_name",
",",
"iobject_family_revision_name",
"=",
"\"\"",
",",
"identifier_namespace_name",
"=",
"\"\"",
",",
"timestamp",
"=",
"None",
",",
"create_timestamp",
"=",
"None",
",",
"overwrite",
"=",
"False",
",",
"dingos_class_map",
"=",
"dingos_class_map",
")",
":",
"# create or retrieve the iobject type and revision",
"# create or retrieve identifier",
"if",
"not",
"timestamp",
":",
"raise",
"StandardError",
"(",
"\"You must supply a timestamp.\"",
")",
"id_namespace",
",",
"created",
"=",
"dingos_class_map",
"[",
"'IdentifierNameSpace'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uri",
"=",
"identifier_namespace_uri",
")",
"if",
"created",
"and",
"identifier_namespace_name",
":",
"id_namespace",
".",
"name",
"=",
"identifier_namespace_name",
"id_namespace",
".",
"save",
"(",
")",
"identifier",
",",
"created",
"=",
"dingos_class_map",
"[",
"'Identifier'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uid",
"=",
"identifier_uid",
",",
"namespace",
"=",
"id_namespace",
",",
"defaults",
"=",
"{",
"'latest'",
":",
"None",
"}",
")",
"iobject_type_namespace",
",",
"created",
"=",
"dingos_class_map",
"[",
"'DataTypeNameSpace'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"uri",
"=",
"iobject_type_namespace_uri",
")",
"iobject_family",
",",
"created",
"=",
"dingos_class_map",
"[",
"'InfoObjectFamily'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_family_name",
")",
"iobject_family_revision",
",",
"created",
"=",
"dingos_class_map",
"[",
"'Revision'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_family_revision_name",
")",
"# create or retrieve the iobject type",
"iobject_type",
",",
"created",
"=",
"dingos_class_map",
"[",
"'InfoObjectType'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_type_name",
",",
"iobject_family",
"=",
"iobject_family",
",",
"namespace",
"=",
"iobject_type_namespace",
")",
"iobject_type_revision",
",",
"created",
"=",
"dingos_class_map",
"[",
"'Revision'",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"name",
"=",
"iobject_type_revision_name",
")",
"if",
"not",
"create_timestamp",
":",
"create_timestamp",
"=",
"timezone",
".",
"now",
"(",
")",
"#if not timestamp:",
"# timestamp = create_timestamp",
"# iobject = overwrite",
"# created = False",
"iobject",
",",
"created",
"=",
"dingos_class_map",
"[",
"\"InfoObject\"",
"]",
".",
"objects",
".",
"get_or_create",
"(",
"identifier",
"=",
"identifier",
",",
"timestamp",
"=",
"timestamp",
",",
"defaults",
"=",
"{",
"'iobject_family'",
":",
"iobject_family",
",",
"'iobject_family_revision'",
":",
"iobject_family_revision",
",",
"'iobject_type'",
":",
"iobject_type",
",",
"'iobject_type_revision'",
":",
"iobject_type_revision",
",",
"'create_timestamp'",
":",
"create_timestamp",
"}",
")",
"if",
"created",
":",
"iobject",
".",
"set_name",
"(",
")",
"iobject",
".",
"save",
"(",
")",
"identifier",
".",
"latest",
"=",
"iobject",
"identifier",
".",
"save",
"(",
")",
"elif",
"overwrite",
":",
"iobject",
".",
"timestamp",
"=",
"timestamp",
"iobject",
".",
"create_timestamp",
"=",
"create_timestamp",
"iobject",
".",
"iobject_family",
"=",
"iobject_family",
"iobject",
".",
"iobject_family_revision",
"=",
"iobject_family_revision",
"iobject",
".",
"iobject_type",
"=",
"iobject_type",
"iobject",
".",
"iobject_type_revision",
"=",
"iobject_type_revision",
"iobject",
".",
"set_name",
"(",
")",
"iobject",
".",
"save",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Created iobject id with %s , ts %s (created was %s) and overwrite as %s\"",
"%",
"(",
"iobject",
".",
"identifier",
",",
"timestamp",
",",
"created",
",",
"overwrite",
")",
")",
"return",
"iobject",
",",
"created"
] | 48.246914 | 31.45679 |
def text(self, text, stylename=None):
"""Add text within the current container."""
assert self._containers
container = self._containers[-1]
if stylename is not None:
stylename = self._get_style_name(stylename)
container.addElement(Span(stylename=stylename, text=text))
else:
container.addElement(Span(text=text))
|
[
"def",
"text",
"(",
"self",
",",
"text",
",",
"stylename",
"=",
"None",
")",
":",
"assert",
"self",
".",
"_containers",
"container",
"=",
"self",
".",
"_containers",
"[",
"-",
"1",
"]",
"if",
"stylename",
"is",
"not",
"None",
":",
"stylename",
"=",
"self",
".",
"_get_style_name",
"(",
"stylename",
")",
"container",
".",
"addElement",
"(",
"Span",
"(",
"stylename",
"=",
"stylename",
",",
"text",
"=",
"text",
")",
")",
"else",
":",
"container",
".",
"addElement",
"(",
"Span",
"(",
"text",
"=",
"text",
")",
")"
] | 42.222222 | 11.111111 |
def action(self, observations):
""" Calculate value for given state """
observations = self.input_block(observations)
policy_hidden = self.policy_backbone(observations)
action = self.action_head(policy_hidden)
return action
|
[
"def",
"action",
"(",
"self",
",",
"observations",
")",
":",
"observations",
"=",
"self",
".",
"input_block",
"(",
"observations",
")",
"policy_hidden",
"=",
"self",
".",
"policy_backbone",
"(",
"observations",
")",
"action",
"=",
"self",
".",
"action_head",
"(",
"policy_hidden",
")",
"return",
"action"
] | 43 | 11.166667 |
def guess_mime_type(url):
"""Use the mimetypes module to lookup the type for an extension.
This function also adds some extensions required for HTML5
"""
(mimetype, _mimeencoding) = mimetypes.guess_type(url)
if not mimetype:
ext = os.path.splitext(url)[1]
mimetype = _MIME_TYPES.get(ext)
_logger.debug("mimetype({}): {}".format(url, mimetype))
if not mimetype:
mimetype = "application/octet-stream"
return mimetype
|
[
"def",
"guess_mime_type",
"(",
"url",
")",
":",
"(",
"mimetype",
",",
"_mimeencoding",
")",
"=",
"mimetypes",
".",
"guess_type",
"(",
"url",
")",
"if",
"not",
"mimetype",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"url",
")",
"[",
"1",
"]",
"mimetype",
"=",
"_MIME_TYPES",
".",
"get",
"(",
"ext",
")",
"_logger",
".",
"debug",
"(",
"\"mimetype({}): {}\"",
".",
"format",
"(",
"url",
",",
"mimetype",
")",
")",
"if",
"not",
"mimetype",
":",
"mimetype",
"=",
"\"application/octet-stream\"",
"return",
"mimetype"
] | 35.615385 | 14.307692 |
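The stdlib lookup the helper wraps (_MIME_TYPES is a module-internal fallback table, so only the mimetypes part is shown):

import mimetypes

for url in ['report.pdf', 'page.html', 'archive.unknownext']:
    mimetype, _encoding = mimetypes.guess_type(url)
    print(url, '->', mimetype or 'application/octet-stream')
# The last entry falls through to the octet-stream default, as in the helper.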
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
|
[
"def",
"expose_ideal_query_execution_start_points",
"(",
"compound_match_query",
",",
"location_types",
",",
"coerced_locations",
")",
":",
"new_queries",
"=",
"[",
"]",
"for",
"match_query",
"in",
"compound_match_query",
".",
"match_queries",
":",
"location_classification",
"=",
"_classify_query_locations",
"(",
"match_query",
")",
"preferred_locations",
",",
"eligible_locations",
",",
"_",
"=",
"location_classification",
"if",
"preferred_locations",
":",
"# Convert all eligible locations into non-eligible ones, by removing",
"# their \"class:\" clause. The \"class:\" clause is provided either by having",
"# a QueryRoot block or a CoerceType block in the MatchStep corresponding",
"# to the location. We remove it by converting the class check into",
"# an \"INSTANCEOF\" Filter block, which OrientDB is unable to optimize away.",
"new_query",
"=",
"_expose_only_preferred_locations",
"(",
"match_query",
",",
"location_types",
",",
"coerced_locations",
",",
"preferred_locations",
",",
"eligible_locations",
")",
"elif",
"eligible_locations",
":",
"# Make sure that all eligible locations have a \"class:\" clause by adding",
"# a CoerceType block that is a no-op as guaranteed by the schema. This merely",
"# ensures that OrientDB is able to use each of these locations as a query start point,",
"# and will choose the one whose class is of lowest cardinality.",
"new_query",
"=",
"_expose_all_eligible_locations",
"(",
"match_query",
",",
"location_types",
",",
"eligible_locations",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"u'This query has no preferred or eligible query start locations. '",
"u'This is almost certainly a bug: {}'",
".",
"format",
"(",
"match_query",
")",
")",
"new_queries",
".",
"append",
"(",
"new_query",
")",
"return",
"compound_match_query",
".",
"_replace",
"(",
"match_queries",
"=",
"new_queries",
")"
] | 59.09375 | 31.625 |
def __build_url(self, api_call, **kwargs):
"""Builds the api query"""
kwargs['key'] = self.api_key
if 'language' not in kwargs:
kwargs['language'] = self.language
if 'format' not in kwargs:
kwargs['format'] = self.__format
api_query = urlencode(kwargs)
return "{0}{1}?{2}".format(urls.BASE_URL,
api_call,
api_query)
|
[
"def",
"__build_url",
"(",
"self",
",",
"api_call",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'key'",
"]",
"=",
"self",
".",
"api_key",
"if",
"'language'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'language'",
"]",
"=",
"self",
".",
"language",
"if",
"'format'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'format'",
"]",
"=",
"self",
".",
"__format",
"api_query",
"=",
"urlencode",
"(",
"kwargs",
")",
"return",
"\"{0}{1}?{2}\"",
".",
"format",
"(",
"urls",
".",
"BASE_URL",
",",
"api_call",
",",
"api_query",
")"
] | 37.25 | 7.25 |
def plot(self, figure_list):
'''
        When each subscript is called, it uses its standard plotting
        Args:
            figure_list: list of figures passed from the gui
'''
#TODO: be smarter about how we plot ScriptIterator
if self._current_subscript_stage is not None:
if self._current_subscript_stage['current_subscript'] is not None:
self._current_subscript_stage['current_subscript'].plot(figure_list)
if (self.is_running is False) and not (self.data == {} or self.data is None):
script_names = list(self.settings['script_order'].keys())
script_indices = [self.settings['script_order'][name] for name in script_names]
_, sorted_script_names = list(zip(*sorted(zip(script_indices, script_names))))
last_script = self.scripts[sorted_script_names[-1]]
last_script.force_update() # since we use the last script plot function we force it to refresh
axes_list = last_script.get_axes_layout(figure_list)
        # catch error if _plot function doesn't take optional data argument
try:
last_script._plot(axes_list, self.data)
except TypeError as err:
            print((warnings.warn('can\'t plot average script data because script.plot function doesn\'t take data as optional argument. Plotting last data set instead')))
print((err.message))
last_script.plot(figure_list)
|
[
"def",
"plot",
"(",
"self",
",",
"figure_list",
")",
":",
"#TODO: be smarter about how we plot ScriptIterator",
"if",
"self",
".",
"_current_subscript_stage",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_current_subscript_stage",
"[",
"'current_subscript'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"_current_subscript_stage",
"[",
"'current_subscript'",
"]",
".",
"plot",
"(",
"figure_list",
")",
"if",
"(",
"self",
".",
"is_running",
"is",
"False",
")",
"and",
"not",
"(",
"self",
".",
"data",
"==",
"{",
"}",
"or",
"self",
".",
"data",
"is",
"None",
")",
":",
"script_names",
"=",
"list",
"(",
"self",
".",
"settings",
"[",
"'script_order'",
"]",
".",
"keys",
"(",
")",
")",
"script_indices",
"=",
"[",
"self",
".",
"settings",
"[",
"'script_order'",
"]",
"[",
"name",
"]",
"for",
"name",
"in",
"script_names",
"]",
"_",
",",
"sorted_script_names",
"=",
"list",
"(",
"zip",
"(",
"*",
"sorted",
"(",
"zip",
"(",
"script_indices",
",",
"script_names",
")",
")",
")",
")",
"last_script",
"=",
"self",
".",
"scripts",
"[",
"sorted_script_names",
"[",
"-",
"1",
"]",
"]",
"last_script",
".",
"force_update",
"(",
")",
"# since we use the last script plot function we force it to refresh",
"axes_list",
"=",
"last_script",
".",
"get_axes_layout",
"(",
"figure_list",
")",
"# catch error is _plot function doens't take optional data argument",
"try",
":",
"last_script",
".",
"_plot",
"(",
"axes_list",
",",
"self",
".",
"data",
")",
"except",
"TypeError",
"as",
"err",
":",
"print",
"(",
"(",
"warnings",
".",
"warn",
"(",
"'can\\'t plot average script data because script.plot function doens\\'t take data as optional argument. Plotting last data set instead'",
")",
")",
")",
"print",
"(",
"(",
"err",
".",
"message",
")",
")",
"last_script",
".",
"plot",
"(",
"figure_list",
")"
] | 44.606061 | 34.242424 |
def dmi(arg, n, high_col='high', low_col='low', close_col='close'):
""" Return the dmi+, dmi-, Average directional index
( http://en.wikipedia.org/wiki/Average_Directional_Index )
    TODO - break up calculation
"""
converted = arg[[close_col, high_col, low_col]]
converted.columns = ['close', 'high', 'low']
up_mv = converted.high.diff()
dn_mv = -1 * converted.low.diff()
up_mv[~((up_mv > 0) & (up_mv > dn_mv))] = 0
dn_mv[~((dn_mv > 0) & (dn_mv > up_mv))] = 0
tr = true_range(converted, 'high', 'low', 'close')
atr = wilderma(tr, n)
di_pos = 100. * wilderma(up_mv, n) / atr
di_neg = 100. * wilderma(dn_mv, n) / atr
dx = 100. * np.abs(di_pos - di_neg) / (di_pos + di_neg)
adx = wilderma(dx, n)
data = [
('DI+', di_pos),
('DI-', di_neg),
('DX', dx),
('ADX', adx),
]
return pd.DataFrame.from_items(data)
|
[
"def",
"dmi",
"(",
"arg",
",",
"n",
",",
"high_col",
"=",
"'high'",
",",
"low_col",
"=",
"'low'",
",",
"close_col",
"=",
"'close'",
")",
":",
"converted",
"=",
"arg",
"[",
"[",
"close_col",
",",
"high_col",
",",
"low_col",
"]",
"]",
"converted",
".",
"columns",
"=",
"[",
"'close'",
",",
"'high'",
",",
"'low'",
"]",
"up_mv",
"=",
"converted",
".",
"high",
".",
"diff",
"(",
")",
"dn_mv",
"=",
"-",
"1",
"*",
"converted",
".",
"low",
".",
"diff",
"(",
")",
"up_mv",
"[",
"~",
"(",
"(",
"up_mv",
">",
"0",
")",
"&",
"(",
"up_mv",
">",
"dn_mv",
")",
")",
"]",
"=",
"0",
"dn_mv",
"[",
"~",
"(",
"(",
"dn_mv",
">",
"0",
")",
"&",
"(",
"dn_mv",
">",
"up_mv",
")",
")",
"]",
"=",
"0",
"tr",
"=",
"true_range",
"(",
"converted",
",",
"'high'",
",",
"'low'",
",",
"'close'",
")",
"atr",
"=",
"wilderma",
"(",
"tr",
",",
"n",
")",
"di_pos",
"=",
"100.",
"*",
"wilderma",
"(",
"up_mv",
",",
"n",
")",
"/",
"atr",
"di_neg",
"=",
"100.",
"*",
"wilderma",
"(",
"dn_mv",
",",
"n",
")",
"/",
"atr",
"dx",
"=",
"100.",
"*",
"np",
".",
"abs",
"(",
"di_pos",
"-",
"di_neg",
")",
"/",
"(",
"di_pos",
"+",
"di_neg",
")",
"adx",
"=",
"wilderma",
"(",
"dx",
",",
"n",
")",
"data",
"=",
"[",
"(",
"'DI+'",
",",
"di_pos",
")",
",",
"(",
"'DI-'",
",",
"di_neg",
")",
",",
"(",
"'DX'",
",",
"dx",
")",
",",
"(",
"'ADX'",
",",
"adx",
")",
",",
"]",
"return",
"pd",
".",
"DataFrame",
".",
"from_items",
"(",
"data",
")"
] | 31.5 | 16.678571 |
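wilderma and true_range are helpers defined elsewhere in the module; below is a minimal sketch of Wilder's smoothing, assuming the common definition (an EMA with alpha = 1/n), which the DI+/DI-/ADX lines above all build on:

import pandas as pd

def wilder_ma(series, n):
    # Wilder's smoothing as commonly defined: EMA with alpha = 1/n.
    # Assumed to approximate the module's wilderma helper.
    return series.ewm(alpha=1.0 / n, adjust=False).mean()

up_mv = pd.Series([0.0, 1.0, 0.5, 0.0, 2.0])
print(wilder_ma(up_mv, n=3))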
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
`encoding` - The Unicode encoding (or character set) of
the file. The default is ``None``, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
`errors` - How to handle Unicode errors; see help(str.decode)
for the options. Default is ``'strict'``.
`retain` - If ``True``, retain newline characters; but all newline
character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are
translated to ``'\n'``. If ``False``, newline characters are
stripped off. Default is ``True``.
.. seealso:: :meth:`text`
"""
return self.text(encoding, errors).splitlines(retain)
|
[
"def",
"lines",
"(",
"self",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"'strict'",
",",
"retain",
"=",
"True",
")",
":",
"return",
"self",
".",
"text",
"(",
"encoding",
",",
"errors",
")",
".",
"splitlines",
"(",
"retain",
")"
] | 51.833333 | 23.055556 |
def must_stop(self):
"""
Return True if the worker must stop when the current loop is over.
"""
return bool(self.terminate_gracefuly and self.end_signal_caught
or self.num_loops >= self.max_loops or self.end_forced
or self.wanted_end_date and datetime.utcnow() >= self.wanted_end_date)
|
[
"def",
"must_stop",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"terminate_gracefuly",
"and",
"self",
".",
"end_signal_caught",
"or",
"self",
".",
"num_loops",
">=",
"self",
".",
"max_loops",
"or",
"self",
".",
"end_forced",
"or",
"self",
".",
"wanted_end_date",
"and",
"datetime",
".",
"utcnow",
"(",
")",
">=",
"self",
".",
"wanted_end_date",
")"
] | 49.285714 | 23.285714 |
def tail(f, window=20):
"""
Returns the last `window` lines of file `f` as a list.
@param window: the number of lines.
"""
if window == 0:
return []
BUFSIZ = 1024
f.seek(0, 2)
bytes = f.tell()
size = window + 1
block = -1
data = []
while size > 0 and bytes > 0:
if bytes - BUFSIZ > 0:
# Seek back one whole BUFSIZ
f.seek(block * BUFSIZ, 2)
# read BUFFER
data.insert(0, f.read(BUFSIZ).decode('utf-8', errors='ignore'))
else:
            # file too small, start from beginning
f.seek(0,0)
# only read what was not read
data.insert(0, f.read(bytes).decode('utf-8', errors='ignore'))
linesFound = data[0].count('\n')
size -= linesFound
bytes -= BUFSIZ
block -= 1
return ''.join(data).splitlines()[-window:]
|
[
"def",
"tail",
"(",
"f",
",",
"window",
"=",
"20",
")",
":",
"if",
"window",
"==",
"0",
":",
"return",
"[",
"]",
"BUFSIZ",
"=",
"1024",
"f",
".",
"seek",
"(",
"0",
",",
"2",
")",
"bytes",
"=",
"f",
".",
"tell",
"(",
")",
"size",
"=",
"window",
"+",
"1",
"block",
"=",
"-",
"1",
"data",
"=",
"[",
"]",
"while",
"size",
">",
"0",
"and",
"bytes",
">",
"0",
":",
"if",
"bytes",
"-",
"BUFSIZ",
">",
"0",
":",
"# Seek back one whole BUFSIZ",
"f",
".",
"seek",
"(",
"block",
"*",
"BUFSIZ",
",",
"2",
")",
"# read BUFFER",
"data",
".",
"insert",
"(",
"0",
",",
"f",
".",
"read",
"(",
"BUFSIZ",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'ignore'",
")",
")",
"else",
":",
"# file too small, start from begining",
"f",
".",
"seek",
"(",
"0",
",",
"0",
")",
"# only read what was not read",
"data",
".",
"insert",
"(",
"0",
",",
"f",
".",
"read",
"(",
"bytes",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'ignore'",
")",
")",
"linesFound",
"=",
"data",
"[",
"0",
"]",
".",
"count",
"(",
"'\\n'",
")",
"size",
"-=",
"linesFound",
"bytes",
"-=",
"BUFSIZ",
"block",
"-=",
"1",
"return",
"''",
".",
"join",
"(",
"data",
")",
".",
"splitlines",
"(",
")",
"[",
"-",
"window",
":",
"]"
] | 28.833333 | 15.9 |
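A quick check of the function above with an in-memory binary file; tail is assumed to be in scope along with its imports:

import io

buf = io.BytesIO(b''.join(b'line %d\n' % i for i in range(100)))
print(tail(buf, window=3))  # ['line 97', 'line 98', 'line 99']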
def getDevicesById(self): # , query):
"""
rows = self.camera_collection.get(query)
devices_by_id = {}
for row in rows:
row.pop("classname")
device = DataModel.RTSPCameraDevice(**row)
devices_by_id[device._id] = device
return devices_by_id
"""
rows = self.camera_collection.get()
devices_by_id = {}
for row in rows:
classname=row.pop("classname")
if (classname == "RTSPCameraRow"):
device = DataModel.RTSPCameraDevice(**row)
elif (classname == "USBCameraRow"):
device = DataModel.USBCameraDevice(**row)
else:
device = None
if (device):
devices_by_id[device._id] = device
return devices_by_id
|
[
"def",
"getDevicesById",
"(",
"self",
")",
":",
"# , query):",
"rows",
"=",
"self",
".",
"camera_collection",
".",
"get",
"(",
")",
"devices_by_id",
"=",
"{",
"}",
"for",
"row",
"in",
"rows",
":",
"classname",
"=",
"row",
".",
"pop",
"(",
"\"classname\"",
")",
"if",
"(",
"classname",
"==",
"\"RTSPCameraRow\"",
")",
":",
"device",
"=",
"DataModel",
".",
"RTSPCameraDevice",
"(",
"*",
"*",
"row",
")",
"elif",
"(",
"classname",
"==",
"\"USBCameraRow\"",
")",
":",
"device",
"=",
"DataModel",
".",
"USBCameraDevice",
"(",
"*",
"*",
"row",
")",
"else",
":",
"device",
"=",
"None",
"if",
"(",
"device",
")",
":",
"devices_by_id",
"[",
"device",
".",
"_id",
"]",
"=",
"device",
"return",
"devices_by_id"
] | 35.130435 | 10.26087 |
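The classname-dispatch pattern above, reduced to a standalone sketch; plain dicts stand in for the DataModel device classes:

rows = [
    {'classname': 'RTSPCameraRow', '_id': 1, 'address': '10.0.0.5'},
    {'classname': 'USBCameraRow', '_id': 2, 'device': '/dev/video0'},
    {'classname': 'OtherRow', '_id': 3},
]
factories = {'RTSPCameraRow': dict, 'USBCameraRow': dict}  # hypothetical stand-ins
devices_by_id = {}
for row in rows:
    factory = factories.get(row.pop('classname'))
    if factory:
        device = factory(**row)
        devices_by_id[device['_id']] = device
print(sorted(devices_by_id))  # [1, 2] - the unknown row is skipped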
def declare(full_table_name, definition, context):
"""
Parse declaration and create new SQL table accordingly.
:param full_table_name: full name of the table
:param definition: DataJoint table definition
:param context: dictionary of objects that might be referred to in the table.
"""
table_name = full_table_name.strip('`').split('.')[1]
if len(table_name) > MAX_TABLE_NAME_LENGTH:
raise DataJointError(
'Table name `{name}` exceeds the max length of {max_length}'.format(
name=table_name,
max_length=MAX_TABLE_NAME_LENGTH))
# split definition into lines
definition = re.split(r'\s*\n\s*', definition.strip())
# check for optional table comment
table_comment = definition.pop(0)[1:].strip() if definition[0].startswith('#') else ''
in_key = True # parse primary keys
primary_key = []
attributes = []
attribute_sql = []
foreign_key_sql = []
index_sql = []
uses_external = False
for line in definition:
if line.startswith('#'): # additional comments are ignored
pass
elif line.startswith('---') or line.startswith('___'):
in_key = False # start parsing dependent attributes
elif is_foreign_key(line):
compile_foreign_key(line, context, attributes,
primary_key if in_key else None,
attribute_sql, foreign_key_sql, index_sql)
elif re.match(r'^(unique\s+)?index[^:]*$', line, re.I): # index
compile_index(line, index_sql)
else:
name, sql, is_external = compile_attribute(line, in_key, foreign_key_sql)
uses_external = uses_external or is_external
if in_key and name not in primary_key:
primary_key.append(name)
if name not in attributes:
attributes.append(name)
attribute_sql.append(sql)
# compile SQL
if not primary_key:
raise DataJointError('Table must have a primary key')
return (
'CREATE TABLE IF NOT EXISTS %s (\n' % full_table_name +
',\n'.join(attribute_sql + ['PRIMARY KEY (`' + '`,`'.join(primary_key) + '`)'] + foreign_key_sql + index_sql) +
'\n) ENGINE=InnoDB, COMMENT "%s"' % table_comment), uses_external
|
[
"def",
"declare",
"(",
"full_table_name",
",",
"definition",
",",
"context",
")",
":",
"table_name",
"=",
"full_table_name",
".",
"strip",
"(",
"'`'",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"if",
"len",
"(",
"table_name",
")",
">",
"MAX_TABLE_NAME_LENGTH",
":",
"raise",
"DataJointError",
"(",
"'Table name `{name}` exceeds the max length of {max_length}'",
".",
"format",
"(",
"name",
"=",
"table_name",
",",
"max_length",
"=",
"MAX_TABLE_NAME_LENGTH",
")",
")",
"# split definition into lines",
"definition",
"=",
"re",
".",
"split",
"(",
"r'\\s*\\n\\s*'",
",",
"definition",
".",
"strip",
"(",
")",
")",
"# check for optional table comment",
"table_comment",
"=",
"definition",
".",
"pop",
"(",
"0",
")",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"if",
"definition",
"[",
"0",
"]",
".",
"startswith",
"(",
"'#'",
")",
"else",
"''",
"in_key",
"=",
"True",
"# parse primary keys",
"primary_key",
"=",
"[",
"]",
"attributes",
"=",
"[",
"]",
"attribute_sql",
"=",
"[",
"]",
"foreign_key_sql",
"=",
"[",
"]",
"index_sql",
"=",
"[",
"]",
"uses_external",
"=",
"False",
"for",
"line",
"in",
"definition",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"# additional comments are ignored",
"pass",
"elif",
"line",
".",
"startswith",
"(",
"'---'",
")",
"or",
"line",
".",
"startswith",
"(",
"'___'",
")",
":",
"in_key",
"=",
"False",
"# start parsing dependent attributes",
"elif",
"is_foreign_key",
"(",
"line",
")",
":",
"compile_foreign_key",
"(",
"line",
",",
"context",
",",
"attributes",
",",
"primary_key",
"if",
"in_key",
"else",
"None",
",",
"attribute_sql",
",",
"foreign_key_sql",
",",
"index_sql",
")",
"elif",
"re",
".",
"match",
"(",
"r'^(unique\\s+)?index[^:]*$'",
",",
"line",
",",
"re",
".",
"I",
")",
":",
"# index",
"compile_index",
"(",
"line",
",",
"index_sql",
")",
"else",
":",
"name",
",",
"sql",
",",
"is_external",
"=",
"compile_attribute",
"(",
"line",
",",
"in_key",
",",
"foreign_key_sql",
")",
"uses_external",
"=",
"uses_external",
"or",
"is_external",
"if",
"in_key",
"and",
"name",
"not",
"in",
"primary_key",
":",
"primary_key",
".",
"append",
"(",
"name",
")",
"if",
"name",
"not",
"in",
"attributes",
":",
"attributes",
".",
"append",
"(",
"name",
")",
"attribute_sql",
".",
"append",
"(",
"sql",
")",
"# compile SQL",
"if",
"not",
"primary_key",
":",
"raise",
"DataJointError",
"(",
"'Table must have a primary key'",
")",
"return",
"(",
"'CREATE TABLE IF NOT EXISTS %s (\\n'",
"%",
"full_table_name",
"+",
"',\\n'",
".",
"join",
"(",
"attribute_sql",
"+",
"[",
"'PRIMARY KEY (`'",
"+",
"'`,`'",
".",
"join",
"(",
"primary_key",
")",
"+",
"'`)'",
"]",
"+",
"foreign_key_sql",
"+",
"index_sql",
")",
"+",
"'\\n) ENGINE=InnoDB, COMMENT \"%s\"'",
"%",
"table_comment",
")",
",",
"uses_external"
] | 42.5 | 20.092593 |
def intersects_first(self,
ray_origins,
ray_directions):
"""
Find the index of the first triangle a ray hits.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
----------
triangle_index: (n,) int, index of triangle ray hit, or -1 if not hit
"""
ray_origins = np.asanyarray(deepcopy(ray_origins))
ray_directions = np.asanyarray(ray_directions)
triangle_index = self._scene.run(ray_origins,
ray_directions)
return triangle_index
|
[
"def",
"intersects_first",
"(",
"self",
",",
"ray_origins",
",",
"ray_directions",
")",
":",
"ray_origins",
"=",
"np",
".",
"asanyarray",
"(",
"deepcopy",
"(",
"ray_origins",
")",
")",
"ray_directions",
"=",
"np",
".",
"asanyarray",
"(",
"ray_directions",
")",
"triangle_index",
"=",
"self",
".",
"_scene",
".",
"run",
"(",
"ray_origins",
",",
"ray_directions",
")",
"return",
"triangle_index"
] | 30.130435 | 20.391304 |
def send_event(self, event_type, category=None, dimensions=None,
properties=None, timestamp=None):
"""Send an event to SignalFx.
Args:
event_type (string): the event type (name of the event time
series).
category (string): the category of the event.
dimensions (dict): a map of event dimensions.
properties (dict): a map of extra properties on that event.
            timestamp (float): timestamp when the event has occurred
"""
if category and category not in SUPPORTED_EVENT_CATEGORIES:
            raise ValueError('Event category is not one of the supported ' +
'types: {' +
', '.join(SUPPORTED_EVENT_CATEGORIES) + '}')
data = {
'eventType': event_type,
'category': category,
'dimensions': dimensions or {},
'properties': properties or {},
'timestamp': int(timestamp) if timestamp else None,
}
_logger.debug('Sending event to SignalFx: %s', data)
self._add_extra_dimensions(data)
return self._send_event(event_data=data, url='{0}/{1}'.format(
self._endpoint, self._INGEST_ENDPOINT_EVENT_SUFFIX),
session=self._session)
|
[
"def",
"send_event",
"(",
"self",
",",
"event_type",
",",
"category",
"=",
"None",
",",
"dimensions",
"=",
"None",
",",
"properties",
"=",
"None",
",",
"timestamp",
"=",
"None",
")",
":",
"if",
"category",
"and",
"category",
"not",
"in",
"SUPPORTED_EVENT_CATEGORIES",
":",
"raise",
"ValueError",
"(",
"'Event category is not one of the supported'",
"+",
"'types: {'",
"+",
"', '",
".",
"join",
"(",
"SUPPORTED_EVENT_CATEGORIES",
")",
"+",
"'}'",
")",
"data",
"=",
"{",
"'eventType'",
":",
"event_type",
",",
"'category'",
":",
"category",
",",
"'dimensions'",
":",
"dimensions",
"or",
"{",
"}",
",",
"'properties'",
":",
"properties",
"or",
"{",
"}",
",",
"'timestamp'",
":",
"int",
"(",
"timestamp",
")",
"if",
"timestamp",
"else",
"None",
",",
"}",
"_logger",
".",
"debug",
"(",
"'Sending event to SignalFx: %s'",
",",
"data",
")",
"self",
".",
"_add_extra_dimensions",
"(",
"data",
")",
"return",
"self",
".",
"_send_event",
"(",
"event_data",
"=",
"data",
",",
"url",
"=",
"'{0}/{1}'",
".",
"format",
"(",
"self",
".",
"_endpoint",
",",
"self",
".",
"_INGEST_ENDPOINT_EVENT_SUFFIX",
")",
",",
"session",
"=",
"self",
".",
"_session",
")"
] | 43.033333 | 19.766667 |
def _assemble_and_send_request(self):
"""
Fires off the Fedex request.
@warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
WHICH RESIDES ON FedexBaseService AND IS INHERITED.
"""
# Fire off the query.
return self.client.service.getPickupAvailability(
WebAuthenticationDetail=self.WebAuthenticationDetail,
ClientDetail=self.ClientDetail,
TransactionDetail=self.TransactionDetail,
Version=self.VersionId,
PickupType=self.PickupType,
AccountNumber=self.AccountNumber,
PickupAddress=self.PickupAddress,
PickupRequestType=self.PickupRequestType,
DispatchDate=self.DispatchDate,
NumberOfBusinessDays=self.NumberOfBusinessDays,
PackageReadyTime=self.PackageReadyTime,
CustomerCloseTime=self.CustomerCloseTime,
Carriers=self.Carriers,
ShipmentAttributes=self.ShipmentAttributes,
PackageDetails=self.PackageDetails
)
|
[
"def",
"_assemble_and_send_request",
"(",
"self",
")",
":",
"# Fire off the query.",
"return",
"self",
".",
"client",
".",
"service",
".",
"getPickupAvailability",
"(",
"WebAuthenticationDetail",
"=",
"self",
".",
"WebAuthenticationDetail",
",",
"ClientDetail",
"=",
"self",
".",
"ClientDetail",
",",
"TransactionDetail",
"=",
"self",
".",
"TransactionDetail",
",",
"Version",
"=",
"self",
".",
"VersionId",
",",
"PickupType",
"=",
"self",
".",
"PickupType",
",",
"AccountNumber",
"=",
"self",
".",
"AccountNumber",
",",
"PickupAddress",
"=",
"self",
".",
"PickupAddress",
",",
"PickupRequestType",
"=",
"self",
".",
"PickupRequestType",
",",
"DispatchDate",
"=",
"self",
".",
"DispatchDate",
",",
"NumberOfBusinessDays",
"=",
"self",
".",
"NumberOfBusinessDays",
",",
"PackageReadyTime",
"=",
"self",
".",
"PackageReadyTime",
",",
"CustomerCloseTime",
"=",
"self",
".",
"CustomerCloseTime",
",",
"Carriers",
"=",
"self",
".",
"Carriers",
",",
"ShipmentAttributes",
"=",
"self",
".",
"ShipmentAttributes",
",",
"PackageDetails",
"=",
"self",
".",
"PackageDetails",
")"
] | 40.153846 | 13.153846 |
def exit_config_mode(self, exit_config="exit all", pattern="#"):
"""Exit from configuration mode."""
return super(RadETXBase, self).exit_config_mode(
exit_config=exit_config, pattern=pattern
)
|
[
"def",
"exit_config_mode",
"(",
"self",
",",
"exit_config",
"=",
"\"exit all\"",
",",
"pattern",
"=",
"\"#\"",
")",
":",
"return",
"super",
"(",
"RadETXBase",
",",
"self",
")",
".",
"exit_config_mode",
"(",
"exit_config",
"=",
"exit_config",
",",
"pattern",
"=",
"pattern",
")"
] | 44.8 | 16.6 |
def remove(path, follow_symlink=False):
"""
    Implements a remove function that deletes files, folder trees, and symlink trees:
    1.) Remove a file
    2.) Remove a symlink and, if follow_symlink, recursively remove its target first
3.) Remove directory with rmtree
Args:
path (str): path to remove
        follow_symlink(bool): follow symlinks and remove whatever is in them
"""
if os.path.isfile(path):
os.remove(path)
elif os.path.islink(path):
if follow_symlink:
remove(os.readlink(path))
os.unlink(path)
else:
shutil.rmtree(path)
|
[
"def",
"remove",
"(",
"path",
",",
"follow_symlink",
"=",
"False",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"os",
".",
"remove",
"(",
"path",
")",
"elif",
"os",
".",
"path",
".",
"islink",
"(",
"path",
")",
":",
"if",
"follow_symlink",
":",
"remove",
"(",
"os",
".",
"readlink",
"(",
"path",
")",
")",
"os",
".",
"unlink",
"(",
"path",
")",
"else",
":",
"shutil",
".",
"rmtree",
"(",
"path",
")"
] | 29.95 | 19.05 |
def handle(self):
"""
Executes the command.
"""
creator = MigrationCreator()
name = self.argument("name")
table = self.option("table")
create = bool(self.option("create"))
if not table and create is not False:
table = create
path = self.option("path")
if path is None:
path = self._get_migration_path()
migration_name = self._write_migration(creator, name, table, create, path)
self.line("<info>Created migration:</info> {}".format(migration_name))
|
[
"def",
"handle",
"(",
"self",
")",
":",
"creator",
"=",
"MigrationCreator",
"(",
")",
"name",
"=",
"self",
".",
"argument",
"(",
"\"name\"",
")",
"table",
"=",
"self",
".",
"option",
"(",
"\"table\"",
")",
"create",
"=",
"bool",
"(",
"self",
".",
"option",
"(",
"\"create\"",
")",
")",
"if",
"not",
"table",
"and",
"create",
"is",
"not",
"False",
":",
"table",
"=",
"create",
"path",
"=",
"self",
".",
"option",
"(",
"\"path\"",
")",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"self",
".",
"_get_migration_path",
"(",
")",
"migration_name",
"=",
"self",
".",
"_write_migration",
"(",
"creator",
",",
"name",
",",
"table",
",",
"create",
",",
"path",
")",
"self",
".",
"line",
"(",
"\"<info>Created migration:</info> {}\"",
".",
"format",
"(",
"migration_name",
")",
")"
] | 27.7 | 18.8 |
def _raw_hex_id(obj):
"""Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(map(_replacer, packed))
|
[
"def",
"_raw_hex_id",
"(",
"obj",
")",
":",
"# interpret as a pointer since that's what really what id returns",
"packed",
"=",
"struct",
".",
"pack",
"(",
"'@P'",
",",
"id",
"(",
"obj",
")",
")",
"return",
"''",
".",
"join",
"(",
"map",
"(",
"_replacer",
",",
"packed",
")",
")"
] | 45 | 10.2 |
def get_values(self, *args, **kwargs):
"""
        Convenience method that, for simple single-tag queries, will
        return just the values to be iterated over.
"""
if isinstance(args[0], list):
raise ValueError("Can only get_values() for a single tag.")
response = self.get_datapoints(*args, **kwargs)
for value in response['tags'][0]['results'][0]['values']:
yield [datetime.datetime.utcfromtimestamp(value[0]/1000),
value[1],
value[2]]
|
[
"def",
"get_values",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"Can only get_values() for a single tag.\"",
")",
"response",
"=",
"self",
".",
"get_datapoints",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"value",
"in",
"response",
"[",
"'tags'",
"]",
"[",
"0",
"]",
"[",
"'results'",
"]",
"[",
"0",
"]",
"[",
"'values'",
"]",
":",
"yield",
"[",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"value",
"[",
"0",
"]",
"/",
"1000",
")",
",",
"value",
"[",
"1",
"]",
",",
"value",
"[",
"2",
"]",
"]"
] | 40.615385 | 15.692308 |
def tag_and_push_image(self, image, target_image, insecure=False, force=False,
dockercfg=None):
"""
tag provided image and push it to registry
:param image: str or ImageName, image id or name
    :param target_image: ImageName, target image
:param insecure: bool, allow connecting to registry over plain http
:param force: bool, force the tag?
:param dockercfg: path to docker config
:return: str, image (reg.com/img:v1)
"""
logger.info("tagging and pushing image '%s' as '%s'", image, target_image)
logger.debug("image = '%s', target_image = '%s'", image, target_image)
self.tag_image(image, target_image, force=force)
if dockercfg:
self.login(registry=target_image.registry, docker_secret_path=dockercfg)
return self.push_image(target_image, insecure=insecure)
|
[
"def",
"tag_and_push_image",
"(",
"self",
",",
"image",
",",
"target_image",
",",
"insecure",
"=",
"False",
",",
"force",
"=",
"False",
",",
"dockercfg",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"tagging and pushing image '%s' as '%s'\"",
",",
"image",
",",
"target_image",
")",
"logger",
".",
"debug",
"(",
"\"image = '%s', target_image = '%s'\"",
",",
"image",
",",
"target_image",
")",
"self",
".",
"tag_image",
"(",
"image",
",",
"target_image",
",",
"force",
"=",
"force",
")",
"if",
"dockercfg",
":",
"self",
".",
"login",
"(",
"registry",
"=",
"target_image",
".",
"registry",
",",
"docker_secret_path",
"=",
"dockercfg",
")",
"return",
"self",
".",
"push_image",
"(",
"target_image",
",",
"insecure",
"=",
"insecure",
")"
] | 49.111111 | 18.888889 |
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
|
[
"def",
"_trigger_event",
"(",
"self",
",",
"event",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"run_async",
"=",
"kwargs",
".",
"pop",
"(",
"'run_async'",
",",
"False",
")",
"if",
"event",
"in",
"self",
".",
"handlers",
":",
"if",
"run_async",
":",
"return",
"self",
".",
"start_background_task",
"(",
"self",
".",
"handlers",
"[",
"event",
"]",
",",
"*",
"args",
")",
"else",
":",
"try",
":",
"return",
"self",
".",
"handlers",
"[",
"event",
"]",
"(",
"*",
"args",
")",
"except",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"event",
"+",
"' handler error'",
")"
] | 41.363636 | 16.272727 |
def mark(self, channel, ts):
""" https://api.slack.com/methods/im.mark
"""
self.params.update({
'channel': channel,
'ts': ts,
})
return FromUrl('https://slack.com/api/im.mark', self._requests)(data=self.params).post()
|
[
"def",
"mark",
"(",
"self",
",",
"channel",
",",
"ts",
")",
":",
"self",
".",
"params",
".",
"update",
"(",
"{",
"'channel'",
":",
"channel",
",",
"'ts'",
":",
"ts",
",",
"}",
")",
"return",
"FromUrl",
"(",
"'https://slack.com/api/im.mark'",
",",
"self",
".",
"_requests",
")",
"(",
"data",
"=",
"self",
".",
"params",
")",
".",
"post",
"(",
")"
] | 35.375 | 16.125 |
def _seconds_as_string(seconds):
"""
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
"""
TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
unit_strings = []
cur = max(int(seconds), 1)
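    # build from the smallest unit up; days (size None) absorb the remainder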
for suffix, size in TIME_UNITS:
if size is not None:
cur, rest = divmod(cur, size)
else:
rest = cur
if rest > 0:
unit_strings.insert(0, '%d%s' % (rest, suffix))
return ' '.join(unit_strings)
|
[
"def",
"_seconds_as_string",
"(",
"seconds",
")",
":",
"TIME_UNITS",
"=",
"[",
"(",
"'s'",
",",
"60",
")",
",",
"(",
"'m'",
",",
"60",
")",
",",
"(",
"'h'",
",",
"24",
")",
",",
"(",
"'d'",
",",
"None",
")",
"]",
"unit_strings",
"=",
"[",
"]",
"cur",
"=",
"max",
"(",
"int",
"(",
"seconds",
")",
",",
"1",
")",
"for",
"suffix",
",",
"size",
"in",
"TIME_UNITS",
":",
"if",
"size",
"is",
"not",
"None",
":",
"cur",
",",
"rest",
"=",
"divmod",
"(",
"cur",
",",
"size",
")",
"else",
":",
"rest",
"=",
"cur",
"if",
"rest",
">",
"0",
":",
"unit_strings",
".",
"insert",
"(",
"0",
",",
"'%d%s'",
"%",
"(",
"rest",
",",
"suffix",
")",
")",
"return",
"' '",
".",
"join",
"(",
"unit_strings",
")"
] | 31.933333 | 13.133333 |
def channels(self):
"""channel count or 0 for unknown"""
# from ProgramConfigElement()
if hasattr(self, "pce_channels"):
return self.pce_channels
conf = getattr(
self, "extensionChannelConfiguration", self.channelConfiguration)
if conf == 1:
if self.psPresentFlag == -1:
return 0
elif self.psPresentFlag == 1:
return 2
else:
return 1
elif conf == 7:
return 8
elif conf > 7:
return 0
else:
return conf
|
[
"def",
"channels",
"(",
"self",
")",
":",
"# from ProgramConfigElement()",
"if",
"hasattr",
"(",
"self",
",",
"\"pce_channels\"",
")",
":",
"return",
"self",
".",
"pce_channels",
"conf",
"=",
"getattr",
"(",
"self",
",",
"\"extensionChannelConfiguration\"",
",",
"self",
".",
"channelConfiguration",
")",
"if",
"conf",
"==",
"1",
":",
"if",
"self",
".",
"psPresentFlag",
"==",
"-",
"1",
":",
"return",
"0",
"elif",
"self",
".",
"psPresentFlag",
"==",
"1",
":",
"return",
"2",
"else",
":",
"return",
"1",
"elif",
"conf",
"==",
"7",
":",
"return",
"8",
"elif",
"conf",
">",
"7",
":",
"return",
"0",
"else",
":",
"return",
"conf"
] | 25.608696 | 17.956522 |
def download_data(cdbs_root, verbose=True, dry_run=False):
"""Download CDBS data files to given root directory.
Download is skipped if a data file already exists.
Parameters
----------
cdbs_root : str
Root directory for CDBS data files.
verbose : bool
Print extra information to screen.
dry_run : bool
Go through the logic but skip the actual download.
This would return a list of files that *would have been*
downloaded without network calls.
Use this option for debugging or testing.
Raises
------
OSError
Problem with directory.
Returns
-------
file_list : list of str
A list of downloaded files.
"""
from .config import conf # Avoid potential circular import
if not os.path.exists(cdbs_root):
os.makedirs(cdbs_root, exist_ok=True)
if verbose: # pragma: no cover
print('Created {}'.format(cdbs_root))
elif not os.path.isdir(cdbs_root):
raise OSError('{} must be a directory'.format(cdbs_root))
host = 'http://ssb.stsci.edu/cdbs/'
file_list = []
if not cdbs_root.endswith(os.sep):
cdbs_root += os.sep
# See https://github.com/astropy/astropy/issues/8524
for cfgitem in conf.__class__.__dict__.values():
if (not isinstance(cfgitem, ConfigItem) or
not cfgitem.name.endswith('file')):
continue
url = cfgitem()
if not url.startswith(host):
if verbose: # pragma: no cover
print('{} is not from {}, skipping download'.format(
url, host))
continue
dst = url.replace(host, cdbs_root).replace('/', os.sep)
if os.path.exists(dst):
if verbose: # pragma: no cover
print('{} already exists, skipping download'.format(dst))
continue
# Create sub-directories, if needed.
subdirs = os.path.dirname(dst)
os.makedirs(subdirs, exist_ok=True)
if not dry_run: # pragma: no cover
try:
src = download_file(url)
copyfile(src, dst)
except Exception as exc:
print('Download failed - {}'.format(str(exc)))
continue
file_list.append(dst)
if verbose: # pragma: no cover
print('{} downloaded to {}'.format(url, dst))
return file_list
|
[
"def",
"download_data",
"(",
"cdbs_root",
",",
"verbose",
"=",
"True",
",",
"dry_run",
"=",
"False",
")",
":",
"from",
".",
"config",
"import",
"conf",
"# Avoid potential circular import",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cdbs_root",
")",
":",
"os",
".",
"makedirs",
"(",
"cdbs_root",
",",
"exist_ok",
"=",
"True",
")",
"if",
"verbose",
":",
"# pragma: no cover",
"print",
"(",
"'Created {}'",
".",
"format",
"(",
"cdbs_root",
")",
")",
"elif",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"cdbs_root",
")",
":",
"raise",
"OSError",
"(",
"'{} must be a directory'",
".",
"format",
"(",
"cdbs_root",
")",
")",
"host",
"=",
"'http://ssb.stsci.edu/cdbs/'",
"file_list",
"=",
"[",
"]",
"if",
"not",
"cdbs_root",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
":",
"cdbs_root",
"+=",
"os",
".",
"sep",
"# See https://github.com/astropy/astropy/issues/8524",
"for",
"cfgitem",
"in",
"conf",
".",
"__class__",
".",
"__dict__",
".",
"values",
"(",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"cfgitem",
",",
"ConfigItem",
")",
"or",
"not",
"cfgitem",
".",
"name",
".",
"endswith",
"(",
"'file'",
")",
")",
":",
"continue",
"url",
"=",
"cfgitem",
"(",
")",
"if",
"not",
"url",
".",
"startswith",
"(",
"host",
")",
":",
"if",
"verbose",
":",
"# pragma: no cover",
"print",
"(",
"'{} is not from {}, skipping download'",
".",
"format",
"(",
"url",
",",
"host",
")",
")",
"continue",
"dst",
"=",
"url",
".",
"replace",
"(",
"host",
",",
"cdbs_root",
")",
".",
"replace",
"(",
"'/'",
",",
"os",
".",
"sep",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dst",
")",
":",
"if",
"verbose",
":",
"# pragma: no cover",
"print",
"(",
"'{} already exists, skipping download'",
".",
"format",
"(",
"dst",
")",
")",
"continue",
"# Create sub-directories, if needed.",
"subdirs",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dst",
")",
"os",
".",
"makedirs",
"(",
"subdirs",
",",
"exist_ok",
"=",
"True",
")",
"if",
"not",
"dry_run",
":",
"# pragma: no cover",
"try",
":",
"src",
"=",
"download_file",
"(",
"url",
")",
"copyfile",
"(",
"src",
",",
"dst",
")",
"except",
"Exception",
"as",
"exc",
":",
"print",
"(",
"'Download failed - {}'",
".",
"format",
"(",
"str",
"(",
"exc",
")",
")",
")",
"continue",
"file_list",
".",
"append",
"(",
"dst",
")",
"if",
"verbose",
":",
"# pragma: no cover",
"print",
"(",
"'{} downloaded to {}'",
".",
"format",
"(",
"url",
",",
"dst",
")",
")",
"return",
"file_list"
] | 28.829268 | 19.231707 |
def DFS_Tree(G):
"""
Return an oriented tree constructed from dfs.
"""
if not G.vertices:
raise GraphInsertError("This graph have no vertices.")
pred = {}
T = digraph.DiGraph()
vertex_data = DFS(G)
for vertex in vertex_data:
pred[vertex] = vertex_data[vertex][0]
queue = Queue()
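    # seed the queue with root vertices (those with no predecessor)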
for vertex in pred:
        if pred[vertex] is None:
queue.put(vertex)
while queue.qsize() > 0:
current = queue.get()
for element in pred:
if pred[element] == current:
T.add_edge(current, element)
queue.put(element)
return T
|
[
"def",
"DFS_Tree",
"(",
"G",
")",
":",
"if",
"not",
"G",
".",
"vertices",
":",
"raise",
"GraphInsertError",
"(",
"\"This graph have no vertices.\"",
")",
"pred",
"=",
"{",
"}",
"T",
"=",
"digraph",
".",
"DiGraph",
"(",
")",
"vertex_data",
"=",
"DFS",
"(",
"G",
")",
"for",
"vertex",
"in",
"vertex_data",
":",
"pred",
"[",
"vertex",
"]",
"=",
"vertex_data",
"[",
"vertex",
"]",
"[",
"0",
"]",
"queue",
"=",
"Queue",
"(",
")",
"for",
"vertex",
"in",
"pred",
":",
"if",
"pred",
"[",
"vertex",
"]",
"==",
"None",
":",
"queue",
".",
"put",
"(",
"vertex",
")",
"while",
"queue",
".",
"qsize",
"(",
")",
">",
"0",
":",
"current",
"=",
"queue",
".",
"get",
"(",
")",
"for",
"element",
"in",
"pred",
":",
"if",
"pred",
"[",
"element",
"]",
"==",
"current",
":",
"T",
".",
"add_edge",
"(",
"current",
",",
"element",
")",
"queue",
".",
"put",
"(",
"element",
")",
"return",
"T"
] | 29.045455 | 12.136364 |
def run_command_orig(cmd):
""" No idea how th f to get this to work """
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
        os.killpg(os.getpgid(process.pid), signal.SIGTERM)
else:
raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (process.returncode, cmd, stdout + stderr))
return stdout
|
[
"def",
"run_command_orig",
"(",
"cmd",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"process",
".",
"communicate",
"(",
")",
"if",
"process",
".",
"returncode",
"==",
"0",
":",
"os",
".",
"killpg",
"(",
"os",
".",
"getpgid",
"(",
"pro",
".",
"pid",
")",
",",
"signal",
".",
"SIGTERM",
")",
"else",
":",
"raise",
"BadRCError",
"(",
"\"Bad rc (%s) for cmd '%s': %s\"",
"%",
"(",
"process",
".",
"returncode",
",",
"cmd",
",",
"stdout",
"+",
"stderr",
")",
")",
"return",
"stdout"
] | 47 | 23.222222 |
def new(cls, user, provider, federated_id):
"""
Create a new login
:param user: AuthUser
    :param provider: str - e.g. facebook, twitter, ...
    :param federated_id: str - an id associated with the provider
:return:
"""
if cls.get_user(provider, federated_id):
            raise exceptions.AuthError("Federation already exists")
return cls.create(user_id=user.id,
provider=provider,
federated_id=federated_id)
|
[
"def",
"new",
"(",
"cls",
",",
"user",
",",
"provider",
",",
"federated_id",
")",
":",
"if",
"cls",
".",
"get_user",
"(",
"provider",
",",
"federated_id",
")",
":",
"raise",
"exceptions",
".",
"AuthError",
"(",
"\"Federation already\"",
")",
"return",
"cls",
".",
"create",
"(",
"user_id",
"=",
"user",
".",
"id",
",",
"provider",
"=",
"provider",
",",
"federated_id",
"=",
"federated_id",
")"
] | 35.857143 | 12.714286 |
def _parse_recipients(self, to):
"""Make sure we have a "," separated list of recipients
:param to: Recipient(s)
:type to: (str,
list,
:class:`pyfilemail.Contact`,
:class:`pyfilemail.Group`
)
:rtype: ``str``
"""
if to is None:
return None
if isinstance(to, list):
recipients = []
for recipient in to:
if isinstance(recipient, dict):
if 'contactgroupname' in recipient:
recipients.append(recipient['contactgroupname'])
else:
recipients.append(recipient.get('email'))
else:
recipients.append(recipient)
elif isinstance(to, basestring):
if ',' in to:
recipients = to.strip().split(',')
else:
recipients = [to]
return ', '.join(recipients)
|
[
"def",
"_parse_recipients",
"(",
"self",
",",
"to",
")",
":",
"if",
"to",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"to",
",",
"list",
")",
":",
"recipients",
"=",
"[",
"]",
"for",
"recipient",
"in",
"to",
":",
"if",
"isinstance",
"(",
"recipient",
",",
"dict",
")",
":",
"if",
"'contactgroupname'",
"in",
"recipient",
":",
"recipients",
".",
"append",
"(",
"recipient",
"[",
"'contactgroupname'",
"]",
")",
"else",
":",
"recipients",
".",
"append",
"(",
"recipient",
".",
"get",
"(",
"'email'",
")",
")",
"else",
":",
"recipients",
".",
"append",
"(",
"recipient",
")",
"elif",
"isinstance",
"(",
"to",
",",
"basestring",
")",
":",
"if",
"','",
"in",
"to",
":",
"recipients",
"=",
"to",
".",
"strip",
"(",
")",
".",
"split",
"(",
"','",
")",
"else",
":",
"recipients",
"=",
"[",
"to",
"]",
"return",
"', '",
".",
"join",
"(",
"recipients",
")"
] | 26.702703 | 18.972973 |
def goassoc_fieldmap(relationship_type=ACTS_UPSTREAM_OF_OR_WITHIN):
"""
Returns a mapping of canonical monarch fields to amigo-golr.
See: https://github.com/geneontology/amigo/blob/master/metadata/ann-config.yaml
"""
return {
M.SUBJECT: 'bioentity',
M.SUBJECT_CLOSURE: 'bioentity',
## In the GO AmiGO instance, the type field is not correctly populated
## See above in the code for hack that restores this for planteome instance
## M.SUBJECT_CATEGORY: 'type',
M.SUBJECT_CATEGORY: None,
M.SUBJECT_LABEL: 'bioentity_label',
M.SUBJECT_TAXON: 'taxon',
M.SUBJECT_TAXON_LABEL: 'taxon_label',
M.SUBJECT_TAXON_CLOSURE: 'taxon_closure',
M.RELATION: 'qualifier',
M.OBJECT: 'annotation_class',
M.OBJECT_CLOSURE: REGULATES_CLOSURE if relationship_type == ACTS_UPSTREAM_OF_OR_WITHIN else ISA_PARTOF_CLOSURE,
M.OBJECT_LABEL: 'annotation_class_label',
M.OBJECT_TAXON: 'object_taxon',
M.OBJECT_TAXON_LABEL: 'object_taxon_label',
M.OBJECT_TAXON_CLOSURE: 'object_taxon_closure',
M.OBJECT_CATEGORY: None,
M.EVIDENCE_OBJECT_CLOSURE: 'evidence_subset_closure',
M.IS_DEFINED_BY: 'assigned_by'
}
|
[
"def",
"goassoc_fieldmap",
"(",
"relationship_type",
"=",
"ACTS_UPSTREAM_OF_OR_WITHIN",
")",
":",
"return",
"{",
"M",
".",
"SUBJECT",
":",
"'bioentity'",
",",
"M",
".",
"SUBJECT_CLOSURE",
":",
"'bioentity'",
",",
"## In the GO AmiGO instance, the type field is not correctly populated",
"## See above in the code for hack that restores this for planteome instance",
"## M.SUBJECT_CATEGORY: 'type',",
"M",
".",
"SUBJECT_CATEGORY",
":",
"None",
",",
"M",
".",
"SUBJECT_LABEL",
":",
"'bioentity_label'",
",",
"M",
".",
"SUBJECT_TAXON",
":",
"'taxon'",
",",
"M",
".",
"SUBJECT_TAXON_LABEL",
":",
"'taxon_label'",
",",
"M",
".",
"SUBJECT_TAXON_CLOSURE",
":",
"'taxon_closure'",
",",
"M",
".",
"RELATION",
":",
"'qualifier'",
",",
"M",
".",
"OBJECT",
":",
"'annotation_class'",
",",
"M",
".",
"OBJECT_CLOSURE",
":",
"REGULATES_CLOSURE",
"if",
"relationship_type",
"==",
"ACTS_UPSTREAM_OF_OR_WITHIN",
"else",
"ISA_PARTOF_CLOSURE",
",",
"M",
".",
"OBJECT_LABEL",
":",
"'annotation_class_label'",
",",
"M",
".",
"OBJECT_TAXON",
":",
"'object_taxon'",
",",
"M",
".",
"OBJECT_TAXON_LABEL",
":",
"'object_taxon_label'",
",",
"M",
".",
"OBJECT_TAXON_CLOSURE",
":",
"'object_taxon_closure'",
",",
"M",
".",
"OBJECT_CATEGORY",
":",
"None",
",",
"M",
".",
"EVIDENCE_OBJECT_CLOSURE",
":",
"'evidence_subset_closure'",
",",
"M",
".",
"IS_DEFINED_BY",
":",
"'assigned_by'",
"}"
] | 42.413793 | 17.862069 |
def bindata(df, delta=1.0, method="average"):
"""
Bin average the index (usually pressure) to a given interval (default
delta = 1).
"""
start = np.floor(df.index[0])
stop = np.ceil(df.index[-1])
new_index = np.arange(start, stop, delta)
binned = pd.cut(df.index, bins=new_index)
if method == "average":
new_df = df.groupby(binned).mean()
new_df.index = new_index[:-1]
elif method == "interpolate":
raise NotImplementedError(
"Bin-average via interpolation method is not Implemented yet."
)
else:
raise ValueError(
f"Expected method `average` or `interpolate`, but got {method}."
)
return new_df
|
[
"def",
"bindata",
"(",
"df",
",",
"delta",
"=",
"1.0",
",",
"method",
"=",
"\"average\"",
")",
":",
"start",
"=",
"np",
".",
"floor",
"(",
"df",
".",
"index",
"[",
"0",
"]",
")",
"stop",
"=",
"np",
".",
"ceil",
"(",
"df",
".",
"index",
"[",
"-",
"1",
"]",
")",
"new_index",
"=",
"np",
".",
"arange",
"(",
"start",
",",
"stop",
",",
"delta",
")",
"binned",
"=",
"pd",
".",
"cut",
"(",
"df",
".",
"index",
",",
"bins",
"=",
"new_index",
")",
"if",
"method",
"==",
"\"average\"",
":",
"new_df",
"=",
"df",
".",
"groupby",
"(",
"binned",
")",
".",
"mean",
"(",
")",
"new_df",
".",
"index",
"=",
"new_index",
"[",
":",
"-",
"1",
"]",
"elif",
"method",
"==",
"\"interpolate\"",
":",
"raise",
"NotImplementedError",
"(",
"\"Bin-average via interpolation method is not Implemented yet.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Expected method `average` or `interpolate`, but got {method}.\"",
")",
"return",
"new_df"
] | 31.545455 | 16.363636 |
def recall(ntp, nfn):
'''
This calculates recall.
https://en.wikipedia.org/wiki/Precision_and_recall
Parameters
----------
ntp : int
The number of true positives.
nfn : int
The number of false negatives.
Returns
-------
float
        The recall calculated using `ntp/(ntp + nfn)`.
'''
if (ntp+nfn) > 0:
return ntp/(ntp+nfn)
else:
return np.nan
|
[
"def",
"recall",
"(",
"ntp",
",",
"nfn",
")",
":",
"if",
"(",
"ntp",
"+",
"nfn",
")",
">",
"0",
":",
"return",
"ntp",
"/",
"(",
"ntp",
"+",
"nfn",
")",
"else",
":",
"return",
"np",
".",
"nan"
] | 15.259259 | 27.037037 |
def add(self, resource):
"""Add a resource to the context"""
if isinstance(resource, Resource):
if isinstance(resource, Secret) and \
resource.mount != 'cubbyhole':
ensure_backend(resource,
SecretBackend,
self._mounts,
self.opt,
False)
elif isinstance(resource, Mount):
ensure_backend(resource, SecretBackend, self._mounts, self.opt)
elif isinstance(resource, Auth):
ensure_backend(resource, AuthBackend, self._auths, self.opt)
elif isinstance(resource, AuditLog):
ensure_backend(resource, LogBackend, self._logs, self.opt)
self._resources.append(resource)
else:
msg = "Unknown resource %s being " \
"added to context" % resource.__class__
raise aomi_excep.AomiError(msg)
|
[
"def",
"add",
"(",
"self",
",",
"resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"Resource",
")",
":",
"if",
"isinstance",
"(",
"resource",
",",
"Secret",
")",
"and",
"resource",
".",
"mount",
"!=",
"'cubbyhole'",
":",
"ensure_backend",
"(",
"resource",
",",
"SecretBackend",
",",
"self",
".",
"_mounts",
",",
"self",
".",
"opt",
",",
"False",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"Mount",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"SecretBackend",
",",
"self",
".",
"_mounts",
",",
"self",
".",
"opt",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"Auth",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"AuthBackend",
",",
"self",
".",
"_auths",
",",
"self",
".",
"opt",
")",
"elif",
"isinstance",
"(",
"resource",
",",
"AuditLog",
")",
":",
"ensure_backend",
"(",
"resource",
",",
"LogBackend",
",",
"self",
".",
"_logs",
",",
"self",
".",
"opt",
")",
"self",
".",
"_resources",
".",
"append",
"(",
"resource",
")",
"else",
":",
"msg",
"=",
"\"Unknown resource %s being \"",
"\"added to context\"",
"%",
"resource",
".",
"__class__",
"raise",
"aomi_excep",
".",
"AomiError",
"(",
"msg",
")"
] | 44.545455 | 12.227273 |
def fix_e305(self, result):
"""Add missing 2 blank lines after end of function or class."""
cr = '\n'
# check comment line
offset = result['line'] - 2
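        # walk upward past leading comment lines so the blank line lands above them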
while True:
if offset < 0:
break
line = self.source[offset].lstrip()
if len(line) == 0:
break
if line[0] != '#':
break
offset -= 1
offset += 1
self.source[offset] = cr + self.source[offset]
|
[
"def",
"fix_e305",
"(",
"self",
",",
"result",
")",
":",
"cr",
"=",
"'\\n'",
"# check comment line",
"offset",
"=",
"result",
"[",
"'line'",
"]",
"-",
"2",
"while",
"True",
":",
"if",
"offset",
"<",
"0",
":",
"break",
"line",
"=",
"self",
".",
"source",
"[",
"offset",
"]",
".",
"lstrip",
"(",
")",
"if",
"len",
"(",
"line",
")",
"==",
"0",
":",
"break",
"if",
"line",
"[",
"0",
"]",
"!=",
"'#'",
":",
"break",
"offset",
"-=",
"1",
"offset",
"+=",
"1",
"self",
".",
"source",
"[",
"offset",
"]",
"=",
"cr",
"+",
"self",
".",
"source",
"[",
"offset",
"]"
] | 30.5625 | 14 |
def query_recent(num=8, **kwargs):
'''
query recent posts.
'''
order_by_create = kwargs.get('order_by_create', False)
kind = kwargs.get('kind', None)
if order_by_create:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_create.desc()
).limit(num)
else:
if kind:
recent_recs = TabPost.select().where(
(TabPost.kind == kind) & (TabPost.valid == 1)
).order_by(
TabPost.time_update.desc()
).limit(num)
else:
recent_recs = TabPost.select().where(
TabPost.valid == 1
).order_by(
TabPost.time_update.desc()
).limit(num)
return recent_recs
|
[
"def",
"query_recent",
"(",
"num",
"=",
"8",
",",
"*",
"*",
"kwargs",
")",
":",
"order_by_create",
"=",
"kwargs",
".",
"get",
"(",
"'order_by_create'",
",",
"False",
")",
"kind",
"=",
"kwargs",
".",
"get",
"(",
"'kind'",
",",
"None",
")",
"if",
"order_by_create",
":",
"if",
"kind",
":",
"recent_recs",
"=",
"TabPost",
".",
"select",
"(",
")",
".",
"where",
"(",
"(",
"TabPost",
".",
"kind",
"==",
"kind",
")",
"&",
"(",
"TabPost",
".",
"valid",
"==",
"1",
")",
")",
".",
"order_by",
"(",
"TabPost",
".",
"time_create",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"num",
")",
"else",
":",
"recent_recs",
"=",
"TabPost",
".",
"select",
"(",
")",
".",
"where",
"(",
"TabPost",
".",
"valid",
"==",
"1",
")",
".",
"order_by",
"(",
"TabPost",
".",
"time_create",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"num",
")",
"else",
":",
"if",
"kind",
":",
"recent_recs",
"=",
"TabPost",
".",
"select",
"(",
")",
".",
"where",
"(",
"(",
"TabPost",
".",
"kind",
"==",
"kind",
")",
"&",
"(",
"TabPost",
".",
"valid",
"==",
"1",
")",
")",
".",
"order_by",
"(",
"TabPost",
".",
"time_update",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"num",
")",
"else",
":",
"recent_recs",
"=",
"TabPost",
".",
"select",
"(",
")",
".",
"where",
"(",
"TabPost",
".",
"valid",
"==",
"1",
")",
".",
"order_by",
"(",
"TabPost",
".",
"time_update",
".",
"desc",
"(",
")",
")",
".",
"limit",
"(",
"num",
")",
"return",
"recent_recs"
] | 34.727273 | 14.242424 |
def upsert_and_fetch(coll, doc, **kwargs):
"""
Fetch exactly one matching document or upsert
the document if not found, returning the matching
or upserted document.
See https://jira.mongodb.org/browse/SERVER-28434
describing the condition where MongoDB is uninterested in
providing an upsert and fetch behavior.
>>> instance = getfixture('mongodb_instance').get_connection()
>>> coll = instance.test_upsert_and_fetch.items
>>> doc = {'foo': 'bar'}
>>> inserted = upsert_and_fetch(coll, doc)
>>> inserted
{...'foo': 'bar'...}
>>> upsert_and_fetch(coll, doc) == inserted
True
"""
return coll.find_one_and_update(
doc,
{"$setOnInsert": doc},
upsert=True,
return_document=pymongo.ReturnDocument.AFTER,
**kwargs
)
|
[
"def",
"upsert_and_fetch",
"(",
"coll",
",",
"doc",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"coll",
".",
"find_one_and_update",
"(",
"doc",
",",
"{",
"\"$setOnInsert\"",
":",
"doc",
"}",
",",
"upsert",
"=",
"True",
",",
"return_document",
"=",
"pymongo",
".",
"ReturnDocument",
".",
"AFTER",
",",
"*",
"*",
"kwargs",
")"
] | 30.653846 | 16.269231 |
def setCompleteRedYellowGreenDefinition(self, tlsID, tls):
"""setCompleteRedYellowGreenDefinition(string, ) -> None
.
"""
length = 1 + 4 + 1 + 4 + \
len(tls._subID) + 1 + 4 + 1 + 4 + 1 + 4 + 1 + 4 # tls parameter
itemNo = 1 + 1 + 1 + 1 + 1
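        # each phase adds four compound items: three typed integers and one typed string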
for p in tls._phases:
length += 1 + 4 + 1 + 4 + 1 + 4 + 1 + 4 + len(p._phaseDef)
itemNo += 4
self._connection._beginMessage(
tc.CMD_SET_TL_VARIABLE, tc.TL_COMPLETE_PROGRAM_RYG, tlsID, length)
self._connection._string += struct.pack("!Bi",
tc.TYPE_COMPOUND, itemNo)
# programID
self._connection._packString(tls._subID)
# type
self._connection._string += struct.pack("!Bi", tc.TYPE_INTEGER, 0)
# subitems
self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 0)
# index
self._connection._string += struct.pack("!Bi",
tc.TYPE_INTEGER, tls._currentPhaseIndex)
# phaseNo
self._connection._string += struct.pack("!Bi",
tc.TYPE_INTEGER, len(tls._phases))
for p in tls._phases:
self._connection._string += struct.pack("!BiBiBi", tc.TYPE_INTEGER,
p._duration, tc.TYPE_INTEGER, p._duration1, tc.TYPE_INTEGER, p._duration2)
self._connection._packString(p._phaseDef)
self._connection._sendExact()
|
[
"def",
"setCompleteRedYellowGreenDefinition",
"(",
"self",
",",
"tlsID",
",",
"tls",
")",
":",
"length",
"=",
"1",
"+",
"4",
"+",
"1",
"+",
"4",
"+",
"len",
"(",
"tls",
".",
"_subID",
")",
"+",
"1",
"+",
"4",
"+",
"1",
"+",
"4",
"+",
"1",
"+",
"4",
"+",
"1",
"+",
"4",
"# tls parameter",
"itemNo",
"=",
"1",
"+",
"1",
"+",
"1",
"+",
"1",
"+",
"1",
"for",
"p",
"in",
"tls",
".",
"_phases",
":",
"length",
"+=",
"1",
"+",
"4",
"+",
"1",
"+",
"4",
"+",
"1",
"+",
"4",
"+",
"1",
"+",
"4",
"+",
"len",
"(",
"p",
".",
"_phaseDef",
")",
"itemNo",
"+=",
"4",
"self",
".",
"_connection",
".",
"_beginMessage",
"(",
"tc",
".",
"CMD_SET_TL_VARIABLE",
",",
"tc",
".",
"TL_COMPLETE_PROGRAM_RYG",
",",
"tlsID",
",",
"length",
")",
"self",
".",
"_connection",
".",
"_string",
"+=",
"struct",
".",
"pack",
"(",
"\"!Bi\"",
",",
"tc",
".",
"TYPE_COMPOUND",
",",
"itemNo",
")",
"# programID",
"self",
".",
"_connection",
".",
"_packString",
"(",
"tls",
".",
"_subID",
")",
"# type",
"self",
".",
"_connection",
".",
"_string",
"+=",
"struct",
".",
"pack",
"(",
"\"!Bi\"",
",",
"tc",
".",
"TYPE_INTEGER",
",",
"0",
")",
"# subitems",
"self",
".",
"_connection",
".",
"_string",
"+=",
"struct",
".",
"pack",
"(",
"\"!Bi\"",
",",
"tc",
".",
"TYPE_COMPOUND",
",",
"0",
")",
"# index",
"self",
".",
"_connection",
".",
"_string",
"+=",
"struct",
".",
"pack",
"(",
"\"!Bi\"",
",",
"tc",
".",
"TYPE_INTEGER",
",",
"tls",
".",
"_currentPhaseIndex",
")",
"# phaseNo",
"self",
".",
"_connection",
".",
"_string",
"+=",
"struct",
".",
"pack",
"(",
"\"!Bi\"",
",",
"tc",
".",
"TYPE_INTEGER",
",",
"len",
"(",
"tls",
".",
"_phases",
")",
")",
"for",
"p",
"in",
"tls",
".",
"_phases",
":",
"self",
".",
"_connection",
".",
"_string",
"+=",
"struct",
".",
"pack",
"(",
"\"!BiBiBi\"",
",",
"tc",
".",
"TYPE_INTEGER",
",",
"p",
".",
"_duration",
",",
"tc",
".",
"TYPE_INTEGER",
",",
"p",
".",
"_duration1",
",",
"tc",
".",
"TYPE_INTEGER",
",",
"p",
".",
"_duration2",
")",
"self",
".",
"_connection",
".",
"_packString",
"(",
"p",
".",
"_phaseDef",
")",
"self",
".",
"_connection",
".",
"_sendExact",
"(",
")"
] | 47.9375 | 23.28125 |
def query_flag(ifo, name, start_time, end_time,
source='any', server="segments.ligo.org",
veto_definer=None, cache=False):
"""Return the times where the flag is active
Parameters
----------
ifo: string
The interferometer to query (H1, L1).
name: string
The status flag to query from LOSC.
start_time: int
The starting gps time to begin querying from LOSC
end_time: int
The end gps time of the query
source: str, Optional
Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
also be given. The default is to try GWOSC first then try dqsegdb.
server: str, Optional
The server path. Only used with dqsegdb atm.
veto_definer: str, Optional
The path to a veto definer to define groups of flags which
themselves define a set of segments.
cache: bool
If true cache the query. Default is not to cache
Returns
---------
segments: glue.segments.segmentlist
List of segments
"""
info = name.split(':')
if len(info) == 2:
segment_name, version = info
elif len(info) == 1:
segment_name = info[0]
version = 1
flag_segments = segmentlist([])
if source in ['GWOSC', 'any']:
# Special cases as the LOSC convention is backwards from normal
# LIGO / Virgo operation!!!!
if (('_HW_INJ' in segment_name and 'NO' not in segment_name) or
'VETO' in segment_name):
data = query_flag(ifo, 'DATA', start_time, end_time)
if '_HW_INJ' in segment_name:
name = 'NO_' + segment_name
else:
name = segment_name.replace('_VETO', '')
negate = query_flag(ifo, name, start_time, end_time, cache=cache)
return (data - negate).coalesce()
duration = end_time - start_time
url = GWOSC_URL.format(get_run(start_time + duration/2),
ifo, segment_name,
int(start_time), int(duration))
try:
fname = download_file(url, cache=cache)
data = json.load(open(fname, 'r'))
if 'segments' in data:
flag_segments = data['segments']
except Exception as e:
msg = "Unable to find segments in GWOSC, check flag name or times"
print(e)
if source != 'any':
raise ValueError(msg)
else:
print("Tried and failed GWOSC {}, trying dqsegdb", name)
return query_flag(ifo, segment_name, start_time, end_time,
source='dqsegdb', server=server,
veto_definer=veto_definer)
elif source == 'dqsegdb':
# Let's not hard require dqsegdb to be installed if we never get here.
try:
from dqsegdb.apicalls import dqsegdbQueryTimes as query
except ImportError:
raise ValueError("Could not query flag. Install dqsegdb"
":'pip install dqsegdb'")
# The veto definer will allow the use of MACRO names
        # These directly correspond to the names defined in the veto definer file.
if veto_definer is not None:
veto_def = parse_veto_definer(veto_definer)
# We treat the veto definer name as if it were its own flag and
        # process the flags in the veto definer
if veto_definer is not None and segment_name in veto_def[ifo]:
for flag in veto_def[ifo][segment_name]:
segs = query("https", server, ifo, flag['name'],
flag['version'], 'active',
int(start_time), int(end_time))[0]['active']
# Apply padding to each segment
for rseg in segs:
seg_start = rseg[0] + flag['start_pad']
seg_end = rseg[1] + flag['end_pad']
flag_segments.append(segment(seg_start, seg_end))
# Apply start / end of the veto definer segment
send = segmentlist([segment([veto_def['start'], veto_def['end']])])
flag_segments = (flag_segments.coalesce() & send)
else: # Standard case just query directly.
try:
segs = query("https", server, ifo, name, version,
'active', int(start_time),
int(end_time))[0]['active']
for rseg in segs:
flag_segments.append(segment(rseg[0], rseg[1]))
except Exception as e:
print("Could not query flag, check name "
" (%s) or times" % segment_name)
raise e
else:
raise ValueError("Source must be dqsegdb or GWOSC."
" Got {}".format(source))
return segmentlist(flag_segments).coalesce()
|
[
"def",
"query_flag",
"(",
"ifo",
",",
"name",
",",
"start_time",
",",
"end_time",
",",
"source",
"=",
"'any'",
",",
"server",
"=",
"\"segments.ligo.org\"",
",",
"veto_definer",
"=",
"None",
",",
"cache",
"=",
"False",
")",
":",
"info",
"=",
"name",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"info",
")",
"==",
"2",
":",
"segment_name",
",",
"version",
"=",
"info",
"elif",
"len",
"(",
"info",
")",
"==",
"1",
":",
"segment_name",
"=",
"info",
"[",
"0",
"]",
"version",
"=",
"1",
"flag_segments",
"=",
"segmentlist",
"(",
"[",
"]",
")",
"if",
"source",
"in",
"[",
"'GWOSC'",
",",
"'any'",
"]",
":",
"# Special cases as the LOSC convention is backwards from normal",
"# LIGO / Virgo operation!!!!",
"if",
"(",
"(",
"'_HW_INJ'",
"in",
"segment_name",
"and",
"'NO'",
"not",
"in",
"segment_name",
")",
"or",
"'VETO'",
"in",
"segment_name",
")",
":",
"data",
"=",
"query_flag",
"(",
"ifo",
",",
"'DATA'",
",",
"start_time",
",",
"end_time",
")",
"if",
"'_HW_INJ'",
"in",
"segment_name",
":",
"name",
"=",
"'NO_'",
"+",
"segment_name",
"else",
":",
"name",
"=",
"segment_name",
".",
"replace",
"(",
"'_VETO'",
",",
"''",
")",
"negate",
"=",
"query_flag",
"(",
"ifo",
",",
"name",
",",
"start_time",
",",
"end_time",
",",
"cache",
"=",
"cache",
")",
"return",
"(",
"data",
"-",
"negate",
")",
".",
"coalesce",
"(",
")",
"duration",
"=",
"end_time",
"-",
"start_time",
"url",
"=",
"GWOSC_URL",
".",
"format",
"(",
"get_run",
"(",
"start_time",
"+",
"duration",
"/",
"2",
")",
",",
"ifo",
",",
"segment_name",
",",
"int",
"(",
"start_time",
")",
",",
"int",
"(",
"duration",
")",
")",
"try",
":",
"fname",
"=",
"download_file",
"(",
"url",
",",
"cache",
"=",
"cache",
")",
"data",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"fname",
",",
"'r'",
")",
")",
"if",
"'segments'",
"in",
"data",
":",
"flag_segments",
"=",
"data",
"[",
"'segments'",
"]",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"\"Unable to find segments in GWOSC, check flag name or times\"",
"print",
"(",
"e",
")",
"if",
"source",
"!=",
"'any'",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"print",
"(",
"\"Tried and failed GWOSC {}, trying dqsegdb\"",
",",
"name",
")",
"return",
"query_flag",
"(",
"ifo",
",",
"segment_name",
",",
"start_time",
",",
"end_time",
",",
"source",
"=",
"'dqsegdb'",
",",
"server",
"=",
"server",
",",
"veto_definer",
"=",
"veto_definer",
")",
"elif",
"source",
"==",
"'dqsegdb'",
":",
"# Let's not hard require dqsegdb to be installed if we never get here.",
"try",
":",
"from",
"dqsegdb",
".",
"apicalls",
"import",
"dqsegdbQueryTimes",
"as",
"query",
"except",
"ImportError",
":",
"raise",
"ValueError",
"(",
"\"Could not query flag. Install dqsegdb\"",
"\":'pip install dqsegdb'\"",
")",
"# The veto definer will allow the use of MACRO names",
"# These directly correspond the name defined in the veto definer file.",
"if",
"veto_definer",
"is",
"not",
"None",
":",
"veto_def",
"=",
"parse_veto_definer",
"(",
"veto_definer",
")",
"# We treat the veto definer name as if it were its own flag and",
"# a process the flags in the veto definer",
"if",
"veto_definer",
"is",
"not",
"None",
"and",
"segment_name",
"in",
"veto_def",
"[",
"ifo",
"]",
":",
"for",
"flag",
"in",
"veto_def",
"[",
"ifo",
"]",
"[",
"segment_name",
"]",
":",
"segs",
"=",
"query",
"(",
"\"https\"",
",",
"server",
",",
"ifo",
",",
"flag",
"[",
"'name'",
"]",
",",
"flag",
"[",
"'version'",
"]",
",",
"'active'",
",",
"int",
"(",
"start_time",
")",
",",
"int",
"(",
"end_time",
")",
")",
"[",
"0",
"]",
"[",
"'active'",
"]",
"# Apply padding to each segment",
"for",
"rseg",
"in",
"segs",
":",
"seg_start",
"=",
"rseg",
"[",
"0",
"]",
"+",
"flag",
"[",
"'start_pad'",
"]",
"seg_end",
"=",
"rseg",
"[",
"1",
"]",
"+",
"flag",
"[",
"'end_pad'",
"]",
"flag_segments",
".",
"append",
"(",
"segment",
"(",
"seg_start",
",",
"seg_end",
")",
")",
"# Apply start / end of the veto definer segment",
"send",
"=",
"segmentlist",
"(",
"[",
"segment",
"(",
"[",
"veto_def",
"[",
"'start'",
"]",
",",
"veto_def",
"[",
"'end'",
"]",
"]",
")",
"]",
")",
"flag_segments",
"=",
"(",
"flag_segments",
".",
"coalesce",
"(",
")",
"&",
"send",
")",
"else",
":",
"# Standard case just query directly.",
"try",
":",
"segs",
"=",
"query",
"(",
"\"https\"",
",",
"server",
",",
"ifo",
",",
"name",
",",
"version",
",",
"'active'",
",",
"int",
"(",
"start_time",
")",
",",
"int",
"(",
"end_time",
")",
")",
"[",
"0",
"]",
"[",
"'active'",
"]",
"for",
"rseg",
"in",
"segs",
":",
"flag_segments",
".",
"append",
"(",
"segment",
"(",
"rseg",
"[",
"0",
"]",
",",
"rseg",
"[",
"1",
"]",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"Could not query flag, check name \"",
"\" (%s) or times\"",
"%",
"segment_name",
")",
"raise",
"e",
"else",
":",
"raise",
"ValueError",
"(",
"\"Source must be dqsegdb or GWOSC.\"",
"\" Got {}\"",
".",
"format",
"(",
"source",
")",
")",
"return",
"segmentlist",
"(",
"flag_segments",
")",
".",
"coalesce",
"(",
")"
] | 38.055118 | 20.582677 |
def update_records(self, headers, zoneID, updateRecords):
"""Update DNS records."""
IP = requests.get(self.GET_EXT_IP_URL).text
message = True
errorsRecords = []
        successRecords = []
for record in updateRecords:
updateEndpoint = '/' + zoneID + '/dns_records/' + record[0]
updateUrl = self.BASE_URL + updateEndpoint
data = json.dumps({
'id': zoneID,
'type': record[2],
'name': record[1],
'content': IP,
'proxied': record[4]
})
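            # only rewrite A records whose content differs from the current external IP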
if record[3] != IP and record[2] == 'A':
result = requests.put(updateUrl,
headers=headers, data=data).json()
if result['success'] == True:
                    successRecords.append(record[1])
else:
errorsRecords.append(record[1])
if errorsRecords != []:
message = ("There was an error updating these records: "
+ str(errorsRecords) + " , the rest is OK.")
else:
message = ("These records got updated: "
                       + str(successRecords))
return message
|
[
"def",
"update_records",
"(",
"self",
",",
"headers",
",",
"zoneID",
",",
"updateRecords",
")",
":",
"IP",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"GET_EXT_IP_URL",
")",
".",
"text",
"message",
"=",
"True",
"errorsRecords",
"=",
"[",
"]",
"sucessRecords",
"=",
"[",
"]",
"for",
"record",
"in",
"updateRecords",
":",
"updateEndpoint",
"=",
"'/'",
"+",
"zoneID",
"+",
"'/dns_records/'",
"+",
"record",
"[",
"0",
"]",
"updateUrl",
"=",
"self",
".",
"BASE_URL",
"+",
"updateEndpoint",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"'id'",
":",
"zoneID",
",",
"'type'",
":",
"record",
"[",
"2",
"]",
",",
"'name'",
":",
"record",
"[",
"1",
"]",
",",
"'content'",
":",
"IP",
",",
"'proxied'",
":",
"record",
"[",
"4",
"]",
"}",
")",
"if",
"record",
"[",
"3",
"]",
"!=",
"IP",
"and",
"record",
"[",
"2",
"]",
"==",
"'A'",
":",
"result",
"=",
"requests",
".",
"put",
"(",
"updateUrl",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
")",
".",
"json",
"(",
")",
"if",
"result",
"[",
"'success'",
"]",
"==",
"True",
":",
"sucessRecords",
".",
"append",
"(",
"record",
"[",
"1",
"]",
")",
"else",
":",
"errorsRecords",
".",
"append",
"(",
"record",
"[",
"1",
"]",
")",
"if",
"errorsRecords",
"!=",
"[",
"]",
":",
"message",
"=",
"(",
"\"There was an error updating these records: \"",
"+",
"str",
"(",
"errorsRecords",
")",
"+",
"\" , the rest is OK.\"",
")",
"else",
":",
"message",
"=",
"(",
"\"These records got updated: \"",
"+",
"str",
"(",
"sucessRecords",
")",
")",
"return",
"message"
] | 41.366667 | 13.266667 |
def get_pull_request_thread(self, repository_id, pull_request_id, thread_id, project=None, iteration=None, base_iteration=None):
"""GetPullRequestThread.
[Preview API] Retrieve a thread in a pull request.
:param str repository_id: The repository ID of the pull request's target branch.
:param int pull_request_id: ID of the pull request.
:param int thread_id: ID of the thread.
:param str project: Project ID or project name
:param int iteration: If specified, thread position will be tracked using this iteration as the right side of the diff.
:param int base_iteration: If specified, thread position will be tracked using this iteration as the left side of the diff.
:rtype: :class:`<GitPullRequestCommentThread> <azure.devops.v5_1.git.models.GitPullRequestCommentThread>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
if pull_request_id is not None:
route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
if thread_id is not None:
route_values['threadId'] = self._serialize.url('thread_id', thread_id, 'int')
query_parameters = {}
if iteration is not None:
query_parameters['$iteration'] = self._serialize.query('iteration', iteration, 'int')
if base_iteration is not None:
query_parameters['$baseIteration'] = self._serialize.query('base_iteration', base_iteration, 'int')
response = self._send(http_method='GET',
location_id='ab6e2e5d-a0b7-4153-b64a-a4efe0d49449',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('GitPullRequestCommentThread', response)
|
[
"def",
"get_pull_request_thread",
"(",
"self",
",",
"repository_id",
",",
"pull_request_id",
",",
"thread_id",
",",
"project",
"=",
"None",
",",
"iteration",
"=",
"None",
",",
"base_iteration",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"repository_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'repositoryId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'repository_id'",
",",
"repository_id",
",",
"'str'",
")",
"if",
"pull_request_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'pullRequestId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'pull_request_id'",
",",
"pull_request_id",
",",
"'int'",
")",
"if",
"thread_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'threadId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'thread_id'",
",",
"thread_id",
",",
"'int'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"iteration",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'$iteration'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'iteration'",
",",
"iteration",
",",
"'int'",
")",
"if",
"base_iteration",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'$baseIteration'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'base_iteration'",
",",
"base_iteration",
",",
"'int'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'ab6e2e5d-a0b7-4153-b64a-a4efe0d49449'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'GitPullRequestCommentThread'",
",",
"response",
")"
] | 67 | 31.741935 |
def _check_superbox_for_top_levels(self, boxes):
"""Several boxes can only occur at the top level."""
# We are only looking at the boxes contained in a superbox, so if any
# of the blacklisted boxes show up here, it's an error.
TOP_LEVEL_ONLY_BOXES = set(['dtbl'])
box_ids = set([box.box_id for box in boxes])
intersection = box_ids.intersection(TOP_LEVEL_ONLY_BOXES)
if len(intersection) > 0:
msg = "A {0} box cannot be nested in a superbox."
raise IOError(msg.format(list(intersection)[0]))
# Recursively check any contained superboxes.
for box in boxes:
if hasattr(box, 'box'):
self._check_superbox_for_top_levels(box.box)
|
[
"def",
"_check_superbox_for_top_levels",
"(",
"self",
",",
"boxes",
")",
":",
"# We are only looking at the boxes contained in a superbox, so if any",
"# of the blacklisted boxes show up here, it's an error.",
"TOP_LEVEL_ONLY_BOXES",
"=",
"set",
"(",
"[",
"'dtbl'",
"]",
")",
"box_ids",
"=",
"set",
"(",
"[",
"box",
".",
"box_id",
"for",
"box",
"in",
"boxes",
"]",
")",
"intersection",
"=",
"box_ids",
".",
"intersection",
"(",
"TOP_LEVEL_ONLY_BOXES",
")",
"if",
"len",
"(",
"intersection",
")",
">",
"0",
":",
"msg",
"=",
"\"A {0} box cannot be nested in a superbox.\"",
"raise",
"IOError",
"(",
"msg",
".",
"format",
"(",
"list",
"(",
"intersection",
")",
"[",
"0",
"]",
")",
")",
"# Recursively check any contained superboxes.",
"for",
"box",
"in",
"boxes",
":",
"if",
"hasattr",
"(",
"box",
",",
"'box'",
")",
":",
"self",
".",
"_check_superbox_for_top_levels",
"(",
"box",
".",
"box",
")"
] | 49.066667 | 16.666667 |
def accel_increase_transparency(self, *args):
"""Callback to increase transparency.
"""
transparency = self.settings.styleBackground.get_int('transparency')
if int(transparency) - 2 > 0:
self.settings.styleBackground.set_int('transparency', int(transparency) - 2)
return True
|
[
"def",
"accel_increase_transparency",
"(",
"self",
",",
"*",
"args",
")",
":",
"transparency",
"=",
"self",
".",
"settings",
".",
"styleBackground",
".",
"get_int",
"(",
"'transparency'",
")",
"if",
"int",
"(",
"transparency",
")",
"-",
"2",
">",
"0",
":",
"self",
".",
"settings",
".",
"styleBackground",
".",
"set_int",
"(",
"'transparency'",
",",
"int",
"(",
"transparency",
")",
"-",
"2",
")",
"return",
"True"
] | 45.857143 | 16.142857 |
def start_at(self, start_at):
"""
Sets the start_at of this Shift.
RFC 3339; shifted to location timezone + offset. Precision up to the minute is respected; seconds are truncated.
:param start_at: The start_at of this Shift.
:type: str
"""
if start_at is None:
raise ValueError("Invalid value for `start_at`, must not be `None`")
if len(start_at) < 1:
raise ValueError("Invalid value for `start_at`, length must be greater than or equal to `1`")
self._start_at = start_at
|
[
"def",
"start_at",
"(",
"self",
",",
"start_at",
")",
":",
"if",
"start_at",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `start_at`, must not be `None`\"",
")",
"if",
"len",
"(",
"start_at",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `start_at`, length must be greater than or equal to `1`\"",
")",
"self",
".",
"_start_at",
"=",
"start_at"
] | 37.066667 | 25.333333 |
def process_phosphorylation_statements(self):
"""Looks for Phosphorylation events in the graph and extracts them into
INDRA statements.
In particular, looks for a Positive_regulation event node with a child
Phosphorylation event node.
If Positive_regulation has an outgoing Cause edge, that's the subject
If Phosphorylation has an outgoing Theme edge, that's the object
If Phosphorylation has an outgoing Site edge, that's the site
"""
G = self.G
statements = []
pwcs = self.find_event_parent_with_event_child('Positive_regulation',
'Phosphorylation')
for pair in pwcs:
(pos_reg, phos) = pair
cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
theme = self.get_entity_text_for_relation(phos, 'Theme')
print('Cause:', cause, 'Theme:', theme)
# If the trigger word is dephosphorylate or similar, then we
# extract a dephosphorylation statement
trigger_word = self.get_entity_text_for_relation(phos,
'Phosphorylation')
if 'dephos' in trigger_word:
deph = True
else:
deph = False
site = self.get_entity_text_for_relation(phos, 'Site')
theme_node = self.get_related_node(phos, 'Theme')
assert(theme_node is not None)
evidence = self.node_to_evidence(theme_node, is_direct=False)
if theme is not None:
if deph:
statements.append(Dephosphorylation(s2a(cause),
s2a(theme), site, evidence=evidence))
else:
statements.append(Phosphorylation(s2a(cause),
s2a(theme), site, evidence=evidence))
return statements
|
[
"def",
"process_phosphorylation_statements",
"(",
"self",
")",
":",
"G",
"=",
"self",
".",
"G",
"statements",
"=",
"[",
"]",
"pwcs",
"=",
"self",
".",
"find_event_parent_with_event_child",
"(",
"'Positive_regulation'",
",",
"'Phosphorylation'",
")",
"for",
"pair",
"in",
"pwcs",
":",
"(",
"pos_reg",
",",
"phos",
")",
"=",
"pair",
"cause",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"pos_reg",
",",
"'Cause'",
")",
"theme",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"phos",
",",
"'Theme'",
")",
"print",
"(",
"'Cause:'",
",",
"cause",
",",
"'Theme:'",
",",
"theme",
")",
"# If the trigger word is dephosphorylate or similar, then we",
"# extract a dephosphorylation statement",
"trigger_word",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"phos",
",",
"'Phosphorylation'",
")",
"if",
"'dephos'",
"in",
"trigger_word",
":",
"deph",
"=",
"True",
"else",
":",
"deph",
"=",
"False",
"site",
"=",
"self",
".",
"get_entity_text_for_relation",
"(",
"phos",
",",
"'Site'",
")",
"theme_node",
"=",
"self",
".",
"get_related_node",
"(",
"phos",
",",
"'Theme'",
")",
"assert",
"(",
"theme_node",
"is",
"not",
"None",
")",
"evidence",
"=",
"self",
".",
"node_to_evidence",
"(",
"theme_node",
",",
"is_direct",
"=",
"False",
")",
"if",
"theme",
"is",
"not",
"None",
":",
"if",
"deph",
":",
"statements",
".",
"append",
"(",
"Dephosphorylation",
"(",
"s2a",
"(",
"cause",
")",
",",
"s2a",
"(",
"theme",
")",
",",
"site",
",",
"evidence",
"=",
"evidence",
")",
")",
"else",
":",
"statements",
".",
"append",
"(",
"Phosphorylation",
"(",
"s2a",
"(",
"cause",
")",
",",
"s2a",
"(",
"theme",
")",
",",
"site",
",",
"evidence",
"=",
"evidence",
")",
")",
"return",
"statements"
] | 43.066667 | 23.511111 |
def from_sec(class_, sec):
"""
Create a key from an sec bytestream (which is an encoding of a public pair).
"""
public_pair = sec_to_public_pair(sec, class_._generator)
return class_(public_pair=public_pair, is_compressed=is_sec_compressed(sec))
|
[
"def",
"from_sec",
"(",
"class_",
",",
"sec",
")",
":",
"public_pair",
"=",
"sec_to_public_pair",
"(",
"sec",
",",
"class_",
".",
"_generator",
")",
"return",
"class_",
"(",
"public_pair",
"=",
"public_pair",
",",
"is_compressed",
"=",
"is_sec_compressed",
"(",
"sec",
")",
")"
] | 46.666667 | 21 |
def num_neighbors(self, pores, mode='or', flatten=False):
r"""
        Returns the number of neighboring pores for each given input pore
Parameters
----------
pores : array_like
Pores whose neighbors are to be counted
flatten : boolean (optional)
            If ``False`` (default), the number of pores neighboring each input
            pore is returned as an array the same length as ``pores``. If
            ``True``, the sum total number of neighbors is counted.
mode : string
The logic to apply to the returned count of pores.
**'or'** : (default) All neighbors of the input pores. This is
also known as the 'union' in set theory or 'any' in boolean logic.
Both keywords are accepted and treated as 'or'.
**'xor'** : Only neighbors of one and only one input pore. This
is useful for counting the pores that are not shared by any of the
input pores. This is known as 'exclusive_or' in set theory, and
is an accepted input.
**'xnor'** : Neighbors that are shared by two or more input pores.
This is equivalent to counting all neighbors with 'or', minus those
found with 'xor', and is useful for finding neighbors that the
inputs have in common.
**'and'** : Only neighbors shared by all input pores. This is also
            known as 'intersection' in set theory and (sometimes) as 'all' in
boolean logic. Both keywords are accepted and treated as 'and'.
Returns
-------
If ``flatten`` is False, a 1D array with number of neighbors in each
element, otherwise a scalar value of the number of neighbors.
Notes
-----
This method literally just counts the number of elements in the array
returned by ``find_neighbor_pores`` using the same logic. Explore
those methods if uncertain about the meaning of the ``mode`` argument
here.
See Also
--------
find_neighbor_pores
find_neighbor_throats
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> Np = pn.num_neighbors(pores=[0, 1], flatten=False)
>>> print(Np)
[3 4]
>>> Np = pn.num_neighbors(pores=[0, 2], flatten=True)
>>> print(Np)
6
>>> Np = pn.num_neighbors(pores=[0, 2], mode='and', flatten=True)
>>> print(Np)
1
"""
pores = self._parse_indices(pores)
# Count number of neighbors
num = self.find_neighbor_pores(pores, flatten=flatten,
mode=mode, include_input=True)
if flatten:
num = sp.size(num)
else:
num = sp.array([sp.size(i) for i in num], dtype=int)
return num
|
[
"def",
"num_neighbors",
"(",
"self",
",",
"pores",
",",
"mode",
"=",
"'or'",
",",
"flatten",
"=",
"False",
")",
":",
"pores",
"=",
"self",
".",
"_parse_indices",
"(",
"pores",
")",
"# Count number of neighbors",
"num",
"=",
"self",
".",
"find_neighbor_pores",
"(",
"pores",
",",
"flatten",
"=",
"flatten",
",",
"mode",
"=",
"mode",
",",
"include_input",
"=",
"True",
")",
"if",
"flatten",
":",
"num",
"=",
"sp",
".",
"size",
"(",
"num",
")",
"else",
":",
"num",
"=",
"sp",
".",
"array",
"(",
"[",
"sp",
".",
"size",
"(",
"i",
")",
"for",
"i",
"in",
"num",
"]",
",",
"dtype",
"=",
"int",
")",
"return",
"num"
] | 37.826667 | 25.893333 |
def wateryear(self):
"""The actual hydrological year according to the selected
reference month.
        The reference month |Date.refmonth| defaults to November:
>>> october = Date('1996.10.01')
>>> november = Date('1996.11.01')
>>> october.wateryear
1996
>>> november.wateryear
1997
Note that changing |Date.refmonth| affects all |Date| objects:
>>> october.refmonth = 10
>>> october.wateryear
1997
>>> november.wateryear
1997
>>> october.refmonth = 'November'
>>> october.wateryear
1996
>>> november.wateryear
1997
"""
if self.month < self._firstmonth_wateryear:
return self.year
return self.year + 1
|
[
"def",
"wateryear",
"(",
"self",
")",
":",
"if",
"self",
".",
"month",
"<",
"self",
".",
"_firstmonth_wateryear",
":",
"return",
"self",
".",
"year",
"return",
"self",
".",
"year",
"+",
"1"
] | 26.724138 | 18.448276 |