text (string, lengths 89 to 104k) | code_tokens (list) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 630)
---|---|---|---|
def _get_valid_fill_mask(arr, dim, limit):
'''helper function to determine values that can be filled when limit is not
None'''
kw = {dim: limit + 1}
# we explicitly use construct method to avoid copy.
new_dim = utils.get_temp_dimname(arr.dims, '_window')
return (arr.isnull().rolling(min_periods=1, **kw)
.construct(new_dim, fill_value=False)
.sum(new_dim, skipna=False)) <= limit | [
"def",
"_get_valid_fill_mask",
"(",
"arr",
",",
"dim",
",",
"limit",
")",
":",
"kw",
"=",
"{",
"dim",
":",
"limit",
"+",
"1",
"}",
"# we explicitly use construct method to avoid copy.",
"new_dim",
"=",
"utils",
".",
"get_temp_dimname",
"(",
"arr",
".",
"dims",
",",
"'_window'",
")",
"return",
"(",
"arr",
".",
"isnull",
"(",
")",
".",
"rolling",
"(",
"min_periods",
"=",
"1",
",",
"*",
"*",
"kw",
")",
".",
"construct",
"(",
"new_dim",
",",
"fill_value",
"=",
"False",
")",
".",
"sum",
"(",
"new_dim",
",",
"skipna",
"=",
"False",
")",
")",
"<=",
"limit"
]
| 46.666667 | 16.444444 |
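A minimal pandas sketch of the same limit-mask idea (an illustrative assumption; the helper above works on xarray objects with a named dimension and a temporary window dimension, not on pandas Series):
import pandas as pd

def valid_fill_mask_1d(series, limit):
    # Count NaNs in a trailing window of limit + 1 samples; a position is
    # fillable only while that count stays at or below the limit.
    nan_count = series.isnull().astype(int).rolling(limit + 1, min_periods=1).sum()
    return nan_count <= limit

s = pd.Series([1.0, None, None, None, 5.0])
print(valid_fill_mask_1d(s, limit=2).tolist())  # [True, True, True, False, True]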
def process_exception_message(exception):
"""
Process an exception message.
Args:
exception: The exception to process.
Returns:
A filtered string summarizing the exception.
"""
exception_message = str(exception)
for replace_char in ['\t', '\n', '\\n']:
exception_message = exception_message.replace(replace_char, '' if replace_char != '\t' else ' ')
return exception_message.replace('section', 'alias') | [
"def",
"process_exception_message",
"(",
"exception",
")",
":",
"exception_message",
"=",
"str",
"(",
"exception",
")",
"for",
"replace_char",
"in",
"[",
"'\\t'",
",",
"'\\n'",
",",
"'\\\\n'",
"]",
":",
"exception_message",
"=",
"exception_message",
".",
"replace",
"(",
"replace_char",
",",
"''",
"if",
"replace_char",
"!=",
"'\\t'",
"else",
"' '",
")",
"return",
"exception_message",
".",
"replace",
"(",
"'section'",
",",
"'alias'",
")"
]
| 35.071429 | 18.357143 |
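A self-contained sketch of the flattening behaviour documented above, using a local copy of the function so it runs without the surrounding module:
def flatten_exception_message(exc):
    message = str(exc)
    for char in ['\t', '\n', '\\n']:
        message = message.replace(char, ' ' if char == '\t' else '')
    return message.replace('section', 'alias')

try:
    raise ValueError("bad\tvalue in section\n")
except ValueError as exc:
    print(flatten_exception_message(exc))  # bad value in alias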
def showroom_get_roomid_by_room_url_key(room_url_key):
"""str->str"""
fake_headers_mobile = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
}
webpage_url = 'https://www.showroom-live.com/' + room_url_key
html = get_content(webpage_url, headers = fake_headers_mobile)
roomid = match1(html, r'room\?room_id\=(\d+)')
assert roomid
return roomid | [
"def",
"showroom_get_roomid_by_room_url_key",
"(",
"room_url_key",
")",
":",
"fake_headers_mobile",
"=",
"{",
"'Accept'",
":",
"'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'",
",",
"'Accept-Charset'",
":",
"'UTF-8,*;q=0.5'",
",",
"'Accept-Encoding'",
":",
"'gzip,deflate,sdch'",
",",
"'Accept-Language'",
":",
"'en-US,en;q=0.8'",
",",
"'User-Agent'",
":",
"'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'",
"}",
"webpage_url",
"=",
"'https://www.showroom-live.com/'",
"+",
"room_url_key",
"html",
"=",
"get_content",
"(",
"webpage_url",
",",
"headers",
"=",
"fake_headers_mobile",
")",
"roomid",
"=",
"match1",
"(",
"html",
",",
"r'room\\?room_id\\=(\\d+)'",
")",
"assert",
"roomid",
"return",
"roomid"
]
| 49.785714 | 24.785714 |
def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
"""
Compute smoothing by columns in a dataframe. The bounding box and point
information is computed from the geometry column.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
t : Headbanging_Triples instance or list of Headbanging_Triples
list of headbanging triples instances. If not provided, this
is computed from the geometry column of the dataframe.
geom_col: string
the name of the column in the dataframe containing the
geometry information.
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a new dataframe containing the smoothed Headbanging Median Rates for the
event/population pairs. If done inplace, there is no return value and
`df` is modified in place.
"""
import pandas as pd
if not inplace:
new = df.copy()
cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
return new
import pandas as pd
# prep for application over multiple event/population pairs
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if len(e) > len(b):
b = b * len(e)
data = get_points_array(df[geom_col])
#Headbanging_Triples doesn't take **kwargs, so filter its arguments
# (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
w = kwargs.pop('w', None)
if w is None:
found = False
for k in df._metadata:
                w = df.__dict__.get(k, None)  # look up each attached attribute by name
if isinstance(w, W):
found = True
if not found:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
k = kwargs.pop('k', 5)
t = kwargs.pop('t', 3)
angle = kwargs.pop('angle', 135.0)
edgecor = kwargs.pop('edgecor', False)
hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
edgecor=edgecor)
res = []
for ename, bname in zip(e, b):
r = cls(df[ename], df[bname], hbt, **kwargs).r
name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
df[name] = r | [
"def",
"by_col",
"(",
"cls",
",",
"df",
",",
"e",
",",
"b",
",",
"t",
"=",
"None",
",",
"geom_col",
"=",
"'geometry'",
",",
"inplace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"pandas",
"as",
"pd",
"if",
"not",
"inplace",
":",
"new",
"=",
"df",
".",
"copy",
"(",
")",
"cls",
".",
"by_col",
"(",
"new",
",",
"e",
",",
"b",
",",
"t",
"=",
"t",
",",
"geom_col",
"=",
"geom_col",
",",
"inplace",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"return",
"new",
"import",
"pandas",
"as",
"pd",
"# prep for application over multiple event/population pairs",
"if",
"isinstance",
"(",
"e",
",",
"str",
")",
":",
"e",
"=",
"[",
"e",
"]",
"if",
"isinstance",
"(",
"b",
",",
"str",
")",
":",
"b",
"=",
"[",
"b",
"]",
"if",
"len",
"(",
"e",
")",
">",
"len",
"(",
"b",
")",
":",
"b",
"=",
"b",
"*",
"len",
"(",
"e",
")",
"data",
"=",
"get_points_array",
"(",
"df",
"[",
"geom_col",
"]",
")",
"#Headbanging_Triples doesn't take **kwargs, so filter its arguments",
"# (self, data, w, k=5, t=3, angle=135.0, edgecor=False):",
"w",
"=",
"kwargs",
".",
"pop",
"(",
"'w'",
",",
"None",
")",
"if",
"w",
"is",
"None",
":",
"found",
"=",
"False",
"for",
"k",
"in",
"df",
".",
"_metadata",
":",
"w",
"=",
"df",
".",
"__dict__",
".",
"get",
"(",
"w",
",",
"None",
")",
"if",
"isinstance",
"(",
"w",
",",
"W",
")",
":",
"found",
"=",
"True",
"if",
"not",
"found",
":",
"raise",
"Exception",
"(",
"'Weights not provided and no weights attached to frame!'",
"' Please provide a weight or attach a weight to the'",
"' dataframe'",
")",
"k",
"=",
"kwargs",
".",
"pop",
"(",
"'k'",
",",
"5",
")",
"t",
"=",
"kwargs",
".",
"pop",
"(",
"'t'",
",",
"3",
")",
"angle",
"=",
"kwargs",
".",
"pop",
"(",
"'angle'",
",",
"135.0",
")",
"edgecor",
"=",
"kwargs",
".",
"pop",
"(",
"'edgecor'",
",",
"False",
")",
"hbt",
"=",
"Headbanging_Triples",
"(",
"data",
",",
"w",
",",
"k",
"=",
"k",
",",
"t",
"=",
"t",
",",
"angle",
"=",
"angle",
",",
"edgecor",
"=",
"edgecor",
")",
"res",
"=",
"[",
"]",
"for",
"ename",
",",
"bname",
"in",
"zip",
"(",
"e",
",",
"b",
")",
":",
"r",
"=",
"cls",
"(",
"df",
"[",
"ename",
"]",
",",
"df",
"[",
"bname",
"]",
",",
"hbt",
",",
"*",
"*",
"kwargs",
")",
".",
"r",
"name",
"=",
"'_'",
".",
"join",
"(",
"(",
"'-'",
".",
"join",
"(",
"(",
"ename",
",",
"bname",
")",
")",
",",
"cls",
".",
"__name__",
".",
"lower",
"(",
")",
")",
")",
"df",
"[",
"name",
"]",
"=",
"r"
]
| 41.102564 | 20.769231 |
def setup(__pkg: str) -> jinja2.Environment:
"""Configure a new Jinja environment with our filters.
Args:
__pkg: Package name to use as base for templates searches
Returns:
Configured Jinja environment
"""
dirs = [path.join(d, 'templates')
for d in xdg_basedir.get_data_dirs(__pkg)]
env = jinja2.Environment(
autoescape=jinja2.select_autoescape(['html', 'xml']),
loader=jinja2.ChoiceLoader([jinja2.FileSystemLoader(s) for s in dirs]))
env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))
env.filters.update(FILTERS)
return env | [
"def",
"setup",
"(",
"__pkg",
":",
"str",
")",
"->",
"jinja2",
".",
"Environment",
":",
"dirs",
"=",
"[",
"path",
".",
"join",
"(",
"d",
",",
"'templates'",
")",
"for",
"d",
"in",
"xdg_basedir",
".",
"get_data_dirs",
"(",
"__pkg",
")",
"]",
"env",
"=",
"jinja2",
".",
"Environment",
"(",
"autoescape",
"=",
"jinja2",
".",
"select_autoescape",
"(",
"[",
"'html'",
",",
"'xml'",
"]",
")",
",",
"loader",
"=",
"jinja2",
".",
"ChoiceLoader",
"(",
"[",
"jinja2",
".",
"FileSystemLoader",
"(",
"s",
")",
"for",
"s",
"in",
"dirs",
"]",
")",
")",
"env",
".",
"loader",
".",
"loaders",
".",
"append",
"(",
"jinja2",
".",
"PackageLoader",
"(",
"__pkg",
",",
"'templates'",
")",
")",
"env",
".",
"filters",
".",
"update",
"(",
"FILTERS",
")",
"return",
"env"
]
| 33.722222 | 20.333333 |
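A minimal runnable illustration of the same Jinja pattern, assuming only that jinja2 is installed; it swaps the filesystem/package loaders for an in-memory DictLoader and uses a throwaway `shout` filter in place of the project's FILTERS mapping:
import jinja2

env = jinja2.Environment(
    autoescape=jinja2.select_autoescape(['html', 'xml']),
    loader=jinja2.ChoiceLoader([
        jinja2.DictLoader({'hello.html': 'Hello {{ who | shout }}!'}),
    ]))
env.filters['shout'] = lambda value: str(value).upper()  # stand-in for FILTERS
print(env.get_template('hello.html').render(who='world'))  # Hello WORLD!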
def open_with_encoding(filename, encoding, mode='r'):
"""Return opened file with a specific encoding."""
return io.open(filename, mode=mode, encoding=encoding,
newline='') | [
"def",
"open_with_encoding",
"(",
"filename",
",",
"encoding",
",",
"mode",
"=",
"'r'",
")",
":",
"return",
"io",
".",
"open",
"(",
"filename",
",",
"mode",
"=",
"mode",
",",
"encoding",
"=",
"encoding",
",",
"newline",
"=",
"''",
")"
]
| 48.75 | 10.25 |
def clone_model(model_instance):
"""
    Returns a copy of the given model with all objects cloned. This is equivalent to saving the model to
    a file and reloading it, but it doesn't require writing or reading to/from disk. The original model is not touched.
    :param model_instance: model to be cloned
:return: a cloned copy of the given model
"""
data = model_instance.to_dict_with_types()
parser = ModelParser(model_dict=data)
return parser.get_model() | [
"def",
"clone_model",
"(",
"model_instance",
")",
":",
"data",
"=",
"model_instance",
".",
"to_dict_with_types",
"(",
")",
"parser",
"=",
"ModelParser",
"(",
"model_dict",
"=",
"data",
")",
"return",
"parser",
".",
"get_model",
"(",
")"
]
| 33.071429 | 23.928571 |
def VerifyStructure(self, parser_mediator, lines):
"""Verifies whether content corresponds to an SCCM log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
"""
# Identify the token to which we attempt a match.
match = self._PARSING_COMPONENTS['msg_left_delimiter'].match
    # Because log files can lead with a partial event,
    # we can't assume that the first character (post-BOM)
    # in the file is the beginning of our match - so we
    # look for the match anywhere in lines.
return match in lines | [
"def",
"VerifyStructure",
"(",
"self",
",",
"parser_mediator",
",",
"lines",
")",
":",
"# Identify the token to which we attempt a match.",
"match",
"=",
"self",
".",
"_PARSING_COMPONENTS",
"[",
"'msg_left_delimiter'",
"]",
".",
"match",
"# Because logs files can lead with a partial event,",
"# we can't assume that the first character (post-BOM)",
"# in the file is the beginning of our match - so we",
"# look for match anywhere in lines.",
"return",
"match",
"in",
"lines"
]
| 39.210526 | 20.210526 |
def _get_verdict(result):
"""Gets verdict of the testcase."""
verdict = result.get("verdict")
if not verdict:
return None
verdict = verdict.strip().lower()
if verdict not in Verdicts.PASS + Verdicts.FAIL + Verdicts.SKIP + Verdicts.WAIT:
return None
return verdict | [
"def",
"_get_verdict",
"(",
"result",
")",
":",
"verdict",
"=",
"result",
".",
"get",
"(",
"\"verdict\"",
")",
"if",
"not",
"verdict",
":",
"return",
"None",
"verdict",
"=",
"verdict",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"verdict",
"not",
"in",
"Verdicts",
".",
"PASS",
"+",
"Verdicts",
".",
"FAIL",
"+",
"Verdicts",
".",
"SKIP",
"+",
"Verdicts",
".",
"WAIT",
":",
"return",
"None",
"return",
"verdict"
]
| 36.333333 | 14.888889 |
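A self-contained version of the verdict check, with small stand-in constants since the real Verdicts class lives elsewhere in the project:
class Verdicts:
    PASS = ('passed',)
    FAIL = ('failed', 'error')
    SKIP = ('skipped',)
    WAIT = ('waiting',)

def get_verdict(result):
    verdict = result.get('verdict')
    if not verdict:
        return None
    verdict = verdict.strip().lower()
    if verdict not in Verdicts.PASS + Verdicts.FAIL + Verdicts.SKIP + Verdicts.WAIT:
        return None
    return verdict

print(get_verdict({'verdict': '  PASSED '}))  # passed
print(get_verdict({'verdict': 'maybe'}))      # None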
def draw_circle(self, center, radius, array, value, mode="set"):
"""
Draws a circle of specified radius on the input array and fills it with specified value
:param center: a tuple for the center of the circle
:type center: tuple (x,y)
:param radius: how many pixels in radius the circle is
:type radius: int
:param array: image to draw circle on
:type array: size (m,n) numpy array
:param value: what value to fill the circle with
:type value: float
:param mode: if "set" will assign the circle interior value, if "add" will add the value to the circle interior,
throws exception otherwise
:type mode: string, either "set" or "add"
        :return: the row indices, column indices and updated values of the circle interior
"""
ri, ci = draw.circle(center[0], center[1],
radius=radius,
shape=array.shape)
if mode == "add":
array[ri, ci] += value
elif mode == "set":
array[ri, ci] = value
else:
raise ValueError("draw_circle mode must be 'set' or 'add' but {} used".format(mode))
return ri, ci, array[ri,ci] | [
"def",
"draw_circle",
"(",
"self",
",",
"center",
",",
"radius",
",",
"array",
",",
"value",
",",
"mode",
"=",
"\"set\"",
")",
":",
"ri",
",",
"ci",
"=",
"draw",
".",
"circle",
"(",
"center",
"[",
"0",
"]",
",",
"center",
"[",
"1",
"]",
",",
"radius",
"=",
"radius",
",",
"shape",
"=",
"array",
".",
"shape",
")",
"if",
"mode",
"==",
"\"add\"",
":",
"array",
"[",
"ri",
",",
"ci",
"]",
"+=",
"value",
"elif",
"mode",
"==",
"\"set\"",
":",
"array",
"[",
"ri",
",",
"ci",
"]",
"=",
"value",
"else",
":",
"raise",
"ValueError",
"(",
"\"draw_circle mode must be 'set' or 'add' but {} used\"",
".",
"format",
"(",
"mode",
")",
")",
"return",
"ri",
",",
"ci",
",",
"array",
"[",
"ri",
",",
"ci",
"]"
]
| 45.230769 | 16.307692 |
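A pure-NumPy sketch of the same operation (the method above gets its pixel coordinates from skimage.draw instead; newer scikit-image releases expose that helper as draw.disk):
import numpy as np

def fill_circle(array, center, radius, value, mode='set'):
    # Build a boolean mask of the circle interior and apply the value.
    rows, cols = np.ogrid[:array.shape[0], :array.shape[1]]
    mask = (rows - center[0]) ** 2 + (cols - center[1]) ** 2 <= radius ** 2
    if mode == 'add':
        array[mask] += value
    elif mode == 'set':
        array[mask] = value
    else:
        raise ValueError("mode must be 'set' or 'add' but {} used".format(mode))
    return array

print(fill_circle(np.zeros((7, 7)), center=(3, 3), radius=2, value=1.0))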
def parse_stdout(self, filelike):
"""Parse the content written by the script to standard out into a `CifData` object.
:param filelike: filelike object of stdout
:returns: an exit code in case of an error, None otherwise
"""
from CifFile import StarError
if not filelike.read().strip():
return self.exit_codes.ERROR_EMPTY_OUTPUT_FILE
try:
filelike.seek(0)
cif = CifData(file=filelike)
except StarError:
self.logger.exception('Failed to parse a `CifData` from the stdout file\n%s', traceback.format_exc())
return self.exit_codes.ERROR_PARSING_CIF_DATA
else:
self.out('cif', cif)
return | [
"def",
"parse_stdout",
"(",
"self",
",",
"filelike",
")",
":",
"from",
"CifFile",
"import",
"StarError",
"if",
"not",
"filelike",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
":",
"return",
"self",
".",
"exit_codes",
".",
"ERROR_EMPTY_OUTPUT_FILE",
"try",
":",
"filelike",
".",
"seek",
"(",
"0",
")",
"cif",
"=",
"CifData",
"(",
"file",
"=",
"filelike",
")",
"except",
"StarError",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Failed to parse a `CifData` from the stdout file\\n%s'",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"self",
".",
"exit_codes",
".",
"ERROR_PARSING_CIF_DATA",
"else",
":",
"self",
".",
"out",
"(",
"'cif'",
",",
"cif",
")",
"return"
]
| 34.238095 | 20.52381 |
def compute_pool(instance1, attribute1, relation1,
instance2, attribute2, relation2,
prefix1, prefix2, doinstance=True, doattribute=True, dorelation=True):
"""
compute all possible node mapping candidates and their weights (the triple matching number gain resulting from
mapping one node in AMR 1 to another node in AMR2)
Arguments:
instance1: instance triples of AMR 1
attribute1: attribute triples of AMR 1 (attribute name, node name, attribute value)
relation1: relation triples of AMR 1 (relation name, node 1 name, node 2 name)
instance2: instance triples of AMR 2
attribute2: attribute triples of AMR 2 (attribute name, node name, attribute value)
relation2: relation triples of AMR 2 (relation name, node 1 name, node 2 name
prefix1: prefix label for AMR 1
prefix2: prefix label for AMR 2
Returns:
candidate_mapping: a list of candidate nodes.
The ith element contains the node indices (in AMR 2) the ith node (in AMR 1) can map to.
(resulting in non-zero triple match)
weight_dict: a dictionary which contains the matching triple number for every pair of node mapping. The key
is a node pair. The value is another dictionary. key {-1} is triple match resulting from this node
pair alone (instance triples and attribute triples), and other keys are node pairs that can result
in relation triple match together with the first node pair.
"""
candidate_mapping = []
weight_dict = {}
for instance1_item in instance1:
# each candidate mapping is a set of node indices
candidate_mapping.append(set())
if doinstance:
for instance2_item in instance2:
# if both triples are instance triples and have the same value
if normalize(instance1_item[0]) == normalize(instance2_item[0]) and \
normalize(instance1_item[2]) == normalize(instance2_item[2]):
# get node index by stripping the prefix
node1_index = int(instance1_item[1][len(prefix1):])
node2_index = int(instance2_item[1][len(prefix2):])
candidate_mapping[node1_index].add(node2_index)
node_pair = (node1_index, node2_index)
# use -1 as key in weight_dict for instance triples and attribute triples
if node_pair in weight_dict:
weight_dict[node_pair][-1] += 1
else:
weight_dict[node_pair] = {}
weight_dict[node_pair][-1] = 1
if doattribute:
for attribute1_item in attribute1:
for attribute2_item in attribute2:
# if both attribute relation triple have the same relation name and value
if normalize(attribute1_item[0]) == normalize(attribute2_item[0]) \
and normalize(attribute1_item[2]) == normalize(attribute2_item[2]):
node1_index = int(attribute1_item[1][len(prefix1):])
node2_index = int(attribute2_item[1][len(prefix2):])
candidate_mapping[node1_index].add(node2_index)
node_pair = (node1_index, node2_index)
# use -1 as key in weight_dict for instance triples and attribute triples
if node_pair in weight_dict:
weight_dict[node_pair][-1] += 1
else:
weight_dict[node_pair] = {}
weight_dict[node_pair][-1] = 1
if dorelation:
for relation1_item in relation1:
for relation2_item in relation2:
# if both relation share the same name
if normalize(relation1_item[0]) == normalize(relation2_item[0]):
node1_index_amr1 = int(relation1_item[1][len(prefix1):])
node1_index_amr2 = int(relation2_item[1][len(prefix2):])
node2_index_amr1 = int(relation1_item[2][len(prefix1):])
node2_index_amr2 = int(relation2_item[2][len(prefix2):])
# add mapping between two nodes
candidate_mapping[node1_index_amr1].add(node1_index_amr2)
candidate_mapping[node2_index_amr1].add(node2_index_amr2)
node_pair1 = (node1_index_amr1, node1_index_amr2)
node_pair2 = (node2_index_amr1, node2_index_amr2)
if node_pair2 != node_pair1:
# update weight_dict weight. Note that we need to update both entries for future search
# i.e weight_dict[node_pair1][node_pair2]
# weight_dict[node_pair2][node_pair1]
if node1_index_amr1 > node2_index_amr1:
# swap node_pair1 and node_pair2
node_pair1 = (node2_index_amr1, node2_index_amr2)
node_pair2 = (node1_index_amr1, node1_index_amr2)
if node_pair1 in weight_dict:
if node_pair2 in weight_dict[node_pair1]:
weight_dict[node_pair1][node_pair2] += 1
else:
weight_dict[node_pair1][node_pair2] = 1
else:
weight_dict[node_pair1] = {-1: 0, node_pair2: 1}
if node_pair2 in weight_dict:
if node_pair1 in weight_dict[node_pair2]:
weight_dict[node_pair2][node_pair1] += 1
else:
weight_dict[node_pair2][node_pair1] = 1
else:
weight_dict[node_pair2] = {-1: 0, node_pair1: 1}
else:
# two node pairs are the same. So we only update weight_dict once.
# this generally should not happen.
if node_pair1 in weight_dict:
weight_dict[node_pair1][-1] += 1
else:
weight_dict[node_pair1] = {-1: 1}
return candidate_mapping, weight_dict | [
"def",
"compute_pool",
"(",
"instance1",
",",
"attribute1",
",",
"relation1",
",",
"instance2",
",",
"attribute2",
",",
"relation2",
",",
"prefix1",
",",
"prefix2",
",",
"doinstance",
"=",
"True",
",",
"doattribute",
"=",
"True",
",",
"dorelation",
"=",
"True",
")",
":",
"candidate_mapping",
"=",
"[",
"]",
"weight_dict",
"=",
"{",
"}",
"for",
"instance1_item",
"in",
"instance1",
":",
"# each candidate mapping is a set of node indices",
"candidate_mapping",
".",
"append",
"(",
"set",
"(",
")",
")",
"if",
"doinstance",
":",
"for",
"instance2_item",
"in",
"instance2",
":",
"# if both triples are instance triples and have the same value",
"if",
"normalize",
"(",
"instance1_item",
"[",
"0",
"]",
")",
"==",
"normalize",
"(",
"instance2_item",
"[",
"0",
"]",
")",
"and",
"normalize",
"(",
"instance1_item",
"[",
"2",
"]",
")",
"==",
"normalize",
"(",
"instance2_item",
"[",
"2",
"]",
")",
":",
"# get node index by stripping the prefix",
"node1_index",
"=",
"int",
"(",
"instance1_item",
"[",
"1",
"]",
"[",
"len",
"(",
"prefix1",
")",
":",
"]",
")",
"node2_index",
"=",
"int",
"(",
"instance2_item",
"[",
"1",
"]",
"[",
"len",
"(",
"prefix2",
")",
":",
"]",
")",
"candidate_mapping",
"[",
"node1_index",
"]",
".",
"add",
"(",
"node2_index",
")",
"node_pair",
"=",
"(",
"node1_index",
",",
"node2_index",
")",
"# use -1 as key in weight_dict for instance triples and attribute triples",
"if",
"node_pair",
"in",
"weight_dict",
":",
"weight_dict",
"[",
"node_pair",
"]",
"[",
"-",
"1",
"]",
"+=",
"1",
"else",
":",
"weight_dict",
"[",
"node_pair",
"]",
"=",
"{",
"}",
"weight_dict",
"[",
"node_pair",
"]",
"[",
"-",
"1",
"]",
"=",
"1",
"if",
"doattribute",
":",
"for",
"attribute1_item",
"in",
"attribute1",
":",
"for",
"attribute2_item",
"in",
"attribute2",
":",
"# if both attribute relation triple have the same relation name and value",
"if",
"normalize",
"(",
"attribute1_item",
"[",
"0",
"]",
")",
"==",
"normalize",
"(",
"attribute2_item",
"[",
"0",
"]",
")",
"and",
"normalize",
"(",
"attribute1_item",
"[",
"2",
"]",
")",
"==",
"normalize",
"(",
"attribute2_item",
"[",
"2",
"]",
")",
":",
"node1_index",
"=",
"int",
"(",
"attribute1_item",
"[",
"1",
"]",
"[",
"len",
"(",
"prefix1",
")",
":",
"]",
")",
"node2_index",
"=",
"int",
"(",
"attribute2_item",
"[",
"1",
"]",
"[",
"len",
"(",
"prefix2",
")",
":",
"]",
")",
"candidate_mapping",
"[",
"node1_index",
"]",
".",
"add",
"(",
"node2_index",
")",
"node_pair",
"=",
"(",
"node1_index",
",",
"node2_index",
")",
"# use -1 as key in weight_dict for instance triples and attribute triples",
"if",
"node_pair",
"in",
"weight_dict",
":",
"weight_dict",
"[",
"node_pair",
"]",
"[",
"-",
"1",
"]",
"+=",
"1",
"else",
":",
"weight_dict",
"[",
"node_pair",
"]",
"=",
"{",
"}",
"weight_dict",
"[",
"node_pair",
"]",
"[",
"-",
"1",
"]",
"=",
"1",
"if",
"dorelation",
":",
"for",
"relation1_item",
"in",
"relation1",
":",
"for",
"relation2_item",
"in",
"relation2",
":",
"# if both relation share the same name",
"if",
"normalize",
"(",
"relation1_item",
"[",
"0",
"]",
")",
"==",
"normalize",
"(",
"relation2_item",
"[",
"0",
"]",
")",
":",
"node1_index_amr1",
"=",
"int",
"(",
"relation1_item",
"[",
"1",
"]",
"[",
"len",
"(",
"prefix1",
")",
":",
"]",
")",
"node1_index_amr2",
"=",
"int",
"(",
"relation2_item",
"[",
"1",
"]",
"[",
"len",
"(",
"prefix2",
")",
":",
"]",
")",
"node2_index_amr1",
"=",
"int",
"(",
"relation1_item",
"[",
"2",
"]",
"[",
"len",
"(",
"prefix1",
")",
":",
"]",
")",
"node2_index_amr2",
"=",
"int",
"(",
"relation2_item",
"[",
"2",
"]",
"[",
"len",
"(",
"prefix2",
")",
":",
"]",
")",
"# add mapping between two nodes",
"candidate_mapping",
"[",
"node1_index_amr1",
"]",
".",
"add",
"(",
"node1_index_amr2",
")",
"candidate_mapping",
"[",
"node2_index_amr1",
"]",
".",
"add",
"(",
"node2_index_amr2",
")",
"node_pair1",
"=",
"(",
"node1_index_amr1",
",",
"node1_index_amr2",
")",
"node_pair2",
"=",
"(",
"node2_index_amr1",
",",
"node2_index_amr2",
")",
"if",
"node_pair2",
"!=",
"node_pair1",
":",
"# update weight_dict weight. Note that we need to update both entries for future search",
"# i.e weight_dict[node_pair1][node_pair2]",
"# weight_dict[node_pair2][node_pair1]",
"if",
"node1_index_amr1",
">",
"node2_index_amr1",
":",
"# swap node_pair1 and node_pair2",
"node_pair1",
"=",
"(",
"node2_index_amr1",
",",
"node2_index_amr2",
")",
"node_pair2",
"=",
"(",
"node1_index_amr1",
",",
"node1_index_amr2",
")",
"if",
"node_pair1",
"in",
"weight_dict",
":",
"if",
"node_pair2",
"in",
"weight_dict",
"[",
"node_pair1",
"]",
":",
"weight_dict",
"[",
"node_pair1",
"]",
"[",
"node_pair2",
"]",
"+=",
"1",
"else",
":",
"weight_dict",
"[",
"node_pair1",
"]",
"[",
"node_pair2",
"]",
"=",
"1",
"else",
":",
"weight_dict",
"[",
"node_pair1",
"]",
"=",
"{",
"-",
"1",
":",
"0",
",",
"node_pair2",
":",
"1",
"}",
"if",
"node_pair2",
"in",
"weight_dict",
":",
"if",
"node_pair1",
"in",
"weight_dict",
"[",
"node_pair2",
"]",
":",
"weight_dict",
"[",
"node_pair2",
"]",
"[",
"node_pair1",
"]",
"+=",
"1",
"else",
":",
"weight_dict",
"[",
"node_pair2",
"]",
"[",
"node_pair1",
"]",
"=",
"1",
"else",
":",
"weight_dict",
"[",
"node_pair2",
"]",
"=",
"{",
"-",
"1",
":",
"0",
",",
"node_pair1",
":",
"1",
"}",
"else",
":",
"# two node pairs are the same. So we only update weight_dict once.",
"# this generally should not happen.",
"if",
"node_pair1",
"in",
"weight_dict",
":",
"weight_dict",
"[",
"node_pair1",
"]",
"[",
"-",
"1",
"]",
"+=",
"1",
"else",
":",
"weight_dict",
"[",
"node_pair1",
"]",
"=",
"{",
"-",
"1",
":",
"1",
"}",
"return",
"candidate_mapping",
",",
"weight_dict"
]
| 58.731481 | 26.157407 |
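A stripped-down, instance-triples-only walk-through of the candidate_mapping / weight_dict bookkeeping described in the docstring; normalize is simplified to lower-casing and the 'a' / 'b' node prefixes are assumptions for the toy data:
normalize = str.lower

instance1 = [('instance', 'a0', 'Dog'), ('instance', 'a1', 'bark-01')]
instance2 = [('instance', 'b0', 'dog'), ('instance', 'b1', 'bark-01')]

candidate_mapping = [set() for _ in instance1]
weight_dict = {}
for rel1, node1, value1 in instance1:
    for rel2, node2, value2 in instance2:
        if normalize(rel1) == normalize(rel2) and normalize(value1) == normalize(value2):
            i, j = int(node1[1:]), int(node2[1:])
            candidate_mapping[i].add(j)
            weight_dict.setdefault((i, j), {-1: 0})[-1] += 1

print(candidate_mapping)  # [{0}, {1}]
print(weight_dict)        # {(0, 0): {-1: 1}, (1, 1): {-1: 1}}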
def add_javascripts(self, *js_files):
"""add javascripts files in HTML body"""
        # create the script tag if it doesn't exist
if self.main_soup.script is None:
script_tag = self.main_soup.new_tag('script')
self.main_soup.body.append(script_tag)
for js_file in js_files:
self.main_soup.script.append(self._text_file(js_file)) | [
"def",
"add_javascripts",
"(",
"self",
",",
"*",
"js_files",
")",
":",
"# create the script tag if don't exists",
"if",
"self",
".",
"main_soup",
".",
"script",
"is",
"None",
":",
"script_tag",
"=",
"self",
".",
"main_soup",
".",
"new_tag",
"(",
"'script'",
")",
"self",
".",
"main_soup",
".",
"body",
".",
"append",
"(",
"script_tag",
")",
"for",
"js_file",
"in",
"js_files",
":",
"self",
".",
"main_soup",
".",
"script",
".",
"append",
"(",
"self",
".",
"_text_file",
"(",
"js_file",
")",
")"
]
| 42 | 12.444444 |
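A minimal BeautifulSoup illustration of the create-then-append pattern above; it assumes bs4 is installed and inlines a JavaScript string where the original reads it from a file via _text_file:
from bs4 import BeautifulSoup

soup = BeautifulSoup('<html><body></body></html>', 'html.parser')
if soup.script is None:                 # create the script tag if missing
    soup.body.append(soup.new_tag('script'))
soup.script.append("console.log('hello');")
print(soup)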
def get_task_subtask_positions_objs(client, task_id):
'''
Gets a list of the positions of a single task's subtasks
Each task should (will?) only have one positions object defining how its subtasks are laid out
'''
params = {
'task_id' : int(task_id)
}
response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)
return response.json() | [
"def",
"get_task_subtask_positions_objs",
"(",
"client",
",",
"task_id",
")",
":",
"params",
"=",
"{",
"'task_id'",
":",
"int",
"(",
"task_id",
")",
"}",
"response",
"=",
"client",
".",
"authenticated_request",
"(",
"client",
".",
"api",
".",
"Endpoints",
".",
"SUBTASK_POSITIONS",
",",
"params",
"=",
"params",
")",
"return",
"response",
".",
"json",
"(",
")"
]
| 37.454545 | 29.636364 |
def apply_mtlist_budget_obs(list_filename,gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970"):
""" process an MT3D list file to extract mass budget entries.
Parameters
----------
list_filename : str
the mt3d list file
gw_filename : str
the name of the output file with gw mass budget information.
Default is "mtlist_gw.dat"
sw_filename : str
the name of the output file with sw mass budget information.
Default is "mtlist_sw.dat"
    start_datetime : str
        a str that can be cast to a pandas.Timestamp. Used to give
        observations a meaningful name
Returns
-------
gw : pandas.DataFrame
the gw mass dataframe
sw : pandas.DataFrame (optional)
the sw mass dataframe
Note
----
requires flopy
if SFT is not active, no SW mass budget will be returned
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mt = flopy.utils.MtListBudget(list_filename)
gw, sw = mt.parse(start_datetime=start_datetime, diff=True)
gw = gw.drop([col for col in gw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))], axis=1)
gw.to_csv(gw_filename, sep=' ', index_label="datetime", date_format="%Y%m%d")
if sw is not None:
sw = sw.drop([col for col in sw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))], axis=1)
sw.to_csv(sw_filename, sep=' ', index_label="datetime", date_format="%Y%m%d")
return gw, sw | [
"def",
"apply_mtlist_budget_obs",
"(",
"list_filename",
",",
"gw_filename",
"=",
"\"mtlist_gw.dat\"",
",",
"sw_filename",
"=",
"\"mtlist_sw.dat\"",
",",
"start_datetime",
"=",
"\"1-1-1970\"",
")",
":",
"try",
":",
"import",
"flopy",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"\"error import flopy: {0}\"",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
")",
"mt",
"=",
"flopy",
".",
"utils",
".",
"MtListBudget",
"(",
"list_filename",
")",
"gw",
",",
"sw",
"=",
"mt",
".",
"parse",
"(",
"start_datetime",
"=",
"start_datetime",
",",
"diff",
"=",
"True",
")",
"gw",
"=",
"gw",
".",
"drop",
"(",
"[",
"col",
"for",
"col",
"in",
"gw",
".",
"columns",
"for",
"drop_col",
"in",
"[",
"\"kper\"",
",",
"\"kstp\"",
",",
"\"tkstp\"",
"]",
"if",
"(",
"col",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"drop_col",
")",
")",
"]",
",",
"axis",
"=",
"1",
")",
"gw",
".",
"to_csv",
"(",
"gw_filename",
",",
"sep",
"=",
"' '",
",",
"index_label",
"=",
"\"datetime\"",
",",
"date_format",
"=",
"\"%Y%m%d\"",
")",
"if",
"sw",
"is",
"not",
"None",
":",
"sw",
"=",
"sw",
".",
"drop",
"(",
"[",
"col",
"for",
"col",
"in",
"sw",
".",
"columns",
"for",
"drop_col",
"in",
"[",
"\"kper\"",
",",
"\"kstp\"",
",",
"\"tkstp\"",
"]",
"if",
"(",
"col",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"drop_col",
")",
")",
"]",
",",
"axis",
"=",
"1",
")",
"sw",
".",
"to_csv",
"(",
"sw_filename",
",",
"sep",
"=",
"' '",
",",
"index_label",
"=",
"\"datetime\"",
",",
"date_format",
"=",
"\"%Y%m%d\"",
")",
"return",
"gw",
",",
"sw"
]
| 35.142857 | 21.55102 |
def maskedNanPercentile(maskedArray, percentiles, *args, **kwargs):
""" Calculates np.nanpercentile on the non-masked values
"""
#https://docs.scipy.org/doc/numpy/reference/maskedarray.generic.html#accessing-the-data
awm = ArrayWithMask.createFromMaskedArray(maskedArray)
maskIdx = awm.maskIndex()
validData = awm.data[~maskIdx]
if len(validData) >= 1:
result = np.nanpercentile(validData, percentiles, *args, **kwargs)
else:
        # np.nanpercentile on an empty list returns a single NaN; we correct this here.
result = len(percentiles) * [np.nan]
assert len(result) == len(percentiles), \
"shape mismatch: {} != {}".format(len(result), len(percentiles))
return result | [
"def",
"maskedNanPercentile",
"(",
"maskedArray",
",",
"percentiles",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"#https://docs.scipy.org/doc/numpy/reference/maskedarray.generic.html#accessing-the-data",
"awm",
"=",
"ArrayWithMask",
".",
"createFromMaskedArray",
"(",
"maskedArray",
")",
"maskIdx",
"=",
"awm",
".",
"maskIndex",
"(",
")",
"validData",
"=",
"awm",
".",
"data",
"[",
"~",
"maskIdx",
"]",
"if",
"len",
"(",
"validData",
")",
">=",
"1",
":",
"result",
"=",
"np",
".",
"nanpercentile",
"(",
"validData",
",",
"percentiles",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"# If np.nanpercentile on an empty list only returns a single Nan. We correct this here.",
"result",
"=",
"len",
"(",
"percentiles",
")",
"*",
"[",
"np",
".",
"nan",
"]",
"assert",
"len",
"(",
"result",
")",
"==",
"len",
"(",
"percentiles",
")",
",",
"\"shape mismatch: {} != {}\"",
".",
"format",
"(",
"len",
"(",
"result",
")",
",",
"len",
"(",
"percentiles",
")",
")",
"return",
"result"
]
| 36.45 | 25.5 |
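A NumPy-only sketch of the same idea, assuming only numpy: take the percentiles over the unmasked values of a masked array and let nanpercentile ignore NaNs:
import numpy as np

masked = np.ma.masked_array(data=[1.0, 2.0, np.nan, 4.0, 100.0],
                            mask=[False, False, False, False, True])
valid = masked.data[~np.ma.getmaskarray(masked)]
percentiles = [25, 50, 75]
result = np.nanpercentile(valid, percentiles) if len(valid) else len(percentiles) * [np.nan]
print(result)  # [1.5 2.  3. ] -- the NaN is ignored and the masked 100.0 is excluded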
def incoming_messages(self) -> t.List[t.Tuple[float, bytes]]:
"""Consume the receive buffer and return the messages.
        If new messages are added to the queue while this function is being
        processed, they will not be returned. This ensures that this method
        terminates in a timely manner.
"""
approximate_messages = self._receive_buffer.qsize()
messages = []
for _ in range(approximate_messages):
try:
messages.append(self._receive_buffer.get_nowait())
except queue.Empty:
break
return messages | [
"def",
"incoming_messages",
"(",
"self",
")",
"->",
"t",
".",
"List",
"[",
"t",
".",
"Tuple",
"[",
"float",
",",
"bytes",
"]",
"]",
":",
"approximate_messages",
"=",
"self",
".",
"_receive_buffer",
".",
"qsize",
"(",
")",
"messages",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"approximate_messages",
")",
":",
"try",
":",
"messages",
".",
"append",
"(",
"self",
".",
"_receive_buffer",
".",
"get_nowait",
"(",
")",
")",
"except",
"queue",
".",
"Empty",
":",
"break",
"return",
"messages"
]
| 40.2 | 19.866667 |
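A standalone sketch of the drain pattern: snapshot qsize() first so a producer that keeps adding items cannot stop the loop from terminating (the buffer name is an assumption; the original uses self._receive_buffer):
import queue

buffer = queue.Queue()
for item in [(0.1, b'a'), (0.2, b'b')]:
    buffer.put(item)

messages = []
for _ in range(buffer.qsize()):   # snapshot, not re-evaluated each iteration
    try:
        messages.append(buffer.get_nowait())
    except queue.Empty:
        break
print(messages)  # [(0.1, b'a'), (0.2, b'b')]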
def name(name, validator=None):
""" Set a name on a validator callable.
Useful for user-friendly reporting when using lambdas to populate the [`Invalid.expected`](#invalid) field:
```python
from good import Schema, name
Schema(lambda x: int(x))('a')
#-> Invalid: invalid literal for int(): expected <lambda>(), got
Schema(name('int()', lambda x: int(x))('a')
#-> Invalid: invalid literal for int(): expected int(), got a
```
Note that it is only useful with lambdas, since function name is used if available:
see notes on [Schema Callables](#callables).
:param name: Name to assign on the validator callable
:type name: unicode
:param validator: Validator callable. If not provided -- a decorator is returned instead:
```python
from good import name
@name(u'int()')
def int(v):
return int(v)
```
:type validator: callable
:return: The same validator callable
:rtype: callable
"""
# Decorator mode
if validator is None:
def decorator(f):
f.name = name
return f
return decorator
# Direct mode
validator.name = name
return validator | [
"def",
"name",
"(",
"name",
",",
"validator",
"=",
"None",
")",
":",
"# Decorator mode",
"if",
"validator",
"is",
"None",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"f",
".",
"name",
"=",
"name",
"return",
"f",
"return",
"decorator",
"# Direct mode",
"validator",
".",
"name",
"=",
"name",
"return",
"validator"
]
| 27.44186 | 23.767442 |
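Both documented usage modes, shown with a local copy of the helper so the snippet runs even if the `good` library is not installed:
def name(label, validator=None):
    if validator is None:            # decorator mode
        def decorator(f):
            f.name = label
            return f
        return decorator
    validator.name = label           # direct mode
    return validator

as_int = name(u'int()', lambda x: int(x))

@name(u'float()')
def as_float(v):
    return float(v)

print(as_int.name, as_float.name)  # int() float()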
def get_subject(self, identifier):
"""
Build a Subject XML block for a SAML 1.1
AuthenticationStatement or AttributeStatement.
"""
subject = etree.Element('Subject')
name = etree.SubElement(subject, 'NameIdentifier')
name.text = identifier
subject_confirmation = etree.SubElement(subject, 'SubjectConfirmation')
method = etree.SubElement(subject_confirmation, 'ConfirmationMethod')
method.text = self.confirmation_method
return subject | [
"def",
"get_subject",
"(",
"self",
",",
"identifier",
")",
":",
"subject",
"=",
"etree",
".",
"Element",
"(",
"'Subject'",
")",
"name",
"=",
"etree",
".",
"SubElement",
"(",
"subject",
",",
"'NameIdentifier'",
")",
"name",
".",
"text",
"=",
"identifier",
"subject_confirmation",
"=",
"etree",
".",
"SubElement",
"(",
"subject",
",",
"'SubjectConfirmation'",
")",
"method",
"=",
"etree",
".",
"SubElement",
"(",
"subject_confirmation",
",",
"'ConfirmationMethod'",
")",
"method",
".",
"text",
"=",
"self",
".",
"confirmation_method",
"return",
"subject"
]
| 42.666667 | 13.166667 |
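A standard-library version of the same Subject construction (the original presumably uses lxml.etree; the element names match, and the identifier and confirmation-method values below are only examples):
from xml.etree import ElementTree as etree

subject = etree.Element('Subject')
name = etree.SubElement(subject, 'NameIdentifier')
name.text = 'user@example.org'
confirmation = etree.SubElement(subject, 'SubjectConfirmation')
method = etree.SubElement(confirmation, 'ConfirmationMethod')
method.text = 'urn:oasis:names:tc:SAML:1.0:cm:bearer'
print(etree.tostring(subject).decode())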
def reissueOverLongJobs(self):
"""
Check each issued job - if it is running for longer than desirable
issue a kill instruction.
Wait for the job to die then we pass the job to processFinishedJob.
"""
maxJobDuration = self.config.maxJobDuration
jobsToKill = []
if maxJobDuration < 10000000: # We won't bother doing anything if rescue time > 16 weeks.
runningJobs = self.batchSystem.getRunningBatchJobIDs()
for jobBatchSystemID in list(runningJobs.keys()):
if runningJobs[jobBatchSystemID] > maxJobDuration:
logger.warn("The job: %s has been running for: %s seconds, more than the "
"max job duration: %s, we'll kill it",
str(self.jobBatchSystemIDToIssuedJob[jobBatchSystemID].jobStoreID),
str(runningJobs[jobBatchSystemID]),
str(maxJobDuration))
jobsToKill.append(jobBatchSystemID)
self.killJobs(jobsToKill) | [
"def",
"reissueOverLongJobs",
"(",
"self",
")",
":",
"maxJobDuration",
"=",
"self",
".",
"config",
".",
"maxJobDuration",
"jobsToKill",
"=",
"[",
"]",
"if",
"maxJobDuration",
"<",
"10000000",
":",
"# We won't bother doing anything if rescue time > 16 weeks.",
"runningJobs",
"=",
"self",
".",
"batchSystem",
".",
"getRunningBatchJobIDs",
"(",
")",
"for",
"jobBatchSystemID",
"in",
"list",
"(",
"runningJobs",
".",
"keys",
"(",
")",
")",
":",
"if",
"runningJobs",
"[",
"jobBatchSystemID",
"]",
">",
"maxJobDuration",
":",
"logger",
".",
"warn",
"(",
"\"The job: %s has been running for: %s seconds, more than the \"",
"\"max job duration: %s, we'll kill it\"",
",",
"str",
"(",
"self",
".",
"jobBatchSystemIDToIssuedJob",
"[",
"jobBatchSystemID",
"]",
".",
"jobStoreID",
")",
",",
"str",
"(",
"runningJobs",
"[",
"jobBatchSystemID",
"]",
")",
",",
"str",
"(",
"maxJobDuration",
")",
")",
"jobsToKill",
".",
"append",
"(",
"jobBatchSystemID",
")",
"self",
".",
"killJobs",
"(",
"jobsToKill",
")"
]
| 56.473684 | 23.421053 |
def sql_string_literal(text: str) -> str:
"""
Transforms text into its ANSI SQL-quoted version, e.g. (in Python ``repr()``
format):
.. code-block:: none
"some string" -> "'some string'"
"Jack's dog" -> "'Jack''s dog'"
"""
# ANSI SQL: http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt
# <character string literal>
return SQUOTE + text.replace(SQUOTE, DOUBLE_SQUOTE) + SQUOTE | [
"def",
"sql_string_literal",
"(",
"text",
":",
"str",
")",
"->",
"str",
":",
"# ANSI SQL: http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt",
"# <character string literal>",
"return",
"SQUOTE",
"+",
"text",
".",
"replace",
"(",
"SQUOTE",
",",
"DOUBLE_SQUOTE",
")",
"+",
"SQUOTE"
]
| 32.615385 | 18 |
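A runnable sketch with the two quote constants spelled out (SQUOTE and DOUBLE_SQUOTE are defined elsewhere in the original module):
SQUOTE = "'"
DOUBLE_SQUOTE = "''"

def sql_string_literal(text: str) -> str:
    return SQUOTE + text.replace(SQUOTE, DOUBLE_SQUOTE) + SQUOTE

print(sql_string_literal("Jack's dog"))  # 'Jack''s dog'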
def euclidean_random_projection_split(data, indices, rng_state):
"""Given a set of ``indices`` for data points from ``data``, create
a random hyperplane to split the data, returning two arrays indices
that fall on either side of the hyperplane. This is the basis for a
random projection tree, which simply uses this splitting recursively.
This particular split uses euclidean distance to determine the hyperplane
and which side each data sample falls on.
Parameters
----------
data: array of shape (n_samples, n_features)
The original data to be split
indices: array of shape (tree_node_size,)
The indices of the elements in the ``data`` array that are to
be split in the current operation.
rng_state: array of int64, shape (3,)
The internal state of the rng
Returns
-------
indices_left: array
The elements of ``indices`` that fall on the "left" side of the
random hyperplane.
indices_right: array
        The elements of ``indices`` that fall on the "right" side of the
random hyperplane.
"""
dim = data.shape[1]
# Select two random points, set the hyperplane between them
left_index = tau_rand_int(rng_state) % indices.shape[0]
right_index = tau_rand_int(rng_state) % indices.shape[0]
right_index += left_index == right_index
right_index = right_index % indices.shape[0]
left = indices[left_index]
right = indices[right_index]
# Compute the normal vector to the hyperplane (the vector between
# the two points) and the offset from the origin
hyperplane_offset = 0.0
hyperplane_vector = np.empty(dim, dtype=np.float32)
for d in range(dim):
hyperplane_vector[d] = data[left, d] - data[right, d]
hyperplane_offset -= (
hyperplane_vector[d] * (data[left, d] + data[right, d]) / 2.0
)
# For each point compute the margin (project into normal vector, add offset)
# If we are on lower side of the hyperplane put in one pile, otherwise
# put it in the other pile (if we hit hyperplane on the nose, flip a coin)
n_left = 0
n_right = 0
side = np.empty(indices.shape[0], np.int8)
for i in range(indices.shape[0]):
margin = hyperplane_offset
for d in range(dim):
margin += hyperplane_vector[d] * data[indices[i], d]
if margin == 0:
side[i] = tau_rand_int(rng_state) % 2
if side[i] == 0:
n_left += 1
else:
n_right += 1
elif margin > 0:
side[i] = 0
n_left += 1
else:
side[i] = 1
n_right += 1
# Now that we have the counts allocate arrays
indices_left = np.empty(n_left, dtype=np.int64)
indices_right = np.empty(n_right, dtype=np.int64)
# Populate the arrays with indices according to which side they fell on
n_left = 0
n_right = 0
for i in range(side.shape[0]):
if side[i] == 0:
indices_left[n_left] = indices[i]
n_left += 1
else:
indices_right[n_right] = indices[i]
n_right += 1
return indices_left, indices_right, hyperplane_vector, hyperplane_offset | [
"def",
"euclidean_random_projection_split",
"(",
"data",
",",
"indices",
",",
"rng_state",
")",
":",
"dim",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"# Select two random points, set the hyperplane between them",
"left_index",
"=",
"tau_rand_int",
"(",
"rng_state",
")",
"%",
"indices",
".",
"shape",
"[",
"0",
"]",
"right_index",
"=",
"tau_rand_int",
"(",
"rng_state",
")",
"%",
"indices",
".",
"shape",
"[",
"0",
"]",
"right_index",
"+=",
"left_index",
"==",
"right_index",
"right_index",
"=",
"right_index",
"%",
"indices",
".",
"shape",
"[",
"0",
"]",
"left",
"=",
"indices",
"[",
"left_index",
"]",
"right",
"=",
"indices",
"[",
"right_index",
"]",
"# Compute the normal vector to the hyperplane (the vector between",
"# the two points) and the offset from the origin",
"hyperplane_offset",
"=",
"0.0",
"hyperplane_vector",
"=",
"np",
".",
"empty",
"(",
"dim",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"d",
"in",
"range",
"(",
"dim",
")",
":",
"hyperplane_vector",
"[",
"d",
"]",
"=",
"data",
"[",
"left",
",",
"d",
"]",
"-",
"data",
"[",
"right",
",",
"d",
"]",
"hyperplane_offset",
"-=",
"(",
"hyperplane_vector",
"[",
"d",
"]",
"*",
"(",
"data",
"[",
"left",
",",
"d",
"]",
"+",
"data",
"[",
"right",
",",
"d",
"]",
")",
"/",
"2.0",
")",
"# For each point compute the margin (project into normal vector, add offset)",
"# If we are on lower side of the hyperplane put in one pile, otherwise",
"# put it in the other pile (if we hit hyperplane on the nose, flip a coin)",
"n_left",
"=",
"0",
"n_right",
"=",
"0",
"side",
"=",
"np",
".",
"empty",
"(",
"indices",
".",
"shape",
"[",
"0",
"]",
",",
"np",
".",
"int8",
")",
"for",
"i",
"in",
"range",
"(",
"indices",
".",
"shape",
"[",
"0",
"]",
")",
":",
"margin",
"=",
"hyperplane_offset",
"for",
"d",
"in",
"range",
"(",
"dim",
")",
":",
"margin",
"+=",
"hyperplane_vector",
"[",
"d",
"]",
"*",
"data",
"[",
"indices",
"[",
"i",
"]",
",",
"d",
"]",
"if",
"margin",
"==",
"0",
":",
"side",
"[",
"i",
"]",
"=",
"tau_rand_int",
"(",
"rng_state",
")",
"%",
"2",
"if",
"side",
"[",
"i",
"]",
"==",
"0",
":",
"n_left",
"+=",
"1",
"else",
":",
"n_right",
"+=",
"1",
"elif",
"margin",
">",
"0",
":",
"side",
"[",
"i",
"]",
"=",
"0",
"n_left",
"+=",
"1",
"else",
":",
"side",
"[",
"i",
"]",
"=",
"1",
"n_right",
"+=",
"1",
"# Now that we have the counts allocate arrays",
"indices_left",
"=",
"np",
".",
"empty",
"(",
"n_left",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"indices_right",
"=",
"np",
".",
"empty",
"(",
"n_right",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"# Populate the arrays with indices according to which side they fell on",
"n_left",
"=",
"0",
"n_right",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"side",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"side",
"[",
"i",
"]",
"==",
"0",
":",
"indices_left",
"[",
"n_left",
"]",
"=",
"indices",
"[",
"i",
"]",
"n_left",
"+=",
"1",
"else",
":",
"indices_right",
"[",
"n_right",
"]",
"=",
"indices",
"[",
"i",
"]",
"n_right",
"+=",
"1",
"return",
"indices_left",
",",
"indices_right",
",",
"hyperplane_vector",
",",
"hyperplane_offset"
]
| 34.380435 | 21.228261 |
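A compact NumPy illustration of the split described in the docstring, on random toy data: pick two points, use the vector between them as the hyperplane normal, and split by the sign of the margin (the full version above also breaks margin ties with a coin flip):
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(10, 3)).astype(np.float32)
indices = np.arange(10)

left, right = indices[rng.choice(len(indices), size=2, replace=False)]
normal = data[left] - data[right]
offset = -np.dot(normal, (data[left] + data[right]) / 2.0)

margins = data[indices] @ normal + offset
indices_left = indices[margins > 0]
indices_right = indices[margins <= 0]
print(indices_left, indices_right)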
def _add_onchain_locksroot_to_channel_settled_state_changes(
raiden: RaidenService,
storage: SQLiteStorage,
) -> None:
""" Adds `our_onchain_locksroot` and `partner_onchain_locksroot` to
ContractReceiveChannelSettled. """
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceiveChannelSettled'),
],
)
for state_changes_batch in batch_query:
updated_state_changes = list()
for state_change in state_changes_batch:
state_change_data = json.loads(state_change.data)
msg = 'v18 state changes cant contain our_onchain_locksroot'
assert 'our_onchain_locksroot' not in state_change_data, msg
msg = 'v18 state changes cant contain partner_onchain_locksroot'
assert 'partner_onchain_locksroot' not in state_change_data, msg
token_network_identifier = state_change_data['token_network_identifier']
channel_identifier = state_change_data['channel_identifier']
channel_new_state_change = _find_channel_new_state_change(
storage=storage,
token_network_address=token_network_identifier,
channel_identifier=channel_identifier,
)
if not channel_new_state_change.data:
raise RaidenUnrecoverableError(
f'Could not find the state change for channel {channel_identifier}, '
f'token network address: {token_network_identifier} being created. ',
)
channel_state_data = json.loads(channel_new_state_change.data)
new_channel_state = channel_state_data['channel_state']
canonical_identifier = CanonicalIdentifier(
chain_identifier=-1,
token_network_address=to_canonical_address(token_network_identifier),
channel_identifier=int(channel_identifier),
)
our_locksroot, partner_locksroot = get_onchain_locksroots(
chain=raiden.chain,
canonical_identifier=canonical_identifier,
participant1=to_canonical_address(new_channel_state['our_state']['address']),
participant2=to_canonical_address(new_channel_state['partner_state']['address']),
block_identifier='latest',
)
state_change_data['our_onchain_locksroot'] = serialize_bytes(
our_locksroot,
)
state_change_data['partner_onchain_locksroot'] = serialize_bytes(
partner_locksroot,
)
updated_state_changes.append((
json.dumps(state_change_data),
state_change.state_change_identifier,
))
storage.update_state_changes(updated_state_changes) | [
"def",
"_add_onchain_locksroot_to_channel_settled_state_changes",
"(",
"raiden",
":",
"RaidenService",
",",
"storage",
":",
"SQLiteStorage",
",",
")",
"->",
"None",
":",
"batch_size",
"=",
"50",
"batch_query",
"=",
"storage",
".",
"batch_query_state_changes",
"(",
"batch_size",
"=",
"batch_size",
",",
"filters",
"=",
"[",
"(",
"'_type'",
",",
"'raiden.transfer.state_change.ContractReceiveChannelSettled'",
")",
",",
"]",
",",
")",
"for",
"state_changes_batch",
"in",
"batch_query",
":",
"updated_state_changes",
"=",
"list",
"(",
")",
"for",
"state_change",
"in",
"state_changes_batch",
":",
"state_change_data",
"=",
"json",
".",
"loads",
"(",
"state_change",
".",
"data",
")",
"msg",
"=",
"'v18 state changes cant contain our_onchain_locksroot'",
"assert",
"'our_onchain_locksroot'",
"not",
"in",
"state_change_data",
",",
"msg",
"msg",
"=",
"'v18 state changes cant contain partner_onchain_locksroot'",
"assert",
"'partner_onchain_locksroot'",
"not",
"in",
"state_change_data",
",",
"msg",
"token_network_identifier",
"=",
"state_change_data",
"[",
"'token_network_identifier'",
"]",
"channel_identifier",
"=",
"state_change_data",
"[",
"'channel_identifier'",
"]",
"channel_new_state_change",
"=",
"_find_channel_new_state_change",
"(",
"storage",
"=",
"storage",
",",
"token_network_address",
"=",
"token_network_identifier",
",",
"channel_identifier",
"=",
"channel_identifier",
",",
")",
"if",
"not",
"channel_new_state_change",
".",
"data",
":",
"raise",
"RaidenUnrecoverableError",
"(",
"f'Could not find the state change for channel {channel_identifier}, '",
"f'token network address: {token_network_identifier} being created. '",
",",
")",
"channel_state_data",
"=",
"json",
".",
"loads",
"(",
"channel_new_state_change",
".",
"data",
")",
"new_channel_state",
"=",
"channel_state_data",
"[",
"'channel_state'",
"]",
"canonical_identifier",
"=",
"CanonicalIdentifier",
"(",
"chain_identifier",
"=",
"-",
"1",
",",
"token_network_address",
"=",
"to_canonical_address",
"(",
"token_network_identifier",
")",
",",
"channel_identifier",
"=",
"int",
"(",
"channel_identifier",
")",
",",
")",
"our_locksroot",
",",
"partner_locksroot",
"=",
"get_onchain_locksroots",
"(",
"chain",
"=",
"raiden",
".",
"chain",
",",
"canonical_identifier",
"=",
"canonical_identifier",
",",
"participant1",
"=",
"to_canonical_address",
"(",
"new_channel_state",
"[",
"'our_state'",
"]",
"[",
"'address'",
"]",
")",
",",
"participant2",
"=",
"to_canonical_address",
"(",
"new_channel_state",
"[",
"'partner_state'",
"]",
"[",
"'address'",
"]",
")",
",",
"block_identifier",
"=",
"'latest'",
",",
")",
"state_change_data",
"[",
"'our_onchain_locksroot'",
"]",
"=",
"serialize_bytes",
"(",
"our_locksroot",
",",
")",
"state_change_data",
"[",
"'partner_onchain_locksroot'",
"]",
"=",
"serialize_bytes",
"(",
"partner_locksroot",
",",
")",
"updated_state_changes",
".",
"append",
"(",
"(",
"json",
".",
"dumps",
"(",
"state_change_data",
")",
",",
"state_change",
".",
"state_change_identifier",
",",
")",
")",
"storage",
".",
"update_state_changes",
"(",
"updated_state_changes",
")"
]
| 43.530303 | 24.515152 |
def pre_factor_kkt(Q, G, A):
""" Perform all one-time factorizations and cache relevant matrix products"""
nineq, nz, neq, _ = get_sizes(G, A)
# S = [ A Q^{-1} A^T A Q^{-1} G^T ]
# [ G Q^{-1} A^T G Q^{-1} G^T + D^{-1} ]
U_Q = torch.potrf(Q)
# partial cholesky of S matrix
U_S = torch.zeros(neq + nineq, neq + nineq).type_as(Q)
G_invQ_GT = torch.mm(G, torch.potrs(G.t(), U_Q))
R = G_invQ_GT
if neq > 0:
invQ_AT = torch.potrs(A.t(), U_Q)
A_invQ_AT = torch.mm(A, invQ_AT)
G_invQ_AT = torch.mm(G, invQ_AT)
# TODO: torch.potrf sometimes says the matrix is not PSD but
# numpy does? I filed an issue at
# https://github.com/pytorch/pytorch/issues/199
try:
U11 = torch.potrf(A_invQ_AT)
except:
U11 = torch.Tensor(np.linalg.cholesky(
A_invQ_AT.cpu().numpy())).type_as(A_invQ_AT)
# TODO: torch.trtrs is currently not implemented on the GPU
# and we are using gesv as a workaround.
U12 = torch.gesv(G_invQ_AT.t(), U11.t())[0]
U_S[:neq, :neq] = U11
U_S[:neq, neq:] = U12
R -= torch.mm(U12.t(), U12)
return U_Q, U_S, R | [
"def",
"pre_factor_kkt",
"(",
"Q",
",",
"G",
",",
"A",
")",
":",
"nineq",
",",
"nz",
",",
"neq",
",",
"_",
"=",
"get_sizes",
"(",
"G",
",",
"A",
")",
"# S = [ A Q^{-1} A^T A Q^{-1} G^T ]",
"# [ G Q^{-1} A^T G Q^{-1} G^T + D^{-1} ]",
"U_Q",
"=",
"torch",
".",
"potrf",
"(",
"Q",
")",
"# partial cholesky of S matrix",
"U_S",
"=",
"torch",
".",
"zeros",
"(",
"neq",
"+",
"nineq",
",",
"neq",
"+",
"nineq",
")",
".",
"type_as",
"(",
"Q",
")",
"G_invQ_GT",
"=",
"torch",
".",
"mm",
"(",
"G",
",",
"torch",
".",
"potrs",
"(",
"G",
".",
"t",
"(",
")",
",",
"U_Q",
")",
")",
"R",
"=",
"G_invQ_GT",
"if",
"neq",
">",
"0",
":",
"invQ_AT",
"=",
"torch",
".",
"potrs",
"(",
"A",
".",
"t",
"(",
")",
",",
"U_Q",
")",
"A_invQ_AT",
"=",
"torch",
".",
"mm",
"(",
"A",
",",
"invQ_AT",
")",
"G_invQ_AT",
"=",
"torch",
".",
"mm",
"(",
"G",
",",
"invQ_AT",
")",
"# TODO: torch.potrf sometimes says the matrix is not PSD but",
"# numpy does? I filed an issue at",
"# https://github.com/pytorch/pytorch/issues/199",
"try",
":",
"U11",
"=",
"torch",
".",
"potrf",
"(",
"A_invQ_AT",
")",
"except",
":",
"U11",
"=",
"torch",
".",
"Tensor",
"(",
"np",
".",
"linalg",
".",
"cholesky",
"(",
"A_invQ_AT",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
")",
")",
".",
"type_as",
"(",
"A_invQ_AT",
")",
"# TODO: torch.trtrs is currently not implemented on the GPU",
"# and we are using gesv as a workaround.",
"U12",
"=",
"torch",
".",
"gesv",
"(",
"G_invQ_AT",
".",
"t",
"(",
")",
",",
"U11",
".",
"t",
"(",
")",
")",
"[",
"0",
"]",
"U_S",
"[",
":",
"neq",
",",
":",
"neq",
"]",
"=",
"U11",
"U_S",
"[",
":",
"neq",
",",
"neq",
":",
"]",
"=",
"U12",
"R",
"-=",
"torch",
".",
"mm",
"(",
"U12",
".",
"t",
"(",
")",
",",
"U12",
")",
"return",
"U_Q",
",",
"U_S",
",",
"R"
]
| 34.342857 | 17.228571 |
def indicators_from_tag(self, indicator, tag_name, filters=None, params=None):
"""
Args:
indicator:
tag_name:
filters:
params:
Return:
"""
params = params or {}
for t in self.pivot_from_tag(indicator, tag_name, filters=filters, params=params):
yield t | [
"def",
"indicators_from_tag",
"(",
"self",
",",
"indicator",
",",
"tag_name",
",",
"filters",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"params",
"=",
"params",
"or",
"{",
"}",
"for",
"t",
"in",
"self",
".",
"pivot_from_tag",
"(",
"indicator",
",",
"tag_name",
",",
"filters",
"=",
"filters",
",",
"params",
"=",
"params",
")",
":",
"yield",
"t"
]
| 26.4 | 21.466667 |
def create_issue(self, title, content, priority=None,
milestone=None, tags=None, assignee=None,
private=None):
"""
Create a new issue.
:param title: the title of the issue
:param content: the description of the issue
:param priority: the priority of the ticket
:param milestone: the milestone of the ticket
        :param tags: comma-separated list of tags for the ticket
:param assignee: the assignee of the ticket
:param private: whether create this issue as private
:return:
"""
request_url = "{}new_issue".format(self.create_basic_url())
payload = {'title': title, 'issue_content': content}
if priority is not None:
payload['priority'] = priority
if milestone is not None:
payload['milestone'] = milestone
if tags is not None:
payload['tag'] = tags
if assignee is not None:
payload['assignee'] = assignee
if private is not None:
payload['private'] = private
return_value = self._call_api(request_url,
method='POST', data=payload)
LOG.debug(return_value) | [
"def",
"create_issue",
"(",
"self",
",",
"title",
",",
"content",
",",
"priority",
"=",
"None",
",",
"milestone",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"assignee",
"=",
"None",
",",
"private",
"=",
"None",
")",
":",
"request_url",
"=",
"\"{}new_issue\"",
".",
"format",
"(",
"self",
".",
"create_basic_url",
"(",
")",
")",
"payload",
"=",
"{",
"'title'",
":",
"title",
",",
"'issue_content'",
":",
"content",
"}",
"if",
"priority",
"is",
"not",
"None",
":",
"payload",
"[",
"'priority'",
"]",
"=",
"priority",
"if",
"milestone",
"is",
"not",
"None",
":",
"payload",
"[",
"'milestone'",
"]",
"=",
"milestone",
"if",
"tags",
"is",
"not",
"None",
":",
"payload",
"[",
"'tag'",
"]",
"=",
"tags",
"if",
"assignee",
"is",
"not",
"None",
":",
"payload",
"[",
"'assignee'",
"]",
"=",
"assignee",
"if",
"private",
"is",
"not",
"None",
":",
"payload",
"[",
"'private'",
"]",
"=",
"private",
"return_value",
"=",
"self",
".",
"_call_api",
"(",
"request_url",
",",
"method",
"=",
"'POST'",
",",
"data",
"=",
"payload",
")",
"LOG",
".",
"debug",
"(",
"return_value",
")"
]
| 36.939394 | 14.575758 |
def delete_ec2_role(self, role, mount_point='aws-ec2'):
"""DELETE /auth/<mount_point>/role/<role>
:param role:
:type role:
:param mount_point:
:type mount_point:
:return:
:rtype:
"""
return self._adapter.delete('/v1/auth/{0}/role/{1}'.format(mount_point, role)) | [
"def",
"delete_ec2_role",
"(",
"self",
",",
"role",
",",
"mount_point",
"=",
"'aws-ec2'",
")",
":",
"return",
"self",
".",
"_adapter",
".",
"delete",
"(",
"'/v1/auth/{0}/role/{1}'",
".",
"format",
"(",
"mount_point",
",",
"role",
")",
")"
]
| 29.454545 | 19.818182 |
def _parse(coord, _match=_regex.match):
"""Return match groups from single sheet coordinate.
>>> Coordinates._parse('A1')
('A', '1', None, None)
>>> Coordinates._parse('A'), Coordinates._parse('1')
((None, None, 'A', None), (None, None, None, '1'))
>>> Coordinates._parse('spam')
Traceback (most recent call last):
...
ValueError: spam
"""
try:
return _match(coord).groups()
except AttributeError:
raise ValueError(coord) | [
"def",
"_parse",
"(",
"coord",
",",
"_match",
"=",
"_regex",
".",
"match",
")",
":",
"try",
":",
"return",
"_match",
"(",
"coord",
")",
".",
"groups",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"coord",
")"
]
| 29.5 | 14.555556 |
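An illustrative pattern for `_regex` (an assumption; the real expression lives on the class) that reproduces the doctest results shown in the docstring:
import re

_regex = re.compile(r'^(?:([A-Z]+)(\d+)|([A-Z]+)|(\d+))$')

def parse(coord, _match=_regex.match):
    try:
        return _match(coord).groups()
    except AttributeError:
        raise ValueError(coord)

print(parse('A1'))  # ('A', '1', None, None)
print(parse('A'))   # (None, None, 'A', None)
print(parse('1'))   # (None, None, None, '1')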
def loadSignalFromWav(inputSignalFile, calibrationRealWorldValue=None, calibrationSignalFile=None, start=None,
end=None) -> Signal:
""" reads a wav file into a Signal and scales the input so that the sample are expressed in real world values
(as defined by the calibration signal).
:param inputSignalFile: a path to the input signal file
    :param calibrationSignalFile: a path to the calibration signal file
:param calibrationRealWorldValue: the real world value represented by the calibration signal
:param bitDepth: the bit depth of the input signal, used to rescale the value to a range of +1 to -1
:returns: a Signal
"""
inputSignal = readWav(inputSignalFile, start=start, end=end)
if calibrationSignalFile is not None:
calibrationSignal = readWav(calibrationSignalFile)
scalingFactor = calibrationRealWorldValue / np.max(calibrationSignal.samples)
return Signal(inputSignal.samples * scalingFactor, inputSignal.fs)
else:
return inputSignal | [
"def",
"loadSignalFromWav",
"(",
"inputSignalFile",
",",
"calibrationRealWorldValue",
"=",
"None",
",",
"calibrationSignalFile",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
"->",
"Signal",
":",
"inputSignal",
"=",
"readWav",
"(",
"inputSignalFile",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
")",
"if",
"calibrationSignalFile",
"is",
"not",
"None",
":",
"calibrationSignal",
"=",
"readWav",
"(",
"calibrationSignalFile",
")",
"scalingFactor",
"=",
"calibrationRealWorldValue",
"/",
"np",
".",
"max",
"(",
"calibrationSignal",
".",
"samples",
")",
"return",
"Signal",
"(",
"inputSignal",
".",
"samples",
"*",
"scalingFactor",
",",
"inputSignal",
".",
"fs",
")",
"else",
":",
"return",
"inputSignal"
]
| 60.058824 | 25.117647 |
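The scaling step in the record above maps the calibration tone's peak sample to a known real-world value and applies the same factor to the input. A minimal sketch of that idea with plain numpy (the synthetic data and the 94.0 value are illustrative, not from the original module):

import numpy as np

def scale_to_real_world(samples, calibration_samples, calibration_value):
    # the calibration tone's peak is assumed to represent calibration_value
    # in real-world units, so every input sample gets the same ratio applied
    scaling_factor = calibration_value / np.max(calibration_samples)
    return samples * scaling_factor

cal = 0.5 * np.sin(np.linspace(0, 2 * np.pi, 48000))       # peaks at 0.5 full scale
sig = np.random.uniform(-0.25, 0.25, 48000)                 # stand-in input signal
scaled = scale_to_real_world(sig, cal, calibration_value=94.0)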
def all_label_values(self, label_list_ids=None):
"""
Return a set of all label-values occurring in this corpus.
Args:
label_list_ids (list): If not None, only labels from label-lists with an id contained in this list
are considered.
Returns:
:class:`set`: A set of distinct label-values.
"""
values = set()
for utterance in self.utterances.values():
values = values.union(utterance.all_label_values(label_list_ids=label_list_ids))
return values | [
"def",
"all_label_values",
"(",
"self",
",",
"label_list_ids",
"=",
"None",
")",
":",
"values",
"=",
"set",
"(",
")",
"for",
"utterance",
"in",
"self",
".",
"utterances",
".",
"values",
"(",
")",
":",
"values",
"=",
"values",
".",
"union",
"(",
"utterance",
".",
"all_label_values",
"(",
"label_list_ids",
"=",
"label_list_ids",
")",
")",
"return",
"values"
]
| 33.411765 | 26 |
def check(self, **kwargs):
"""
In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS
is a tuple or a list.
"""
errors = super().check(**kwargs)
multitenant_staticfiles_dirs = settings.MULTITENANT_STATICFILES_DIRS
if not isinstance(multitenant_staticfiles_dirs, (list, tuple)):
errors.append(
Error(
"Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.",
hint="Perhaps you forgot a trailing comma?",
)
)
return errors | [
"def",
"check",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"errors",
"=",
"super",
"(",
")",
".",
"check",
"(",
"*",
"*",
"kwargs",
")",
"multitenant_staticfiles_dirs",
"=",
"settings",
".",
"MULTITENANT_STATICFILES_DIRS",
"if",
"not",
"isinstance",
"(",
"multitenant_staticfiles_dirs",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"errors",
".",
"append",
"(",
"Error",
"(",
"\"Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.\"",
",",
"hint",
"=",
"\"Perhaps you forgot a trailing comma?\"",
",",
")",
")",
"return",
"errors"
]
| 35.588235 | 23.235294 |
def parameters_dict(self):
"""
Get the tool parameters as a simple dictionary
:return: The tool parameters
"""
d = {}
for k, v in self.__dict__.items():
if not k.startswith("_"):
d[k] = v
return d | [
"def",
"parameters_dict",
"(",
"self",
")",
":",
"d",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"not",
"k",
".",
"startswith",
"(",
"\"_\"",
")",
":",
"d",
"[",
"k",
"]",
"=",
"v",
"return",
"d"
]
| 24.636364 | 13 |
def random_reseed(self, seed):
"""
Provide YubiHSM DRBG_CTR with a new seed.
@param seed: new seed -- must be exactly 32 bytes
@type seed: string
@returns: True on success
@rtype: bool
@see: L{pyhsm.basic_cmd.YHSM_Cmd_Random_Reseed}
"""
return pyhsm.basic_cmd.YHSM_Cmd_Random_Reseed(self.stick, seed).execute() | [
"def",
"random_reseed",
"(",
"self",
",",
"seed",
")",
":",
"return",
"pyhsm",
".",
"basic_cmd",
".",
"YHSM_Cmd_Random_Reseed",
"(",
"self",
".",
"stick",
",",
"seed",
")",
".",
"execute",
"(",
")"
]
| 28.692308 | 19.461538 |
def getSampleTypes(self, active_only=True):
"""Return all sampletypes
"""
catalog = api.get_tool("bika_setup_catalog")
query = {
"portal_type": "SampleType",
# N.B. The `sortable_title` index sorts case sensitive. Since there
# is no sort key for sample types, it makes more sense to sort
# them alphabetically in the selection
"sort_on": "title",
"sort_order": "ascending"
}
results = catalog(query)
if active_only:
results = filter(api.is_active, results)
sampletypes = map(
lambda brain: (brain.UID, brain.Title), results)
return DisplayList(sampletypes) | [
"def",
"getSampleTypes",
"(",
"self",
",",
"active_only",
"=",
"True",
")",
":",
"catalog",
"=",
"api",
".",
"get_tool",
"(",
"\"bika_setup_catalog\"",
")",
"query",
"=",
"{",
"\"portal_type\"",
":",
"\"SampleType\"",
",",
"# N.B. The `sortable_title` index sorts case sensitive. Since there",
"# is no sort key for sample types, it makes more sense to sort",
"# them alphabetically in the selection",
"\"sort_on\"",
":",
"\"title\"",
",",
"\"sort_order\"",
":",
"\"ascending\"",
"}",
"results",
"=",
"catalog",
"(",
"query",
")",
"if",
"active_only",
":",
"results",
"=",
"filter",
"(",
"api",
".",
"is_active",
",",
"results",
")",
"sampletypes",
"=",
"map",
"(",
"lambda",
"brain",
":",
"(",
"brain",
".",
"UID",
",",
"brain",
".",
"Title",
")",
",",
"results",
")",
"return",
"DisplayList",
"(",
"sampletypes",
")"
]
| 39.888889 | 13.666667 |
def properties_changed(self, sender, changed_properties, invalidated_properties):
"""
Called when a device property has changed or got invalidated.
"""
if 'Connected' in changed_properties:
if changed_properties['Connected']:
self.connect_succeeded()
else:
self.disconnect_succeeded()
if ('ServicesResolved' in changed_properties and changed_properties['ServicesResolved'] == 1 and
not self.services):
self.services_resolved() | [
"def",
"properties_changed",
"(",
"self",
",",
"sender",
",",
"changed_properties",
",",
"invalidated_properties",
")",
":",
"if",
"'Connected'",
"in",
"changed_properties",
":",
"if",
"changed_properties",
"[",
"'Connected'",
"]",
":",
"self",
".",
"connect_succeeded",
"(",
")",
"else",
":",
"self",
".",
"disconnect_succeeded",
"(",
")",
"if",
"(",
"'ServicesResolved'",
"in",
"changed_properties",
"and",
"changed_properties",
"[",
"'ServicesResolved'",
"]",
"==",
"1",
"and",
"not",
"self",
".",
"services",
")",
":",
"self",
".",
"services_resolved",
"(",
")"
]
| 41.461538 | 17 |
def create_quote(self, blogname, **kwargs):
"""
Create a quote post on a blog
:param blogname: a string, the url of the blog you want to post to.
:param state: a string, The state of the post.
:param tags: a list of tags that you want applied to the post
:param tweet: a string, the customized tweet that you want
:param date: a string, the GMT date and time of the post
:param format: a string, sets the format type of the post. html or markdown
:param slug: a string, a short text summary to the end of the post url
:param quote: a string, the full text of the quote
:param source: a string, the cited source of the quote
:returns: a dict created from the JSON response
"""
kwargs.update({"type": "quote"})
return self._send_post(blogname, kwargs) | [
"def",
"create_quote",
"(",
"self",
",",
"blogname",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"\"type\"",
":",
"\"quote\"",
"}",
")",
"return",
"self",
".",
"_send_post",
"(",
"blogname",
",",
"kwargs",
")"
]
| 47.444444 | 19.888889 |
def three_d_effect(img, **kwargs):
"""Create 3D effect using convolution"""
w = kwargs.get('weight', 1)
LOG.debug("Applying 3D effect with weight %.2f", w)
kernel = np.array([[-w, 0, w],
[-w, 1, w],
[-w, 0, w]])
mode = kwargs.get('convolve_mode', 'same')
def func(band_data, kernel=kernel, mode=mode, index=None):
del index
delay = dask.delayed(_three_d_effect_delayed)(band_data, kernel, mode)
new_data = da.from_delayed(delay, shape=band_data.shape, dtype=band_data.dtype)
return new_data
return apply_enhancement(img.data, func, separate=True, pass_dask=True) | [
"def",
"three_d_effect",
"(",
"img",
",",
"*",
"*",
"kwargs",
")",
":",
"w",
"=",
"kwargs",
".",
"get",
"(",
"'weight'",
",",
"1",
")",
"LOG",
".",
"debug",
"(",
"\"Applying 3D effect with weight %.2f\"",
",",
"w",
")",
"kernel",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"-",
"w",
",",
"0",
",",
"w",
"]",
",",
"[",
"-",
"w",
",",
"1",
",",
"w",
"]",
",",
"[",
"-",
"w",
",",
"0",
",",
"w",
"]",
"]",
")",
"mode",
"=",
"kwargs",
".",
"get",
"(",
"'convolve_mode'",
",",
"'same'",
")",
"def",
"func",
"(",
"band_data",
",",
"kernel",
"=",
"kernel",
",",
"mode",
"=",
"mode",
",",
"index",
"=",
"None",
")",
":",
"del",
"index",
"delay",
"=",
"dask",
".",
"delayed",
"(",
"_three_d_effect_delayed",
")",
"(",
"band_data",
",",
"kernel",
",",
"mode",
")",
"new_data",
"=",
"da",
".",
"from_delayed",
"(",
"delay",
",",
"shape",
"=",
"band_data",
".",
"shape",
",",
"dtype",
"=",
"band_data",
".",
"dtype",
")",
"return",
"new_data",
"return",
"apply_enhancement",
"(",
"img",
".",
"data",
",",
"func",
",",
"separate",
"=",
"True",
",",
"pass_dask",
"=",
"True",
")"
]
| 38.529412 | 20.882353 |
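The kernel built in the record above is a horizontal-gradient (emboss-like) filter; the dask plumbing only parallelises the convolution. A standalone sketch of the same kernel applied with scipy (the random band is a placeholder image):

import numpy as np
from scipy.signal import convolve2d

w = 1.0
kernel = np.array([[-w, 0, w],
                   [-w, 1, w],
                   [-w, 0, w]])

band = np.random.rand(64, 64)                      # one image band
embossed = convolve2d(band, kernel, mode='same')   # same-size output, as in the record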
def _tool_to_dict(tool):
"""Parse a tool definition into a cwl2wdl style dictionary.
"""
out = {"name": _id_to_name(tool.tool["id"]),
"baseCommand": " ".join(tool.tool["baseCommand"]),
"arguments": [],
"inputs": [_input_to_dict(i) for i in tool.tool["inputs"]],
"outputs": [_output_to_dict(o) for o in tool.tool["outputs"]],
"requirements": _requirements_to_dict(tool.requirements + tool.hints),
"stdin": None, "stdout": None}
return out | [
"def",
"_tool_to_dict",
"(",
"tool",
")",
":",
"out",
"=",
"{",
"\"name\"",
":",
"_id_to_name",
"(",
"tool",
".",
"tool",
"[",
"\"id\"",
"]",
")",
",",
"\"baseCommand\"",
":",
"\" \"",
".",
"join",
"(",
"tool",
".",
"tool",
"[",
"\"baseCommand\"",
"]",
")",
",",
"\"arguments\"",
":",
"[",
"]",
",",
"\"inputs\"",
":",
"[",
"_input_to_dict",
"(",
"i",
")",
"for",
"i",
"in",
"tool",
".",
"tool",
"[",
"\"inputs\"",
"]",
"]",
",",
"\"outputs\"",
":",
"[",
"_output_to_dict",
"(",
"o",
")",
"for",
"o",
"in",
"tool",
".",
"tool",
"[",
"\"outputs\"",
"]",
"]",
",",
"\"requirements\"",
":",
"_requirements_to_dict",
"(",
"tool",
".",
"requirements",
"+",
"tool",
".",
"hints",
")",
",",
"\"stdin\"",
":",
"None",
",",
"\"stdout\"",
":",
"None",
"}",
"return",
"out"
]
| 46.272727 | 17.181818 |
def deprecated(func):
"""
This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted
when the function is used.
:param func: The function to run
:return: function
"""
def deprecation_warning(*args, **kwargs):
warnings.warn('Call to deprecated function {name}. Please consult our documentation at '
'http://pyapi-gitlab.readthedocs.io/en/latest/#gitlab.Gitlab.{name}'.format(name=func.__name__),
category=DeprecationWarning)
return func(*args, **kwargs)
deprecation_warning.__name__ = func.__name__
deprecation_warning.__doc__ = func.__doc__
deprecation_warning.__dict__ = func.__dict__
return deprecation_warning | [
"def",
"deprecated",
"(",
"func",
")",
":",
"def",
"deprecation_warning",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"'Call to deprecated function {name}. Please consult our documentation at '",
"'http://pyapi-gitlab.readthedocs.io/en/latest/#gitlab.Gitlab.{name}'",
".",
"format",
"(",
"name",
"=",
"func",
".",
"__name__",
")",
",",
"category",
"=",
"DeprecationWarning",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"deprecation_warning",
".",
"__name__",
"=",
"func",
".",
"__name__",
"deprecation_warning",
".",
"__doc__",
"=",
"func",
".",
"__doc__",
"deprecation_warning",
".",
"__dict__",
"=",
"func",
".",
"__dict__",
"return",
"deprecation_warning"
]
| 44.411765 | 20.764706 |
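A stripped-down version of the decorator pattern used in the record above, with functools.wraps doing the attribute copying instead of the manual assignments (the warning text and example function are illustrative):

import functools
import warnings

def deprecated(func):
    @functools.wraps(func)                    # copies __name__, __doc__, __dict__
    def wrapper(*args, **kwargs):
        warnings.warn('Call to deprecated function {}.'.format(func.__name__),
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return wrapper

@deprecated
def old_api():
    return 42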
def delete(self, client=None):
"""API call: delete a metric via a DELETE request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/delete
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current metric.
"""
client = self._require_client(client)
client.metrics_api.metric_delete(self.project, self.name) | [
"def",
"delete",
"(",
"self",
",",
"client",
"=",
"None",
")",
":",
"client",
"=",
"self",
".",
"_require_client",
"(",
"client",
")",
"client",
".",
"metrics_api",
".",
"metric_delete",
"(",
"self",
".",
"project",
",",
"self",
".",
"name",
")"
]
| 42.384615 | 22.461538 |
def on_balance_volume(close_data, volume):
"""
On Balance Volume.
Formula:
start = 1
if CLOSEt > CLOSEt-1
obv = obvt-1 + volumet
elif CLOSEt < CLOSEt-1
obv = obvt-1 - volumet
    elif CLOSEt == CLOSEt-1
obv = obvt-1
"""
catch_errors.check_for_input_len_diff(close_data, volume)
obv = np.zeros(len(volume))
obv[0] = 1
for idx in range(1, len(obv)):
if close_data[idx] > close_data[idx-1]:
obv[idx] = obv[idx-1] + volume[idx]
elif close_data[idx] < close_data[idx-1]:
obv[idx] = obv[idx-1] - volume[idx]
elif close_data[idx] == close_data[idx-1]:
obv[idx] = obv[idx-1]
return obv | [
"def",
"on_balance_volume",
"(",
"close_data",
",",
"volume",
")",
":",
"catch_errors",
".",
"check_for_input_len_diff",
"(",
"close_data",
",",
"volume",
")",
"obv",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"volume",
")",
")",
"obv",
"[",
"0",
"]",
"=",
"1",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"obv",
")",
")",
":",
"if",
"close_data",
"[",
"idx",
"]",
">",
"close_data",
"[",
"idx",
"-",
"1",
"]",
":",
"obv",
"[",
"idx",
"]",
"=",
"obv",
"[",
"idx",
"-",
"1",
"]",
"+",
"volume",
"[",
"idx",
"]",
"elif",
"close_data",
"[",
"idx",
"]",
"<",
"close_data",
"[",
"idx",
"-",
"1",
"]",
":",
"obv",
"[",
"idx",
"]",
"=",
"obv",
"[",
"idx",
"-",
"1",
"]",
"-",
"volume",
"[",
"idx",
"]",
"elif",
"close_data",
"[",
"idx",
"]",
"==",
"close_data",
"[",
"idx",
"-",
"1",
"]",
":",
"obv",
"[",
"idx",
"]",
"=",
"obv",
"[",
"idx",
"-",
"1",
"]",
"return",
"obv"
]
| 28.625 | 13.875 |
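A short worked example of the OBV recurrence from the record above, with made-up prices and volumes:

import numpy as np

close = np.array([10.0, 10.5, 10.2, 10.2, 11.0])
volume = np.array([100, 150, 120, 80, 200])

obv = np.zeros(len(volume))
obv[0] = 1
for i in range(1, len(obv)):
    if close[i] > close[i - 1]:
        obv[i] = obv[i - 1] + volume[i]    # up close: add volume
    elif close[i] < close[i - 1]:
        obv[i] = obv[i - 1] - volume[i]    # down close: subtract volume
    else:
        obv[i] = obv[i - 1]                # unchanged close: carry forward
# obv -> [1, 151, 31, 31, 231]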
def make_next_param(login_url, current_url):
'''
Reduces the scheme and host from a given URL so it can be passed to
the given `login` URL more efficiently.
:param login_url: The login URL being redirected to.
:type login_url: str
:param current_url: The URL to reduce.
:type current_url: str
'''
l = urlparse(login_url)
c = urlparse(current_url)
if (not l.scheme or l.scheme == c.scheme) and \
(not l.netloc or l.netloc == c.netloc):
return urlunparse(('', '', c.path, c.params, c.query, ''))
return current_url | [
"def",
"make_next_param",
"(",
"login_url",
",",
"current_url",
")",
":",
"l",
"=",
"urlparse",
"(",
"login_url",
")",
"c",
"=",
"urlparse",
"(",
"current_url",
")",
"if",
"(",
"not",
"l",
".",
"scheme",
"or",
"l",
".",
"scheme",
"==",
"c",
".",
"scheme",
")",
"and",
"(",
"not",
"l",
".",
"netloc",
"or",
"l",
".",
"netloc",
"==",
"c",
".",
"netloc",
")",
":",
"return",
"urlunparse",
"(",
"(",
"''",
",",
"''",
",",
"c",
".",
"path",
",",
"c",
".",
"params",
",",
"c",
".",
"query",
",",
"''",
")",
")",
"return",
"current_url"
]
| 33.294118 | 18.941176 |
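The reduction in the record above drops the scheme and host whenever they match (or are absent from) the login URL; a small sketch of the same check with urllib.parse (the URLs are made up):

from urllib.parse import urlparse, urlunparse

login = urlparse('/login')
current = urlparse('https://example.com/profile?tab=1')
if (not login.scheme or login.scheme == current.scheme) and \
        (not login.netloc or login.netloc == current.netloc):
    next_param = urlunparse(('', '', current.path, current.params, current.query, ''))
# next_param == '/profile?tab=1'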
def to_native(self, value, context=None):
""" Schematics deserializer override
        We return a phonenumbers.PhoneNumber object so any kind
of formatting can be trivially performed. Additionally,
some convenient properties have been added:
e164: string formatted '+11234567890'
pretty: string formatted '(123) 456-7890'
:return: phonenumbers.PhoneNumber
"""
if isinstance(value, pn.phonenumber.PhoneNumber):
return value
try:
phone = pn.parse(value, 'US')
valid = pn.is_valid_number(phone)
except (NumberParseException, TypeError):
raise ConversionError(self.messages['convert'])
if not valid and pn.is_possible_number(phone):
raise ConversionError(self.messages['invalid'])
elif not valid:
raise ConversionError(self.messages['convert'])
phone.e164 = pn.format_number(phone, pn.PhoneNumberFormat.E164)
phone.pretty = pn.format_number(phone, pn.PhoneNumberFormat.NATIONAL)
return phone | [
"def",
"to_native",
"(",
"self",
",",
"value",
",",
"context",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"pn",
".",
"phonenumber",
".",
"PhoneNumber",
")",
":",
"return",
"value",
"try",
":",
"phone",
"=",
"pn",
".",
"parse",
"(",
"value",
",",
"'US'",
")",
"valid",
"=",
"pn",
".",
"is_valid_number",
"(",
"phone",
")",
"except",
"(",
"NumberParseException",
",",
"TypeError",
")",
":",
"raise",
"ConversionError",
"(",
"self",
".",
"messages",
"[",
"'convert'",
"]",
")",
"if",
"not",
"valid",
"and",
"pn",
".",
"is_possible_number",
"(",
"phone",
")",
":",
"raise",
"ConversionError",
"(",
"self",
".",
"messages",
"[",
"'invalid'",
"]",
")",
"elif",
"not",
"valid",
":",
"raise",
"ConversionError",
"(",
"self",
".",
"messages",
"[",
"'convert'",
"]",
")",
"phone",
".",
"e164",
"=",
"pn",
".",
"format_number",
"(",
"phone",
",",
"pn",
".",
"PhoneNumberFormat",
".",
"E164",
")",
"phone",
".",
"pretty",
"=",
"pn",
".",
"format_number",
"(",
"phone",
",",
"pn",
".",
"PhoneNumberFormat",
".",
"NATIONAL",
")",
"return",
"phone"
]
| 34.354839 | 21.032258 |
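The record above relies on the phonenumbers package for parsing and validation; a minimal sketch of the same parse/validate/format sequence (the input number is hypothetical and library availability is assumed):

import phonenumbers as pn

raw = '(123) 456-7890'
phone = pn.parse(raw, 'US')                      # may raise NumberParseException
valid = pn.is_valid_number(phone)
possible = pn.is_possible_number(phone)
e164 = pn.format_number(phone, pn.PhoneNumberFormat.E164)        # e.g. '+11234567890'
pretty = pn.format_number(phone, pn.PhoneNumberFormat.NATIONAL)  # e.g. '(123) 456-7890'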
def exponential_sleep_generator(initial, maximum, multiplier=_DEFAULT_DELAY_MULTIPLIER):
"""Generates sleep intervals based on the exponential back-off algorithm.
This implements the `Truncated Exponential Back-off`_ algorithm.
.. _Truncated Exponential Back-off:
https://cloud.google.com/storage/docs/exponential-backoff
Args:
        initial (float): The minimum amount of time to delay. This must
            be greater than 0.
        maximum (float): The maximum amount of time to delay.
multiplier (float): The multiplier applied to the delay.
Yields:
float: successive sleep intervals.
"""
delay = initial
while True:
# Introduce jitter by yielding a delay that is uniformly distributed
# to average out to the delay time.
yield min(random.uniform(0.0, delay * 2.0), maximum)
delay = delay * multiplier | [
"def",
"exponential_sleep_generator",
"(",
"initial",
",",
"maximum",
",",
"multiplier",
"=",
"_DEFAULT_DELAY_MULTIPLIER",
")",
":",
"delay",
"=",
"initial",
"while",
"True",
":",
"# Introduce jitter by yielding a delay that is uniformly distributed",
"# to average out to the delay time.",
"yield",
"min",
"(",
"random",
".",
"uniform",
"(",
"0.0",
",",
"delay",
"*",
"2.0",
")",
",",
"maximum",
")",
"delay",
"=",
"delay",
"*",
"multiplier"
]
| 38.130435 | 22.565217 |
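A usage sketch for the generator in the record above: each retry consumes the next jittered interval, capped at the maximum (the retry loop is illustrative and the sleep is skipped):

import random

def exponential_sleep_generator(initial, maximum, multiplier=2):
    delay = initial
    while True:
        yield min(random.uniform(0.0, delay * 2.0), maximum)  # jitter averages to delay
        delay = delay * multiplier

for attempt, pause in zip(range(5), exponential_sleep_generator(1.0, 60.0)):
    print('attempt', attempt, 'would sleep for', round(pause, 2), 's')
    # time.sleep(pause) would go here in real retry code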
def patch_ligotimegps(module="ligo.lw.lsctables"):
"""Context manager to on-the-fly patch LIGOTimeGPS to accept all int types
"""
module = import_module(module)
orig = module.LIGOTimeGPS
module.LIGOTimeGPS = _ligotimegps
try:
yield
finally:
module.LIGOTimeGPS = orig | [
"def",
"patch_ligotimegps",
"(",
"module",
"=",
"\"ligo.lw.lsctables\"",
")",
":",
"module",
"=",
"import_module",
"(",
"module",
")",
"orig",
"=",
"module",
".",
"LIGOTimeGPS",
"module",
".",
"LIGOTimeGPS",
"=",
"_ligotimegps",
"try",
":",
"yield",
"finally",
":",
"module",
".",
"LIGOTimeGPS",
"=",
"orig"
]
| 30.1 | 12.4 |
def ipv4(self, network=False, address_class=None, private=None):
"""
Produce a random IPv4 address or network with a valid CIDR.
:param network: Network address
:param address_class: IPv4 address class (a, b, or c)
:param private: Public or private
:returns: IPv4
"""
if private is True:
return self.ipv4_private(address_class=address_class,
network=network)
elif private is False:
return self.ipv4_public(address_class=address_class,
network=network)
# if neither private nor public is required explicitly,
# generate from whole requested address space
if address_class:
all_networks = [_IPv4Constants._network_classes[address_class]]
else:
            # if no address class is chosen, use whole IPv4 pool
all_networks = [ip_network('0.0.0.0/0')]
# exclude special networks
all_networks = self._exclude_ipv4_networks(
all_networks,
_IPv4Constants._excluded_networks,
)
# choose random network from the list
random_network = self.generator.random.choice(all_networks)
return self._random_ipv4_address_from_subnet(random_network, network) | [
"def",
"ipv4",
"(",
"self",
",",
"network",
"=",
"False",
",",
"address_class",
"=",
"None",
",",
"private",
"=",
"None",
")",
":",
"if",
"private",
"is",
"True",
":",
"return",
"self",
".",
"ipv4_private",
"(",
"address_class",
"=",
"address_class",
",",
"network",
"=",
"network",
")",
"elif",
"private",
"is",
"False",
":",
"return",
"self",
".",
"ipv4_public",
"(",
"address_class",
"=",
"address_class",
",",
"network",
"=",
"network",
")",
"# if neither private nor public is required explicitly,",
"# generate from whole requested address space",
"if",
"address_class",
":",
"all_networks",
"=",
"[",
"_IPv4Constants",
".",
"_network_classes",
"[",
"address_class",
"]",
"]",
"else",
":",
"# if no address class is choosen, use whole IPv4 pool",
"all_networks",
"=",
"[",
"ip_network",
"(",
"'0.0.0.0/0'",
")",
"]",
"# exclude special networks",
"all_networks",
"=",
"self",
".",
"_exclude_ipv4_networks",
"(",
"all_networks",
",",
"_IPv4Constants",
".",
"_excluded_networks",
",",
")",
"# choose random network from the list",
"random_network",
"=",
"self",
".",
"generator",
".",
"random",
".",
"choice",
"(",
"all_networks",
")",
"return",
"self",
".",
"_random_ipv4_address_from_subnet",
"(",
"random_network",
",",
"network",
")"
]
| 38.441176 | 19.911765 |
def calculate_acf(data, delta_t=1.0, unbiased=False):
r"""Calculates the one-sided autocorrelation function.
Calculates the autocorrelation function (ACF) and returns the one-sided
ACF. The ACF is defined as the autocovariance divided by the variance. The
ACF can be estimated using
.. math::
\hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right)
Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at
time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}` is
the variance of :math:`X_{t}`.
Parameters
-----------
data : TimeSeries or numpy.array
A TimeSeries or numpy.array of data.
delta_t : float
The time step of the data series if it is not a TimeSeries instance.
unbiased : bool
If True the normalization of the autocovariance function is n-k
instead of n. This is called the unbiased estimation of the
autocovariance. Note that this does not mean the ACF is unbiased.
Returns
-------
acf : numpy.array
If data is a TimeSeries then acf will be a TimeSeries of the
one-sided ACF. Else acf is a numpy.array.
"""
# if given a TimeSeries instance then get numpy.array
if isinstance(data, TimeSeries):
y = data.numpy()
delta_t = data.delta_t
else:
y = data
# Zero mean
y = y - y.mean()
ny_orig = len(y)
npad = 1
while npad < 2*ny_orig:
npad = npad << 1
ypad = numpy.zeros(npad)
ypad[:ny_orig] = y
# FFT data minus the mean
fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries()
# correlate
    # do not need to give the conjugate since correlate function does it
cdata = FrequencySeries(zeros(len(fdata), dtype=fdata.dtype),
delta_f=fdata.delta_f, copy=False)
correlate(fdata, fdata, cdata)
# IFFT correlated data to get unnormalized autocovariance time series
acf = cdata.to_timeseries()
acf = acf[:ny_orig]
# normalize the autocovariance
# note that dividing by acf[0] is the same as ( y.var() * len(acf) )
if unbiased:
acf /= ( y.var() * numpy.arange(len(acf), 0, -1) )
else:
acf /= acf[0]
# return input datatype
if isinstance(data, TimeSeries):
return TimeSeries(acf, delta_t=delta_t)
else:
return acf | [
"def",
"calculate_acf",
"(",
"data",
",",
"delta_t",
"=",
"1.0",
",",
"unbiased",
"=",
"False",
")",
":",
"# if given a TimeSeries instance then get numpy.array",
"if",
"isinstance",
"(",
"data",
",",
"TimeSeries",
")",
":",
"y",
"=",
"data",
".",
"numpy",
"(",
")",
"delta_t",
"=",
"data",
".",
"delta_t",
"else",
":",
"y",
"=",
"data",
"# Zero mean",
"y",
"=",
"y",
"-",
"y",
".",
"mean",
"(",
")",
"ny_orig",
"=",
"len",
"(",
"y",
")",
"npad",
"=",
"1",
"while",
"npad",
"<",
"2",
"*",
"ny_orig",
":",
"npad",
"=",
"npad",
"<<",
"1",
"ypad",
"=",
"numpy",
".",
"zeros",
"(",
"npad",
")",
"ypad",
"[",
":",
"ny_orig",
"]",
"=",
"y",
"# FFT data minus the mean",
"fdata",
"=",
"TimeSeries",
"(",
"ypad",
",",
"delta_t",
"=",
"delta_t",
")",
".",
"to_frequencyseries",
"(",
")",
"# correlate",
"# do not need to give the congjugate since correlate function does it",
"cdata",
"=",
"FrequencySeries",
"(",
"zeros",
"(",
"len",
"(",
"fdata",
")",
",",
"dtype",
"=",
"fdata",
".",
"dtype",
")",
",",
"delta_f",
"=",
"fdata",
".",
"delta_f",
",",
"copy",
"=",
"False",
")",
"correlate",
"(",
"fdata",
",",
"fdata",
",",
"cdata",
")",
"# IFFT correlated data to get unnormalized autocovariance time series",
"acf",
"=",
"cdata",
".",
"to_timeseries",
"(",
")",
"acf",
"=",
"acf",
"[",
":",
"ny_orig",
"]",
"# normalize the autocovariance",
"# note that dividing by acf[0] is the same as ( y.var() * len(acf) )",
"if",
"unbiased",
":",
"acf",
"/=",
"(",
"y",
".",
"var",
"(",
")",
"*",
"numpy",
".",
"arange",
"(",
"len",
"(",
"acf",
")",
",",
"0",
",",
"-",
"1",
")",
")",
"else",
":",
"acf",
"/=",
"acf",
"[",
"0",
"]",
"# return input datatype",
"if",
"isinstance",
"(",
"data",
",",
"TimeSeries",
")",
":",
"return",
"TimeSeries",
"(",
"acf",
",",
"delta_t",
"=",
"delta_t",
")",
"else",
":",
"return",
"acf"
]
| 31.6 | 24.653333 |
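For small arrays the same one-sided ACF estimate can be written with plain numpy; the pycbc TimeSeries/FrequencySeries machinery in the record mainly provides the FFT speed-up. A sketch under that assumption:

import numpy as np

def acf_numpy(y, unbiased=False):
    y = np.asarray(y, dtype=float)
    y = y - y.mean()
    n = len(y)
    acov = np.correlate(y, y, mode='full')[n - 1:]   # lags 0 .. n-1
    if unbiased:
        acov = acov / np.arange(n, 0, -1)            # divide each lag k by n - k
        return acov / y.var()
    return acov / acov[0]                            # biased estimate, acf[0] == 1

acf = acf_numpy(np.random.randn(128))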
def process_tree(top, name='top'):
"""Creates a string representation of the process tree for process top.
This method uses the :func:`walk_processes` method to create the process tree.
:param top: top process for which process tree string should be
created
:type top: :class:`~climlab.process.process.Process`
:param str name: name of top process
:returns: string representation of the process tree
:rtype: str
:Example:
::
>>> import climlab
>>> from climlab.utils import walk
>>> model = climlab.EBM()
>>> proc_tree_str = walk.process_tree(model, name='model')
>>> print proc_tree_str
model: <class 'climlab.model.ebm.EBM'>
diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
LW: <class 'climlab.radiation.AplusBT.AplusBT'>
albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'>
iceline: <class 'climlab.surface.albedo.Iceline'>
cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'>
warm_albedo: <class 'climlab.surface.albedo.P2Albedo'>
insolation: <class 'climlab.radiation.insolation.P2Insolation'>
"""
str1 = ''
for name, proc, level in walk_processes(top, name, ignoreFlag=True):
indent = ' ' * 3 * (level)
str1 += ('{}{}: {}\n'.format(indent, name, type(proc)))
return str1 | [
"def",
"process_tree",
"(",
"top",
",",
"name",
"=",
"'top'",
")",
":",
"str1",
"=",
"''",
"for",
"name",
",",
"proc",
",",
"level",
"in",
"walk_processes",
"(",
"top",
",",
"name",
",",
"ignoreFlag",
"=",
"True",
")",
":",
"indent",
"=",
"' '",
"*",
"3",
"*",
"(",
"level",
")",
"str1",
"+=",
"(",
"'{}{}: {}\\n'",
".",
"format",
"(",
"indent",
",",
"name",
",",
"type",
"(",
"proc",
")",
")",
")",
"return",
"str1"
]
| 40.236842 | 25.078947 |
def load(self, items):
"""
Populate this section from an iteration of the parse_items call
"""
for k, vals in items:
self[k] = "".join(vals) | [
"def",
"load",
"(",
"self",
",",
"items",
")",
":",
"for",
"k",
",",
"vals",
"in",
"items",
":",
"self",
"[",
"k",
"]",
"=",
"\"\"",
".",
"join",
"(",
"vals",
")"
]
| 25.571429 | 15 |
def int2str(num, radix=10, alphabet=BASE85):
"""helper function for quick base conversions from integers to strings"""
return NumConv(radix, alphabet).int2str(num) | [
"def",
"int2str",
"(",
"num",
",",
"radix",
"=",
"10",
",",
"alphabet",
"=",
"BASE85",
")",
":",
"return",
"NumConv",
"(",
"radix",
",",
"alphabet",
")",
".",
"int2str",
"(",
"num",
")"
]
| 56.333333 | 4 |
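A dependency-free sketch of the integer-to-string conversion a NumConv-style helper performs (negative numbers and custom BASE85 alphabets are ignored here):

import string

def int_to_str(num, radix=10, alphabet=string.digits + string.ascii_letters):
    if num == 0:
        return alphabet[0]
    digits = []
    while num > 0:
        num, rem = divmod(num, radix)
        digits.append(alphabet[rem])         # least-significant digit first
    return ''.join(reversed(digits))

assert int_to_str(255, 16) == 'ff'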
def no_llvm(*args, uid=0, gid=0, **kwargs):
"""
Return a customizable uchroot command.
The command will be executed inside a uchroot environment.
Args:
args: List of additional arguments for uchroot (typical: mounts)
Return:
chroot_cmd
"""
uchroot_cmd = no_args()
uchroot_cmd = uchroot_cmd[__default_opts__(uid, gid)]
return uchroot_cmd[args] | [
"def",
"no_llvm",
"(",
"*",
"args",
",",
"uid",
"=",
"0",
",",
"gid",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"uchroot_cmd",
"=",
"no_args",
"(",
")",
"uchroot_cmd",
"=",
"uchroot_cmd",
"[",
"__default_opts__",
"(",
"uid",
",",
"gid",
")",
"]",
"return",
"uchroot_cmd",
"[",
"args",
"]"
]
| 27.357143 | 18.785714 |
def dot_alignment(sequences, seq_field=None, name_field=None, root=None, root_name=None,
cluster_threshold=0.75, as_fasta=False, just_alignment=False):
'''
Creates a dot alignment (dots indicate identity, mismatches are represented by the mismatched
residue) for a list of sequences.
Args:
        sequences (list(Sequence)): A list of Sequence objects to be aligned.
seq_field (str): Name of the sequence field key. Default is ``vdj_nt``.
name_field (str): Name of the name field key. Default is ``seq_id``.
root (str, Sequence): The sequence used to 'root' the alignment. This sequence will be at the
top of the alignment and is the sequence against which dots (identity) will be evaluated.
Can be provided either as a string corresponding to the name of one of the sequences in
``sequences`` or as a Sequence object. If not provided, ``sequences`` will be clustered
at ``cluster_threshold`` and the centroid of the largest cluster will be used.
root_name (str): Name of the root sequence. If not provided, the existing name of the root
sequence (``name_field``) will be used. If ``root`` is not provided, the default ``root_name``
is ``'centroid'``.
cluster_threshold (float): Threshold with which to cluster sequences if ``root`` is not provided.
Default is ``0.75``.
as_fasta (bool): If ``True``, returns the dot alignment as a FASTA-formatted string, rather than
a string formatted for human readability.
just_alignment (bool): If ``True``, returns just the dot-aligned sequences as a list.
Returns:
If ``just_alignment`` is ``True``, a list of dot-aligned sequences (without sequence names) will be returned.
If ``as_fasta`` is ``True``, a string containing the dot-aligned sequences in FASTA format will be returned.
Otherwise, a formatted string containing the aligned sequences (with sequence names) will be returned.
'''
import abstar
from .cluster import cluster
sequences = deepcopy(sequences)
root = copy(root)
# if custom seq_field is specified, copy to the .alignment_sequence attribute
if seq_field is not None:
if not all([seq_field in list(s.annotations.keys()) for s in sequences]):
print('\nERROR: {} is not present in all of the supplied sequences.\n'.format(seq_field))
sys.exit(1)
for s in sequences:
s.alignment_sequence = s[seq_field]
else:
for s in sequences:
s.alignment_sequence = s.sequence
# if custom name_field is specified, copy to the .id attribute
if name_field is not None:
if not all([name_field in list(s.annotations.keys()) for s in sequences]):
print('\nERROR: {} is not present in all of the supplied sequences.\n'.format(name_field))
sys.exit(1)
for s in sequences:
s.alignment_id = s[name_field]
else:
for s in sequences:
s.alignment_id = s.id
# parse the root sequence
if all([root is None, root_name is None]):
clusters = cluster(sequences, threshold=cluster_threshold, quiet=True)
clusters = sorted(clusters, key=lambda x: x.size, reverse=True)
centroid = clusters[0].centroid
root = abstar.run(('centroid', centroid.sequence))
root.alignment_id = 'centroid'
root.alignment_sequence = root[seq_field]
elif type(root) in STR_TYPES:
root = [s for s in sequences if s.alignment_id == root][0]
if not root:
print('\nERROR: The name of the root sequence ({}) was not found in the list of input sequences.'.format(root))
print('\n')
sys.exit(1)
sequences = [s for s in sequences if s.alignment_id != root.alignment_id]
elif type(root) == Sequence:
if seq_field is not None:
            if seq_field not in list(root.annotations.keys()):
print('\nERROR: {} is not present in the supplied root sequence.\n'.format(seq_field))
sys.exit(1)
root.alignment_sequence = root[seq_field]
if name_field is not None:
            if name_field not in list(root.annotations.keys()):
print('\nERROR: {} is not present in the supplied root sequence.\n'.format(name_field))
sys.exit(1)
root.alignment_id = root[name_field]
sequences = [s for s in sequences if s.alignment_id != root.alignment_id]
else:
print('\nERROR: If root is provided, it must be the name of a sequence \
found in the supplied list of sequences or it must be a Sequence object.')
print('\n')
sys.exit(1)
if root_name is not None:
root.alignment_id = root_name
else:
root_name = root.alignment_id
# compute and parse the alignment
seqs = [(root.alignment_id, root.alignment_sequence)]
seqs += [(s.alignment_id, s.alignment_sequence) for s in sequences]
aln = muscle(seqs)
g_aln = [a for a in aln if a.id == root_name][0]
dots = [(root_name, str(g_aln.seq)), ]
for seq in [a for a in aln if a.id != root_name]:
s_aln = ''
for g, q in zip(str(g_aln.seq), str(seq.seq)):
if g == q == '-':
s_aln += '-'
elif g == q:
s_aln += '.'
else:
s_aln += q
dots.append((seq.id, s_aln))
if just_alignment:
return [d[1] for d in dots]
name_len = max([len(d[0]) for d in dots]) + 2
dot_aln = []
for d in dots:
if as_fasta:
dot_aln.append('>{}\n{}'.format(d[0], d[1]))
else:
spaces = name_len - len(d[0])
dot_aln.append(d[0] + ' ' * spaces + d[1])
return '\n'.join(dot_aln) | [
"def",
"dot_alignment",
"(",
"sequences",
",",
"seq_field",
"=",
"None",
",",
"name_field",
"=",
"None",
",",
"root",
"=",
"None",
",",
"root_name",
"=",
"None",
",",
"cluster_threshold",
"=",
"0.75",
",",
"as_fasta",
"=",
"False",
",",
"just_alignment",
"=",
"False",
")",
":",
"import",
"abstar",
"from",
".",
"cluster",
"import",
"cluster",
"sequences",
"=",
"deepcopy",
"(",
"sequences",
")",
"root",
"=",
"copy",
"(",
"root",
")",
"# if custom seq_field is specified, copy to the .alignment_sequence attribute",
"if",
"seq_field",
"is",
"not",
"None",
":",
"if",
"not",
"all",
"(",
"[",
"seq_field",
"in",
"list",
"(",
"s",
".",
"annotations",
".",
"keys",
"(",
")",
")",
"for",
"s",
"in",
"sequences",
"]",
")",
":",
"print",
"(",
"'\\nERROR: {} is not present in all of the supplied sequences.\\n'",
".",
"format",
"(",
"seq_field",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"for",
"s",
"in",
"sequences",
":",
"s",
".",
"alignment_sequence",
"=",
"s",
"[",
"seq_field",
"]",
"else",
":",
"for",
"s",
"in",
"sequences",
":",
"s",
".",
"alignment_sequence",
"=",
"s",
".",
"sequence",
"# if custom name_field is specified, copy to the .id attribute",
"if",
"name_field",
"is",
"not",
"None",
":",
"if",
"not",
"all",
"(",
"[",
"name_field",
"in",
"list",
"(",
"s",
".",
"annotations",
".",
"keys",
"(",
")",
")",
"for",
"s",
"in",
"sequences",
"]",
")",
":",
"print",
"(",
"'\\nERROR: {} is not present in all of the supplied sequences.\\n'",
".",
"format",
"(",
"name_field",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"for",
"s",
"in",
"sequences",
":",
"s",
".",
"alignment_id",
"=",
"s",
"[",
"name_field",
"]",
"else",
":",
"for",
"s",
"in",
"sequences",
":",
"s",
".",
"alignment_id",
"=",
"s",
".",
"id",
"# parse the root sequence",
"if",
"all",
"(",
"[",
"root",
"is",
"None",
",",
"root_name",
"is",
"None",
"]",
")",
":",
"clusters",
"=",
"cluster",
"(",
"sequences",
",",
"threshold",
"=",
"cluster_threshold",
",",
"quiet",
"=",
"True",
")",
"clusters",
"=",
"sorted",
"(",
"clusters",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"size",
",",
"reverse",
"=",
"True",
")",
"centroid",
"=",
"clusters",
"[",
"0",
"]",
".",
"centroid",
"root",
"=",
"abstar",
".",
"run",
"(",
"(",
"'centroid'",
",",
"centroid",
".",
"sequence",
")",
")",
"root",
".",
"alignment_id",
"=",
"'centroid'",
"root",
".",
"alignment_sequence",
"=",
"root",
"[",
"seq_field",
"]",
"elif",
"type",
"(",
"root",
")",
"in",
"STR_TYPES",
":",
"root",
"=",
"[",
"s",
"for",
"s",
"in",
"sequences",
"if",
"s",
".",
"alignment_id",
"==",
"root",
"]",
"[",
"0",
"]",
"if",
"not",
"root",
":",
"print",
"(",
"'\\nERROR: The name of the root sequence ({}) was not found in the list of input sequences.'",
".",
"format",
"(",
"root",
")",
")",
"print",
"(",
"'\\n'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"sequences",
"=",
"[",
"s",
"for",
"s",
"in",
"sequences",
"if",
"s",
".",
"alignment_id",
"!=",
"root",
".",
"alignment_id",
"]",
"elif",
"type",
"(",
"root",
")",
"==",
"Sequence",
":",
"if",
"seq_field",
"is",
"not",
"None",
":",
"if",
"seq_field",
"not",
"in",
"list",
"(",
"root",
".",
"anotations",
".",
"keys",
"(",
")",
")",
":",
"print",
"(",
"'\\nERROR: {} is not present in the supplied root sequence.\\n'",
".",
"format",
"(",
"seq_field",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"root",
".",
"alignment_sequence",
"=",
"root",
"[",
"seq_field",
"]",
"if",
"name_field",
"is",
"not",
"None",
":",
"if",
"name_field",
"not",
"in",
"list",
"(",
"root",
".",
"anotations",
".",
"keys",
"(",
")",
")",
":",
"print",
"(",
"'\\nERROR: {} is not present in the supplied root sequence.\\n'",
".",
"format",
"(",
"name_field",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"root",
".",
"alignment_id",
"=",
"root",
"[",
"name_field",
"]",
"sequences",
"=",
"[",
"s",
"for",
"s",
"in",
"sequences",
"if",
"s",
".",
"alignment_id",
"!=",
"root",
".",
"alignment_id",
"]",
"else",
":",
"print",
"(",
"'\\nERROR: If root is provided, it must be the name of a sequence \\\n found in the supplied list of sequences or it must be a Sequence object.'",
")",
"print",
"(",
"'\\n'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"root_name",
"is",
"not",
"None",
":",
"root",
".",
"alignment_id",
"=",
"root_name",
"else",
":",
"root_name",
"=",
"root",
".",
"alignment_id",
"# compute and parse the alignment",
"seqs",
"=",
"[",
"(",
"root",
".",
"alignment_id",
",",
"root",
".",
"alignment_sequence",
")",
"]",
"seqs",
"+=",
"[",
"(",
"s",
".",
"alignment_id",
",",
"s",
".",
"alignment_sequence",
")",
"for",
"s",
"in",
"sequences",
"]",
"aln",
"=",
"muscle",
"(",
"seqs",
")",
"g_aln",
"=",
"[",
"a",
"for",
"a",
"in",
"aln",
"if",
"a",
".",
"id",
"==",
"root_name",
"]",
"[",
"0",
"]",
"dots",
"=",
"[",
"(",
"root_name",
",",
"str",
"(",
"g_aln",
".",
"seq",
")",
")",
",",
"]",
"for",
"seq",
"in",
"[",
"a",
"for",
"a",
"in",
"aln",
"if",
"a",
".",
"id",
"!=",
"root_name",
"]",
":",
"s_aln",
"=",
"''",
"for",
"g",
",",
"q",
"in",
"zip",
"(",
"str",
"(",
"g_aln",
".",
"seq",
")",
",",
"str",
"(",
"seq",
".",
"seq",
")",
")",
":",
"if",
"g",
"==",
"q",
"==",
"'-'",
":",
"s_aln",
"+=",
"'-'",
"elif",
"g",
"==",
"q",
":",
"s_aln",
"+=",
"'.'",
"else",
":",
"s_aln",
"+=",
"q",
"dots",
".",
"append",
"(",
"(",
"seq",
".",
"id",
",",
"s_aln",
")",
")",
"if",
"just_alignment",
":",
"return",
"[",
"d",
"[",
"1",
"]",
"for",
"d",
"in",
"dots",
"]",
"name_len",
"=",
"max",
"(",
"[",
"len",
"(",
"d",
"[",
"0",
"]",
")",
"for",
"d",
"in",
"dots",
"]",
")",
"+",
"2",
"dot_aln",
"=",
"[",
"]",
"for",
"d",
"in",
"dots",
":",
"if",
"as_fasta",
":",
"dot_aln",
".",
"append",
"(",
"'>{}\\n{}'",
".",
"format",
"(",
"d",
"[",
"0",
"]",
",",
"d",
"[",
"1",
"]",
")",
")",
"else",
":",
"spaces",
"=",
"name_len",
"-",
"len",
"(",
"d",
"[",
"0",
"]",
")",
"dot_aln",
".",
"append",
"(",
"d",
"[",
"0",
"]",
"+",
"' '",
"*",
"spaces",
"+",
"d",
"[",
"1",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"dot_aln",
")"
]
| 43.530303 | 27.742424 |
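Once the sequences are aligned, the heart of the record above is just the per-column comparison against the root; a dependency-free sketch with pre-aligned strings standing in for the muscle/abstar steps:

root_name, root_seq = 'root', 'ACGT-ACGT'
others = [('seq1', 'ACGTTACGA'),
          ('seq2', 'ACGT-ACGT')]

dots = [(root_name, root_seq)]
for name, seq in others:
    row = ''
    for r, q in zip(root_seq, seq):
        if r == q == '-':
            row += '-'      # gap in both sequences
        elif r == q:
            row += '.'      # identity rendered as a dot
        else:
            row += q        # mismatch shows the query residue
    dots.append((name, row))
# dots[1] == ('seq1', '....T...A'); dots[2] == ('seq2', '....-....')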
def write_docstring_for_shortcut(self):
"""Write docstring to editor by shortcut of code editor."""
# cursor placed below function definition
result = self.get_function_definition_from_below_last_line()
if result is not None:
__, number_of_lines_of_function = result
cursor = self.code_editor.textCursor()
for __ in range(number_of_lines_of_function):
cursor.movePosition(QTextCursor.PreviousBlock)
self.code_editor.setTextCursor(cursor)
cursor = self.code_editor.textCursor()
self.line_number_cursor = cursor.blockNumber() + 1
self.write_docstring_at_first_line_of_function() | [
"def",
"write_docstring_for_shortcut",
"(",
"self",
")",
":",
"# cursor placed below function definition\r",
"result",
"=",
"self",
".",
"get_function_definition_from_below_last_line",
"(",
")",
"if",
"result",
"is",
"not",
"None",
":",
"__",
",",
"number_of_lines_of_function",
"=",
"result",
"cursor",
"=",
"self",
".",
"code_editor",
".",
"textCursor",
"(",
")",
"for",
"__",
"in",
"range",
"(",
"number_of_lines_of_function",
")",
":",
"cursor",
".",
"movePosition",
"(",
"QTextCursor",
".",
"PreviousBlock",
")",
"self",
".",
"code_editor",
".",
"setTextCursor",
"(",
"cursor",
")",
"cursor",
"=",
"self",
".",
"code_editor",
".",
"textCursor",
"(",
")",
"self",
".",
"line_number_cursor",
"=",
"cursor",
".",
"blockNumber",
"(",
")",
"+",
"1",
"self",
".",
"write_docstring_at_first_line_of_function",
"(",
")"
]
| 43.6875 | 17.6875 |
def indicator(self):
"""Produce the spinner."""
while self.run:
try:
size = self.work_q.qsize()
except Exception:
note = 'Please wait '
else:
note = 'Number of Jobs in Queue = %s ' % size
if self.msg:
note = '%s %s' % (note, self.msg)
for item in ['|', '/', '-', '\\']:
sys.stdout.write('\rProcessing - [ %s ] - %s ' % (item, note))
sys.stdout.flush()
time.sleep(.1)
self.run = self.run | [
"def",
"indicator",
"(",
"self",
")",
":",
"while",
"self",
".",
"run",
":",
"try",
":",
"size",
"=",
"self",
".",
"work_q",
".",
"qsize",
"(",
")",
"except",
"Exception",
":",
"note",
"=",
"'Please wait '",
"else",
":",
"note",
"=",
"'Number of Jobs in Queue = %s '",
"%",
"size",
"if",
"self",
".",
"msg",
":",
"note",
"=",
"'%s %s'",
"%",
"(",
"note",
",",
"self",
".",
"msg",
")",
"for",
"item",
"in",
"[",
"'|'",
",",
"'/'",
",",
"'-'",
",",
"'\\\\'",
"]",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\rProcessing - [ %s ] - %s '",
"%",
"(",
"item",
",",
"note",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"time",
".",
"sleep",
"(",
".1",
")",
"self",
".",
"run",
"=",
"self",
".",
"run"
]
| 30.263158 | 17.421053 |
def get_task(
self,
taskName):
"""*recursively scan this taskpaper object to find a descendant task by name*
**Key Arguments:**
- ``taskName`` -- the name, or title, of the task you want to return
**Return:**
- ``task`` -- the taskpaper task object you requested (or ``None`` if no task was matched)
**Usage:**
.. code-block:: python
aTask = doc.get_task("cut the grass")
"""
if taskName[:2] != "- ":
taskName = "- " + taskName
task = None
try:
self.refresh
except:
pass
# SEARCH TASKS
for t in self.tasks:
if t.title.lower() == taskName.lower() and task == None:
task = t
break
# SEARCH TASK CONTENTS
if task == None:
for t in self.tasks:
task = t.get_task(taskName)
if task:
break
# SEARCH PROJECT CONTENTS
if task == None and "<Task " not in self.__repr__():
for p in self.projects:
task = p.get_task(taskName)
if task:
break
return task | [
"def",
"get_task",
"(",
"self",
",",
"taskName",
")",
":",
"if",
"taskName",
"[",
":",
"2",
"]",
"!=",
"\"- \"",
":",
"taskName",
"=",
"\"- \"",
"+",
"taskName",
"task",
"=",
"None",
"try",
":",
"self",
".",
"refresh",
"except",
":",
"pass",
"# SEARCH TASKS",
"for",
"t",
"in",
"self",
".",
"tasks",
":",
"if",
"t",
".",
"title",
".",
"lower",
"(",
")",
"==",
"taskName",
".",
"lower",
"(",
")",
"and",
"task",
"==",
"None",
":",
"task",
"=",
"t",
"break",
"# SEARCH TASK CONTENTS",
"if",
"task",
"==",
"None",
":",
"for",
"t",
"in",
"self",
".",
"tasks",
":",
"task",
"=",
"t",
".",
"get_task",
"(",
"taskName",
")",
"if",
"task",
":",
"break",
"# SEARCH PROJECT CONTENTS",
"if",
"task",
"==",
"None",
"and",
"\"<Task \"",
"not",
"in",
"self",
".",
"__repr__",
"(",
")",
":",
"for",
"p",
"in",
"self",
".",
"projects",
":",
"task",
"=",
"p",
".",
"get_task",
"(",
"taskName",
")",
"if",
"task",
":",
"break",
"return",
"task"
]
| 25.87234 | 21.659574 |
def dispatch(self, key):
""" Get a seq of Commandchain objects that match key """
if key in self.strs:
yield self.strs[key]
for r, obj in self.regexs.items():
if re.match(r, key):
yield obj
else:
#print "nomatch",key # dbg
pass | [
"def",
"dispatch",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"in",
"self",
".",
"strs",
":",
"yield",
"self",
".",
"strs",
"[",
"key",
"]",
"for",
"r",
",",
"obj",
"in",
"self",
".",
"regexs",
".",
"items",
"(",
")",
":",
"if",
"re",
".",
"match",
"(",
"r",
",",
"key",
")",
":",
"yield",
"obj",
"else",
":",
"#print \"nomatch\",key # dbg",
"pass"
]
| 29.727273 | 13.363636 |
def change_mime(self, bucket, key, mime):
"""修改文件mimeType:
主动修改指定资源的文件类型,具体规格参考:
http://developer.qiniu.com/docs/v6/api/reference/rs/chgm.html
Args:
bucket: 待操作资源所在空间
key: 待操作资源文件名
mime: 待操作文件目标mimeType
"""
resource = entry(bucket, key)
encode_mime = urlsafe_base64_encode(mime)
return self.__rs_do('chgm', resource, 'mime/{0}'.format(encode_mime)) | [
"def",
"change_mime",
"(",
"self",
",",
"bucket",
",",
"key",
",",
"mime",
")",
":",
"resource",
"=",
"entry",
"(",
"bucket",
",",
"key",
")",
"encode_mime",
"=",
"urlsafe_base64_encode",
"(",
"mime",
")",
"return",
"self",
".",
"__rs_do",
"(",
"'chgm'",
",",
"resource",
",",
"'mime/{0}'",
".",
"format",
"(",
"encode_mime",
")",
")"
]
| 31.571429 | 16.071429 |
def custom_auth(principal, credentials, realm, scheme, **parameters):
""" Generate a basic auth token for a given user and password.
:param principal: specifies who is being authenticated
:param credentials: authenticates the principal
:param realm: specifies the authentication provider
:param scheme: specifies the type of authentication
:param parameters: parameters passed along to the authentication provider
:return: auth token for use with :meth:`GraphDatabase.driver`
"""
from neobolt.security import AuthToken
return AuthToken(scheme, principal, credentials, realm, **parameters) | [
"def",
"custom_auth",
"(",
"principal",
",",
"credentials",
",",
"realm",
",",
"scheme",
",",
"*",
"*",
"parameters",
")",
":",
"from",
"neobolt",
".",
"security",
"import",
"AuthToken",
"return",
"AuthToken",
"(",
"scheme",
",",
"principal",
",",
"credentials",
",",
"realm",
",",
"*",
"*",
"parameters",
")"
]
| 51.5 | 18.75 |
def ReadClientStartupInfo(self, client_id, cursor=None):
"""Reads the latest client startup record for a single client."""
query = (
"SELECT startup_info, UNIX_TIMESTAMP(timestamp) "
"FROM clients, client_startup_history "
"WHERE clients.last_startup_timestamp=client_startup_history.timestamp "
"AND clients.client_id=client_startup_history.client_id "
"AND clients.client_id=%s")
cursor.execute(query, [db_utils.ClientIDToInt(client_id)])
row = cursor.fetchone()
if row is None:
return None
startup_info, timestamp = row
res = rdf_client.StartupInfo.FromSerializedString(startup_info)
res.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
return res | [
"def",
"ReadClientStartupInfo",
"(",
"self",
",",
"client_id",
",",
"cursor",
"=",
"None",
")",
":",
"query",
"=",
"(",
"\"SELECT startup_info, UNIX_TIMESTAMP(timestamp) \"",
"\"FROM clients, client_startup_history \"",
"\"WHERE clients.last_startup_timestamp=client_startup_history.timestamp \"",
"\"AND clients.client_id=client_startup_history.client_id \"",
"\"AND clients.client_id=%s\"",
")",
"cursor",
".",
"execute",
"(",
"query",
",",
"[",
"db_utils",
".",
"ClientIDToInt",
"(",
"client_id",
")",
"]",
")",
"row",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"None",
":",
"return",
"None",
"startup_info",
",",
"timestamp",
"=",
"row",
"res",
"=",
"rdf_client",
".",
"StartupInfo",
".",
"FromSerializedString",
"(",
"startup_info",
")",
"res",
".",
"timestamp",
"=",
"mysql_utils",
".",
"TimestampToRDFDatetime",
"(",
"timestamp",
")",
"return",
"res"
]
| 42.705882 | 20.058824 |
def get_docstring(filename, verbose=False):
"""
Search for assignment of the DOCUMENTATION variable in the given file.
Parse that from YAML and return the YAML doc or None.
"""
doc = None
try:
# Thank you, Habbie, for this bit of code :-)
M = ast.parse(''.join(open(filename)))
for child in M.body:
if isinstance(child, ast.Assign):
if 'DOCUMENTATION' in (t.id for t in child.targets):
doc = yaml.load(child.value.s)
except:
if verbose == True:
traceback.print_exc()
print "unable to parse %s" % filename
return doc | [
"def",
"get_docstring",
"(",
"filename",
",",
"verbose",
"=",
"False",
")",
":",
"doc",
"=",
"None",
"try",
":",
"# Thank you, Habbie, for this bit of code :-)",
"M",
"=",
"ast",
".",
"parse",
"(",
"''",
".",
"join",
"(",
"open",
"(",
"filename",
")",
")",
")",
"for",
"child",
"in",
"M",
".",
"body",
":",
"if",
"isinstance",
"(",
"child",
",",
"ast",
".",
"Assign",
")",
":",
"if",
"'DOCUMENTATION'",
"in",
"(",
"t",
".",
"id",
"for",
"t",
"in",
"child",
".",
"targets",
")",
":",
"doc",
"=",
"yaml",
".",
"load",
"(",
"child",
".",
"value",
".",
"s",
")",
"except",
":",
"if",
"verbose",
"==",
"True",
":",
"traceback",
".",
"print_exc",
"(",
")",
"print",
"\"unable to parse %s\"",
"%",
"filename",
"return",
"doc"
]
| 31.7 | 17.5 |
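A Python 3 variant of the same AST trick from the record above, using ast.literal_eval instead of the .s attribute and yaml.safe_load (the sample source string is made up):

import ast
import yaml   # assumed available, as in the record

def get_documentation_var(source):
    tree = ast.parse(source)
    for node in tree.body:
        if isinstance(node, ast.Assign) and any(
                isinstance(t, ast.Name) and t.id == 'DOCUMENTATION'
                for t in node.targets):
            return yaml.safe_load(ast.literal_eval(node.value))
    return None

src = 'DOCUMENTATION = "module: ping\\nshort_description: test module"'
doc = get_documentation_var(src)
# doc == {'module': 'ping', 'short_description': 'test module'}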
def as_iterable(value, wrap_maps=True, wrap_sets=False, itertype=tuple):
"""Wraps a single non-iterable value with a tuple (or other iterable type,
if ``itertype`` is provided.)
>>> as_iterable("abc")
("abc",)
>>> as_iterable(("abc",))
("abc",)
Equivalent to::
if is_iterable(value, not wrap_maps, not wrap_sets):
return value
else:
return itertype(value)
"""
if is_iterable(value, not wrap_maps, not wrap_sets):
return value
else:
return itertype(value) | [
"def",
"as_iterable",
"(",
"value",
",",
"wrap_maps",
"=",
"True",
",",
"wrap_sets",
"=",
"False",
",",
"itertype",
"=",
"tuple",
")",
":",
"if",
"is_iterable",
"(",
"value",
",",
"not",
"wrap_maps",
",",
"not",
"wrap_sets",
")",
":",
"return",
"value",
"else",
":",
"return",
"itertype",
"(",
"value",
")"
]
| 27.571429 | 19.190476 |
def _set_closed(self, future):
"""
Indicate that the instance is effectively closed.
:param future: The close future.
"""
logger.debug("%s[%s] closed.", self.__class__.__name__, id(self))
self.on_closed.emit(self)
self._closed_future.set_result(future.result()) | [
"def",
"_set_closed",
"(",
"self",
",",
"future",
")",
":",
"logger",
".",
"debug",
"(",
"\"%s[%s] closed.\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"id",
"(",
"self",
")",
")",
"self",
".",
"on_closed",
".",
"emit",
"(",
"self",
")",
"self",
".",
"_closed_future",
".",
"set_result",
"(",
"future",
".",
"result",
"(",
")",
")"
]
| 34.444444 | 13.555556 |
def _add_to_schema(self, field_name, schema):
"""Set the ``attribute`` attr to the field in question so this always
        gets deserialized into the field name without ``_id``.
Args:
field_name (str): The name of the field (the attribute name being
set in the schema).
schema (marshmallow.Schema): The actual parent schema this field
belongs to.
"""
super(ForeignKeyField, self)._add_to_schema(field_name, schema)
if self.get_field_value('convert_fks', default=True):
self.attribute = field_name.replace('_id', '') | [
"def",
"_add_to_schema",
"(",
"self",
",",
"field_name",
",",
"schema",
")",
":",
"super",
"(",
"ForeignKeyField",
",",
"self",
")",
".",
"_add_to_schema",
"(",
"field_name",
",",
"schema",
")",
"if",
"self",
".",
"get_field_value",
"(",
"'convert_fks'",
",",
"default",
"=",
"True",
")",
":",
"self",
".",
"attribute",
"=",
"field_name",
".",
"replace",
"(",
"'_id'",
",",
"''",
")"
]
| 43.642857 | 21.071429 |
def run(self, incremental=None, run_id=None):
"""Queue the execution of a particular crawler."""
state = {
'crawler': self.name,
'run_id': run_id,
'incremental': settings.INCREMENTAL
}
if incremental is not None:
state['incremental'] = incremental
# Cancel previous runs:
self.cancel()
# Flush out previous events:
Event.delete(self)
Queue.queue(self.init_stage, state, {}) | [
"def",
"run",
"(",
"self",
",",
"incremental",
"=",
"None",
",",
"run_id",
"=",
"None",
")",
":",
"state",
"=",
"{",
"'crawler'",
":",
"self",
".",
"name",
",",
"'run_id'",
":",
"run_id",
",",
"'incremental'",
":",
"settings",
".",
"INCREMENTAL",
"}",
"if",
"incremental",
"is",
"not",
"None",
":",
"state",
"[",
"'incremental'",
"]",
"=",
"incremental",
"# Cancel previous runs:",
"self",
".",
"cancel",
"(",
")",
"# Flush out previous events:",
"Event",
".",
"delete",
"(",
"self",
")",
"Queue",
".",
"queue",
"(",
"self",
".",
"init_stage",
",",
"state",
",",
"{",
"}",
")"
]
| 32 | 12.533333 |
def default_vsan_policy_configured(name, policy):
'''
Configures the default VSAN policy on a vCenter.
The state assumes there is only one default VSAN policy on a vCenter.
policy
Dict representation of a policy
'''
# TODO Refactor when recurse_differ supports list_differ
# It's going to make the whole thing much easier
policy_copy = copy.deepcopy(policy)
proxy_type = __salt__['vsphere.get_proxy_type']()
log.trace('proxy_type = %s', proxy_type)
# All allowed proxies have a shim execution module with the same
    # name which implements a get_details function
# All allowed proxies have a vcenter detail
vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter']
log.info('Running %s on vCenter \'%s\'', name, vcenter)
log.trace('policy = %s', policy)
changes_required = False
ret = {'name': name,
'changes': {},
'result': None,
'comment': None}
comments = []
changes = {}
changes_required = False
si = None
try:
#TODO policy schema validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_policy = __salt__['vsphere.list_default_vsan_policy'](si)
log.trace('current_policy = %s', current_policy)
# Building all diffs between the current and expected policy
# XXX We simplify the comparison by assuming we have at most 1
# sub_profile
if policy.get('subprofiles'):
if len(policy['subprofiles']) > 1:
raise ArgumentValueError('Multiple sub_profiles ({0}) are not '
'supported in the input policy')
subprofile = policy['subprofiles'][0]
current_subprofile = current_policy['subprofiles'][0]
capabilities_differ = list_diff(current_subprofile['capabilities'],
subprofile.get('capabilities', []),
key='id')
del policy['subprofiles']
if subprofile.get('capabilities'):
del subprofile['capabilities']
del current_subprofile['capabilities']
# Get the subprofile diffs without the capability keys
subprofile_differ = recursive_diff(current_subprofile,
dict(subprofile))
del current_policy['subprofiles']
policy_differ = recursive_diff(current_policy, policy)
if policy_differ.diffs or capabilities_differ.diffs or \
subprofile_differ.diffs:
if 'name' in policy_differ.new_values or \
'description' in policy_differ.new_values:
raise ArgumentValueError(
'\'name\' and \'description\' of the default VSAN policy '
'cannot be updated')
changes_required = True
if __opts__['test']:
str_changes = []
if policy_differ.diffs:
str_changes.extend([change for change in
policy_differ.changes_str.split('\n')])
if subprofile_differ.diffs or capabilities_differ.diffs:
str_changes.append('subprofiles:')
if subprofile_differ.diffs:
str_changes.extend(
[' {0}'.format(change) for change in
subprofile_differ.changes_str.split('\n')])
if capabilities_differ.diffs:
str_changes.append(' capabilities:')
str_changes.extend(
[' {0}'.format(change) for change in
capabilities_differ.changes_str2.split('\n')])
comments.append(
'State {0} will update the default VSAN policy on '
'vCenter \'{1}\':\n{2}'
''.format(name, vcenter, '\n'.join(str_changes)))
else:
__salt__['vsphere.update_storage_policy'](
policy=current_policy['name'],
policy_dict=policy_copy,
service_instance=si)
comments.append('Updated the default VSAN policy in vCenter '
'\'{0}\''.format(vcenter))
log.info(comments[-1])
new_values = policy_differ.new_values
new_values['subprofiles'] = [subprofile_differ.new_values]
new_values['subprofiles'][0]['capabilities'] = \
capabilities_differ.new_values
if not new_values['subprofiles'][0]['capabilities']:
del new_values['subprofiles'][0]['capabilities']
if not new_values['subprofiles'][0]:
del new_values['subprofiles']
old_values = policy_differ.old_values
old_values['subprofiles'] = [subprofile_differ.old_values]
old_values['subprofiles'][0]['capabilities'] = \
capabilities_differ.old_values
if not old_values['subprofiles'][0]['capabilities']:
del old_values['subprofiles'][0]['capabilities']
if not old_values['subprofiles'][0]:
del old_values['subprofiles']
changes.update({'default_vsan_policy':
{'new': new_values,
'old': old_values}})
log.trace(changes)
__salt__['vsphere.disconnect'](si)
except CommandExecutionError as exc:
log.error('Error: %s', exc)
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('Default VSAN policy in vCenter '
'\'{0}\' is correctly configured. '
'Nothing to be done.'.format(vcenter)),
'result': True})
else:
ret.update({
'comment': '\n'.join(comments),
'changes': changes,
'result': None if __opts__['test'] else True,
})
return ret | [
"def",
"default_vsan_policy_configured",
"(",
"name",
",",
"policy",
")",
":",
"# TODO Refactor when recurse_differ supports list_differ",
"# It's going to make the whole thing much easier",
"policy_copy",
"=",
"copy",
".",
"deepcopy",
"(",
"policy",
")",
"proxy_type",
"=",
"__salt__",
"[",
"'vsphere.get_proxy_type'",
"]",
"(",
")",
"log",
".",
"trace",
"(",
"'proxy_type = %s'",
",",
"proxy_type",
")",
"# All allowed proxies have a shim execution module with the same",
"# name which implementes a get_details function",
"# All allowed proxies have a vcenter detail",
"vcenter",
"=",
"__salt__",
"[",
"'{0}.get_details'",
".",
"format",
"(",
"proxy_type",
")",
"]",
"(",
")",
"[",
"'vcenter'",
"]",
"log",
".",
"info",
"(",
"'Running %s on vCenter \\'%s\\''",
",",
"name",
",",
"vcenter",
")",
"log",
".",
"trace",
"(",
"'policy = %s'",
",",
"policy",
")",
"changes_required",
"=",
"False",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"None",
"}",
"comments",
"=",
"[",
"]",
"changes",
"=",
"{",
"}",
"changes_required",
"=",
"False",
"si",
"=",
"None",
"try",
":",
"#TODO policy schema validation",
"si",
"=",
"__salt__",
"[",
"'vsphere.get_service_instance_via_proxy'",
"]",
"(",
")",
"current_policy",
"=",
"__salt__",
"[",
"'vsphere.list_default_vsan_policy'",
"]",
"(",
"si",
")",
"log",
".",
"trace",
"(",
"'current_policy = %s'",
",",
"current_policy",
")",
"# Building all diffs between the current and expected policy",
"# XXX We simplify the comparison by assuming we have at most 1",
"# sub_profile",
"if",
"policy",
".",
"get",
"(",
"'subprofiles'",
")",
":",
"if",
"len",
"(",
"policy",
"[",
"'subprofiles'",
"]",
")",
">",
"1",
":",
"raise",
"ArgumentValueError",
"(",
"'Multiple sub_profiles ({0}) are not '",
"'supported in the input policy'",
")",
"subprofile",
"=",
"policy",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"current_subprofile",
"=",
"current_policy",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"capabilities_differ",
"=",
"list_diff",
"(",
"current_subprofile",
"[",
"'capabilities'",
"]",
",",
"subprofile",
".",
"get",
"(",
"'capabilities'",
",",
"[",
"]",
")",
",",
"key",
"=",
"'id'",
")",
"del",
"policy",
"[",
"'subprofiles'",
"]",
"if",
"subprofile",
".",
"get",
"(",
"'capabilities'",
")",
":",
"del",
"subprofile",
"[",
"'capabilities'",
"]",
"del",
"current_subprofile",
"[",
"'capabilities'",
"]",
"# Get the subprofile diffs without the capability keys",
"subprofile_differ",
"=",
"recursive_diff",
"(",
"current_subprofile",
",",
"dict",
"(",
"subprofile",
")",
")",
"del",
"current_policy",
"[",
"'subprofiles'",
"]",
"policy_differ",
"=",
"recursive_diff",
"(",
"current_policy",
",",
"policy",
")",
"if",
"policy_differ",
".",
"diffs",
"or",
"capabilities_differ",
".",
"diffs",
"or",
"subprofile_differ",
".",
"diffs",
":",
"if",
"'name'",
"in",
"policy_differ",
".",
"new_values",
"or",
"'description'",
"in",
"policy_differ",
".",
"new_values",
":",
"raise",
"ArgumentValueError",
"(",
"'\\'name\\' and \\'description\\' of the default VSAN policy '",
"'cannot be updated'",
")",
"changes_required",
"=",
"True",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"str_changes",
"=",
"[",
"]",
"if",
"policy_differ",
".",
"diffs",
":",
"str_changes",
".",
"extend",
"(",
"[",
"change",
"for",
"change",
"in",
"policy_differ",
".",
"changes_str",
".",
"split",
"(",
"'\\n'",
")",
"]",
")",
"if",
"subprofile_differ",
".",
"diffs",
"or",
"capabilities_differ",
".",
"diffs",
":",
"str_changes",
".",
"append",
"(",
"'subprofiles:'",
")",
"if",
"subprofile_differ",
".",
"diffs",
":",
"str_changes",
".",
"extend",
"(",
"[",
"' {0}'",
".",
"format",
"(",
"change",
")",
"for",
"change",
"in",
"subprofile_differ",
".",
"changes_str",
".",
"split",
"(",
"'\\n'",
")",
"]",
")",
"if",
"capabilities_differ",
".",
"diffs",
":",
"str_changes",
".",
"append",
"(",
"' capabilities:'",
")",
"str_changes",
".",
"extend",
"(",
"[",
"' {0}'",
".",
"format",
"(",
"change",
")",
"for",
"change",
"in",
"capabilities_differ",
".",
"changes_str2",
".",
"split",
"(",
"'\\n'",
")",
"]",
")",
"comments",
".",
"append",
"(",
"'State {0} will update the default VSAN policy on '",
"'vCenter \\'{1}\\':\\n{2}'",
"''",
".",
"format",
"(",
"name",
",",
"vcenter",
",",
"'\\n'",
".",
"join",
"(",
"str_changes",
")",
")",
")",
"else",
":",
"__salt__",
"[",
"'vsphere.update_storage_policy'",
"]",
"(",
"policy",
"=",
"current_policy",
"[",
"'name'",
"]",
",",
"policy_dict",
"=",
"policy_copy",
",",
"service_instance",
"=",
"si",
")",
"comments",
".",
"append",
"(",
"'Updated the default VSAN policy in vCenter '",
"'\\'{0}\\''",
".",
"format",
"(",
"vcenter",
")",
")",
"log",
".",
"info",
"(",
"comments",
"[",
"-",
"1",
"]",
")",
"new_values",
"=",
"policy_differ",
".",
"new_values",
"new_values",
"[",
"'subprofiles'",
"]",
"=",
"[",
"subprofile_differ",
".",
"new_values",
"]",
"new_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"[",
"'capabilities'",
"]",
"=",
"capabilities_differ",
".",
"new_values",
"if",
"not",
"new_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"[",
"'capabilities'",
"]",
":",
"del",
"new_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"[",
"'capabilities'",
"]",
"if",
"not",
"new_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
":",
"del",
"new_values",
"[",
"'subprofiles'",
"]",
"old_values",
"=",
"policy_differ",
".",
"old_values",
"old_values",
"[",
"'subprofiles'",
"]",
"=",
"[",
"subprofile_differ",
".",
"old_values",
"]",
"old_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"[",
"'capabilities'",
"]",
"=",
"capabilities_differ",
".",
"old_values",
"if",
"not",
"old_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"[",
"'capabilities'",
"]",
":",
"del",
"old_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
"[",
"'capabilities'",
"]",
"if",
"not",
"old_values",
"[",
"'subprofiles'",
"]",
"[",
"0",
"]",
":",
"del",
"old_values",
"[",
"'subprofiles'",
"]",
"changes",
".",
"update",
"(",
"{",
"'default_vsan_policy'",
":",
"{",
"'new'",
":",
"new_values",
",",
"'old'",
":",
"old_values",
"}",
"}",
")",
"log",
".",
"trace",
"(",
"changes",
")",
"__salt__",
"[",
"'vsphere.disconnect'",
"]",
"(",
"si",
")",
"except",
"CommandExecutionError",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"'Error: %s'",
",",
"exc",
")",
"if",
"si",
":",
"__salt__",
"[",
"'vsphere.disconnect'",
"]",
"(",
"si",
")",
"if",
"not",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"exc",
".",
"strerror",
",",
"'result'",
":",
"False",
"if",
"not",
"__opts__",
"[",
"'test'",
"]",
"else",
"None",
"}",
")",
"return",
"ret",
"if",
"not",
"changes_required",
":",
"# We have no changes",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"(",
"'Default VSAN policy in vCenter '",
"'\\'{0}\\' is correctly configured. '",
"'Nothing to be done.'",
".",
"format",
"(",
"vcenter",
")",
")",
",",
"'result'",
":",
"True",
"}",
")",
"else",
":",
"ret",
".",
"update",
"(",
"{",
"'comment'",
":",
"'\\n'",
".",
"join",
"(",
"comments",
")",
",",
"'changes'",
":",
"changes",
",",
"'result'",
":",
"None",
"if",
"__opts__",
"[",
"'test'",
"]",
"else",
"True",
",",
"}",
")",
"return",
"ret"
]
| 45.471014 | 17.688406 |
def _open_ftp(self):
# type: () -> FTP
"""Open an ftp object for the file."""
ftp = self.fs._open_ftp()
ftp.voidcmd(str("TYPE I"))
return ftp | [
"def",
"_open_ftp",
"(",
"self",
")",
":",
"# type: () -> FTP",
"ftp",
"=",
"self",
".",
"fs",
".",
"_open_ftp",
"(",
")",
"ftp",
".",
"voidcmd",
"(",
"str",
"(",
"\"TYPE I\"",
")",
")",
"return",
"ftp"
]
| 29.333333 | 11.666667 |
def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None):
"""Return the zerike polynomials for all objects in an image
x - the X distance of a point from the center of its object
y - the Y distance of a point from the center of its object
zernike_indexes - an Nx2 array of the Zernike polynomials to be computed.
    mask - a mask with the same shape as X and Y of the points to consider
weight - weightings of points with the same shape as X and Y (default
weight on each point is 1).
returns a height x width x N array of complex numbers which are the
e^i portion of the sine and cosine of the Zernikes
"""
if x.shape != y.shape:
raise ValueError("X and Y must have the same shape")
if mask is None:
pass
elif mask.shape != x.shape:
raise ValueError("The mask must have the same shape as X and Y")
else:
x = x[mask]
y = y[mask]
if weight is not None:
weight = weight[mask]
lut = construct_zernike_lookuptable(zernike_indexes) # precompute poly. coeffs.
nzernikes = zernike_indexes.shape[0]
# compute radii
r_square = np.square(x) # r_square = x**2
np.add(r_square, np.square(y), out=r_square) # r_square = x**2 + y**2
# z = y + 1j*x
# each Zernike polynomial is poly(r)*(r**m * np.exp(1j*m*phi)) ==
# poly(r)*(y + 1j*x)**m
z = np.empty(x.shape, np.complex)
np.copyto(z.real, y)
np.copyto(z.imag, x)
# preallocate buffers
s = np.empty_like(x)
zf = np.zeros((nzernikes,) + x.shape, np.complex)
z_pows = {}
for idx, (n, m) in enumerate(zernike_indexes):
s[:]=0
if not m in z_pows:
if m == 0:
z_pows[m] = np.complex(1.0)
else:
z_pows[m] = z if m == 1 else (z ** m)
z_pow = z_pows[m]
# use Horner scheme
for k in range((n-m)//2+1):
s *= r_square
s += lut[idx, k]
s[r_square>1]=0
if weight is not None:
s *= weight.astype(s.dtype)
if m == 0:
np.copyto(zf[idx], s) # zf[idx] = s
else:
np.multiply(s, z_pow, out=zf[idx]) # zf[idx] = s*exp_term
if mask is None:
result = zf.transpose( tuple(range(1, 1+x.ndim)) + (0, ))
else:
result = np.zeros( mask.shape + (nzernikes,), np.complex)
result[mask] = zf.transpose( tuple(range(1, 1 + x.ndim)) + (0, ))
return result | [
"def",
"construct_zernike_polynomials",
"(",
"x",
",",
"y",
",",
"zernike_indexes",
",",
"mask",
"=",
"None",
",",
"weight",
"=",
"None",
")",
":",
"if",
"x",
".",
"shape",
"!=",
"y",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"X and Y must have the same shape\"",
")",
"if",
"mask",
"is",
"None",
":",
"pass",
"elif",
"mask",
".",
"shape",
"!=",
"x",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"The mask must have the same shape as X and Y\"",
")",
"else",
":",
"x",
"=",
"x",
"[",
"mask",
"]",
"y",
"=",
"y",
"[",
"mask",
"]",
"if",
"weight",
"is",
"not",
"None",
":",
"weight",
"=",
"weight",
"[",
"mask",
"]",
"lut",
"=",
"construct_zernike_lookuptable",
"(",
"zernike_indexes",
")",
"# precompute poly. coeffs.",
"nzernikes",
"=",
"zernike_indexes",
".",
"shape",
"[",
"0",
"]",
"# compute radii",
"r_square",
"=",
"np",
".",
"square",
"(",
"x",
")",
"# r_square = x**2",
"np",
".",
"add",
"(",
"r_square",
",",
"np",
".",
"square",
"(",
"y",
")",
",",
"out",
"=",
"r_square",
")",
"# r_square = x**2 + y**2",
"# z = y + 1j*x",
"# each Zernike polynomial is poly(r)*(r**m * np.exp(1j*m*phi)) ==",
"# poly(r)*(y + 1j*x)**m",
"z",
"=",
"np",
".",
"empty",
"(",
"x",
".",
"shape",
",",
"np",
".",
"complex",
")",
"np",
".",
"copyto",
"(",
"z",
".",
"real",
",",
"y",
")",
"np",
".",
"copyto",
"(",
"z",
".",
"imag",
",",
"x",
")",
"# preallocate buffers",
"s",
"=",
"np",
".",
"empty_like",
"(",
"x",
")",
"zf",
"=",
"np",
".",
"zeros",
"(",
"(",
"nzernikes",
",",
")",
"+",
"x",
".",
"shape",
",",
"np",
".",
"complex",
")",
"z_pows",
"=",
"{",
"}",
"for",
"idx",
",",
"(",
"n",
",",
"m",
")",
"in",
"enumerate",
"(",
"zernike_indexes",
")",
":",
"s",
"[",
":",
"]",
"=",
"0",
"if",
"not",
"m",
"in",
"z_pows",
":",
"if",
"m",
"==",
"0",
":",
"z_pows",
"[",
"m",
"]",
"=",
"np",
".",
"complex",
"(",
"1.0",
")",
"else",
":",
"z_pows",
"[",
"m",
"]",
"=",
"z",
"if",
"m",
"==",
"1",
"else",
"(",
"z",
"**",
"m",
")",
"z_pow",
"=",
"z_pows",
"[",
"m",
"]",
"# use Horner scheme",
"for",
"k",
"in",
"range",
"(",
"(",
"n",
"-",
"m",
")",
"//",
"2",
"+",
"1",
")",
":",
"s",
"*=",
"r_square",
"s",
"+=",
"lut",
"[",
"idx",
",",
"k",
"]",
"s",
"[",
"r_square",
">",
"1",
"]",
"=",
"0",
"if",
"weight",
"is",
"not",
"None",
":",
"s",
"*=",
"weight",
".",
"astype",
"(",
"s",
".",
"dtype",
")",
"if",
"m",
"==",
"0",
":",
"np",
".",
"copyto",
"(",
"zf",
"[",
"idx",
"]",
",",
"s",
")",
"# zf[idx] = s",
"else",
":",
"np",
".",
"multiply",
"(",
"s",
",",
"z_pow",
",",
"out",
"=",
"zf",
"[",
"idx",
"]",
")",
"# zf[idx] = s*exp_term",
"if",
"mask",
"is",
"None",
":",
"result",
"=",
"zf",
".",
"transpose",
"(",
"tuple",
"(",
"range",
"(",
"1",
",",
"1",
"+",
"x",
".",
"ndim",
")",
")",
"+",
"(",
"0",
",",
")",
")",
"else",
":",
"result",
"=",
"np",
".",
"zeros",
"(",
"mask",
".",
"shape",
"+",
"(",
"nzernikes",
",",
")",
",",
"np",
".",
"complex",
")",
"result",
"[",
"mask",
"]",
"=",
"zf",
".",
"transpose",
"(",
"tuple",
"(",
"range",
"(",
"1",
",",
"1",
"+",
"x",
".",
"ndim",
")",
")",
"+",
"(",
"0",
",",
")",
")",
"return",
"result"
]
| 37.892308 | 19.384615 |
def connect_post_namespaced_pod_exec(self, name, namespace, **kwargs): # noqa: E501
"""connect_post_namespaced_pod_exec # noqa: E501
connect POST requests to exec of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_post_namespaced_pod_exec(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodExecOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str command: Command is the remote command to execute. argv array. Not executed within a shell.
:param str container: Container in which to execute the command. Defaults to only container if there is only one container in the pod.
:param bool stderr: Redirect the standard error stream of the pod for this call. Defaults to true.
:param bool stdin: Redirect the standard input stream of the pod for this call. Defaults to false.
:param bool stdout: Redirect the standard output stream of the pod for this call. Defaults to true.
:param bool tty: TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_post_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.connect_post_namespaced_pod_exec_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | [
"def",
"connect_post_namespaced_pod_exec",
"(",
"self",
",",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"connect_post_namespaced_pod_exec_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"connect_post_namespaced_pod_exec_with_http_info",
"(",
"name",
",",
"namespace",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
]
| 64.535714 | 36.892857 |
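A minimal usage sketch for the exec call above, assuming a cluster reachable through a local kubeconfig and a pod named `my-pod` in the `default` namespace (both illustrative). In practice the Python client usually routes this call through `kubernetes.stream.stream`, since exec is served over a WebSocket upgrade; the parameter names follow the docstring above.

```python
from kubernetes import client, config
from kubernetes.stream import stream

config.load_kube_config()          # assumes a local kubeconfig is present
api = client.CoreV1Api()

# Run `ls` in the pod's only container and capture stdout.
output = stream(api.connect_post_namespaced_pod_exec,
                name='my-pod', namespace='default',
                command='ls', stderr=True, stdin=False,
                stdout=True, tty=False)
print(output)
```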
def find_first_number(ll):
""" Returns nr of first entry parseable to float in ll, None otherwise"""
for nr, entry in enumerate(ll):
try:
float(entry)
except (ValueError, TypeError) as e:
pass
else:
return nr
return None | [
"def",
"find_first_number",
"(",
"ll",
")",
":",
"for",
"nr",
",",
"entry",
"in",
"enumerate",
"(",
"ll",
")",
":",
"try",
":",
"float",
"(",
"entry",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
"as",
"e",
":",
"pass",
"else",
":",
"return",
"nr",
"return",
"None"
]
| 28.3 | 16.2 |
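A quick, self-contained check of the behaviour of `find_first_number` above; the body is copied from the record (minus the unused exception variable) and the sample lists are illustrative.

```python
def find_first_number(ll):
    """Returns the index of the first entry parseable to float, None otherwise."""
    for nr, entry in enumerate(ll):
        try:
            float(entry)
        except (ValueError, TypeError):
            pass
        else:
            return nr
    return None

assert find_first_number(['a', None, '3.5', 7]) == 2   # '3.5' is the first parseable entry
assert find_first_number(['x', 'y']) is None
```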
async def create_stream_player(self, url, opts=ydl_opts):
"""Creates a streamer that plays from a URL"""
self.current_download_elapsed = 0
self.streamer = await self.vclient.create_ytdl_player(url, ytdl_options=opts, after=self.vafter_ts)
self.state = "ready"
await self.setup_streamer()
self.nowplayinglog.debug(self.streamer.title)
self.nowplayingauthorlog.debug(self.streamer.uploader if self.streamer.uploader is not None else "Unknown")
self.current_duration = 0
self.is_live = True
info = self.streamer.yt.extract_info(url, download=False)
self.nowplayingsourcelog.info(api_music.parse_source(info))
play_state = "Streaming" if self.is_live else "Playing"
await self.set_topic("{} {}".format(play_state, self.streamer.title))
self.statuslog.debug(play_state) | [
"async",
"def",
"create_stream_player",
"(",
"self",
",",
"url",
",",
"opts",
"=",
"ydl_opts",
")",
":",
"self",
".",
"current_download_elapsed",
"=",
"0",
"self",
".",
"streamer",
"=",
"await",
"self",
".",
"vclient",
".",
"create_ytdl_player",
"(",
"url",
",",
"ytdl_options",
"=",
"opts",
",",
"after",
"=",
"self",
".",
"vafter_ts",
")",
"self",
".",
"state",
"=",
"\"ready\"",
"await",
"self",
".",
"setup_streamer",
"(",
")",
"self",
".",
"nowplayinglog",
".",
"debug",
"(",
"self",
".",
"streamer",
".",
"title",
")",
"self",
".",
"nowplayingauthorlog",
".",
"debug",
"(",
"self",
".",
"streamer",
".",
"uploader",
"if",
"self",
".",
"streamer",
".",
"uploader",
"is",
"not",
"None",
"else",
"\"Unknown\"",
")",
"self",
".",
"current_duration",
"=",
"0",
"self",
".",
"is_live",
"=",
"True",
"info",
"=",
"self",
".",
"streamer",
".",
"yt",
".",
"extract_info",
"(",
"url",
",",
"download",
"=",
"False",
")",
"self",
".",
"nowplayingsourcelog",
".",
"info",
"(",
"api_music",
".",
"parse_source",
"(",
"info",
")",
")",
"play_state",
"=",
"\"Streaming\"",
"if",
"self",
".",
"is_live",
"else",
"\"Playing\"",
"await",
"self",
".",
"set_topic",
"(",
"\"{} {}\"",
".",
"format",
"(",
"play_state",
",",
"self",
".",
"streamer",
".",
"title",
")",
")",
"self",
".",
"statuslog",
".",
"debug",
"(",
"play_state",
")"
]
| 43.1 | 26.1 |
def build_status(namespace, name, branch='master') -> pin:
'''Returns the current status of the build'''
return ci_data(namespace, name, branch).get('build_success', None) | [
"def",
"build_status",
"(",
"namespace",
",",
"name",
",",
"branch",
"=",
"'master'",
")",
"->",
"pin",
":",
"return",
"ci_data",
"(",
"namespace",
",",
"name",
",",
"branch",
")",
".",
"get",
"(",
"'build_success'",
",",
"None",
")"
]
| 59 | 19 |
def step_interpolation(x, xp, fp, **kwargs):
"""Multi-dimensional step interpolation.
Returns the multi-dimensional step interpolant to a function with
given discrete data points (xp, fp), evaluated at x.
Note that *N and *M indicate zero or more dimensions.
Args:
x: An array of shape [*N], the x-coordinates of the interpolated values.
xp: An np.array of shape [D], the x-coordinates of the data points, must be
increasing.
fp: An np.array of shape [D, *M], the y-coordinates of the data points.
**kwargs: Unused.
Returns:
An array of shape [*N, *M], the interpolated values.
"""
del kwargs # Unused.
xp = np.expand_dims(xp, -1)
lower, upper = xp[:-1], xp[1:]
conditions = (x >= lower) & (x < upper)
# Underflow and overflow conditions and values. Values default to fp[0] and
# fp[-1] respectively.
conditions = np.concatenate([[x < xp[0]], conditions, [x >= xp[-1]]])
values = np.concatenate([[fp[0]], fp])
assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.'
indices = np.argmax(conditions, 0)
return values[indices].astype(np.float32) | [
"def",
"step_interpolation",
"(",
"x",
",",
"xp",
",",
"fp",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"kwargs",
"# Unused.",
"xp",
"=",
"np",
".",
"expand_dims",
"(",
"xp",
",",
"-",
"1",
")",
"lower",
",",
"upper",
"=",
"xp",
"[",
":",
"-",
"1",
"]",
",",
"xp",
"[",
"1",
":",
"]",
"conditions",
"=",
"(",
"x",
">=",
"lower",
")",
"&",
"(",
"x",
"<",
"upper",
")",
"# Underflow and overflow conditions and values. Values default to fp[0] and",
"# fp[-1] respectively.",
"conditions",
"=",
"np",
".",
"concatenate",
"(",
"[",
"[",
"x",
"<",
"xp",
"[",
"0",
"]",
"]",
",",
"conditions",
",",
"[",
"x",
">=",
"xp",
"[",
"-",
"1",
"]",
"]",
"]",
")",
"values",
"=",
"np",
".",
"concatenate",
"(",
"[",
"[",
"fp",
"[",
"0",
"]",
"]",
",",
"fp",
"]",
")",
"assert",
"np",
".",
"all",
"(",
"np",
".",
"sum",
"(",
"conditions",
",",
"0",
")",
"==",
"1",
")",
",",
"'xp must be increasing.'",
"indices",
"=",
"np",
".",
"argmax",
"(",
"conditions",
",",
"0",
")",
"return",
"values",
"[",
"indices",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")"
]
| 37.689655 | 20.965517 |
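A small numpy sketch of the step (zero-order hold) behaviour documented above, assuming `step_interpolation` from the record is in scope; the breakpoints and values are illustrative.

```python
import numpy as np

xp = np.array([0.0, 1.0, 2.0])        # breakpoints, must be increasing
fp = np.array([10.0, 20.0, 30.0])     # value held on each step
x = np.array([-0.5, 0.5, 1.5, 2.5])   # query points

print(step_interpolation(x, xp, fp))
# [10. 10. 20. 30.] -- underflow clamps to fp[0], overflow to fp[-1]
```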
def antiscia(self):
""" Returns antiscia object. """
obj = self.copy()
obj.type = const.OBJ_GENERIC
obj.relocate(360 - obj.lon + 180)
return obj | [
"def",
"antiscia",
"(",
"self",
")",
":",
"obj",
"=",
"self",
".",
"copy",
"(",
")",
"obj",
".",
"type",
"=",
"const",
".",
"OBJ_GENERIC",
"obj",
".",
"relocate",
"(",
"360",
"-",
"obj",
".",
"lon",
"+",
"180",
")",
"return",
"obj"
]
| 29.833333 | 10.5 |
def _synthesize(self):
"""
Assigns all placeholder labels to actual values and implicitly declares the ``ro``
register for backwards compatibility.
Changed in 1.9: Either all qubits must be defined or all undefined. If qubits are
undefined, this method will not help you. You must explicitly call `address_qubits`
which will return a new Program.
Changed in 1.9: This function now returns ``self`` and updates
``self._synthesized_instructions``.
Changed in 2.0: This function will add an instruction to the top of the program
to declare a register of bits called ``ro`` if and only if there are no other
declarations in the program.
:return: This object with the ``_synthesized_instructions`` member set.
"""
self._synthesized_instructions = instantiate_labels(self._instructions)
self._synthesized_instructions = implicitly_declare_ro(self._synthesized_instructions)
return self | [
"def",
"_synthesize",
"(",
"self",
")",
":",
"self",
".",
"_synthesized_instructions",
"=",
"instantiate_labels",
"(",
"self",
".",
"_instructions",
")",
"self",
".",
"_synthesized_instructions",
"=",
"implicitly_declare_ro",
"(",
"self",
".",
"_synthesized_instructions",
")",
"return",
"self"
]
| 47.190476 | 29.285714 |
def check_arg_compatibility(args: argparse.Namespace):
"""
Check if some arguments are incompatible with each other.
:param args: Arguments as returned by argparse.
"""
if args.lhuc is not None:
# Actually this check is a bit too strict
check_condition(args.encoder != C.CONVOLUTION_TYPE or args.decoder != C.CONVOLUTION_TYPE,
"LHUC is not supported for convolutional models yet.")
check_condition(args.decoder != C.TRANSFORMER_TYPE or C.LHUC_STATE_INIT not in args.lhuc,
"The %s options only applies to RNN models" % C.LHUC_STATE_INIT)
if args.decoder_only:
check_condition(args.decoder != C.TRANSFORMER_TYPE and args.decoder != C.CONVOLUTION_TYPE,
"Decoder pre-training currently supports RNN decoders only.") | [
"def",
"check_arg_compatibility",
"(",
"args",
":",
"argparse",
".",
"Namespace",
")",
":",
"if",
"args",
".",
"lhuc",
"is",
"not",
"None",
":",
"# Actually this check is a bit too strict",
"check_condition",
"(",
"args",
".",
"encoder",
"!=",
"C",
".",
"CONVOLUTION_TYPE",
"or",
"args",
".",
"decoder",
"!=",
"C",
".",
"CONVOLUTION_TYPE",
",",
"\"LHUC is not supported for convolutional models yet.\"",
")",
"check_condition",
"(",
"args",
".",
"decoder",
"!=",
"C",
".",
"TRANSFORMER_TYPE",
"or",
"C",
".",
"LHUC_STATE_INIT",
"not",
"in",
"args",
".",
"lhuc",
",",
"\"The %s options only applies to RNN models\"",
"%",
"C",
".",
"LHUC_STATE_INIT",
")",
"if",
"args",
".",
"decoder_only",
":",
"check_condition",
"(",
"args",
".",
"decoder",
"!=",
"C",
".",
"TRANSFORMER_TYPE",
"and",
"args",
".",
"decoder",
"!=",
"C",
".",
"CONVOLUTION_TYPE",
",",
"\"Decoder pre-training currently supports RNN decoders only.\"",
")"
]
| 48.588235 | 29.647059 |
def _handle_offset_response(self, response):
"""
Handle responses to both OffsetRequest and OffsetFetchRequest, since
they are similar enough.
:param response:
A tuple of a single OffsetFetchResponse or OffsetResponse
"""
# Got a response, clear our outstanding request deferred
self._request_d = None
# Successful request, reset our retry delay, count, etc
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1
response = response[0]
if hasattr(response, 'offsets'):
# It's a response to an OffsetRequest
self._fetch_offset = response.offsets[0]
else:
# It's a response to an OffsetFetchRequest
# Make sure we got a valid offset back. Kafka uses -1 to indicate
# no committed offset was retrieved
if response.offset == OFFSET_NOT_COMMITTED:
self._fetch_offset = OFFSET_EARLIEST
else:
self._fetch_offset = response.offset + 1
self._last_committed_offset = response.offset
self._do_fetch() | [
"def",
"_handle_offset_response",
"(",
"self",
",",
"response",
")",
":",
"# Got a response, clear our outstanding request deferred",
"self",
".",
"_request_d",
"=",
"None",
"# Successful request, reset our retry delay, count, etc",
"self",
".",
"retry_delay",
"=",
"self",
".",
"retry_init_delay",
"self",
".",
"_fetch_attempt_count",
"=",
"1",
"response",
"=",
"response",
"[",
"0",
"]",
"if",
"hasattr",
"(",
"response",
",",
"'offsets'",
")",
":",
"# It's a response to an OffsetRequest",
"self",
".",
"_fetch_offset",
"=",
"response",
".",
"offsets",
"[",
"0",
"]",
"else",
":",
"# It's a response to an OffsetFetchRequest",
"# Make sure we got a valid offset back. Kafka uses -1 to indicate",
"# no committed offset was retrieved",
"if",
"response",
".",
"offset",
"==",
"OFFSET_NOT_COMMITTED",
":",
"self",
".",
"_fetch_offset",
"=",
"OFFSET_EARLIEST",
"else",
":",
"self",
".",
"_fetch_offset",
"=",
"response",
".",
"offset",
"+",
"1",
"self",
".",
"_last_committed_offset",
"=",
"response",
".",
"offset",
"self",
".",
"_do_fetch",
"(",
")"
]
| 39.172414 | 17.241379 |
def command_getkeys(self, command, *args, encoding='utf-8'):
"""Extract keys given a full Redis command."""
return self.execute(b'COMMAND', b'GETKEYS', command, *args,
encoding=encoding) | [
"def",
"command_getkeys",
"(",
"self",
",",
"command",
",",
"*",
"args",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"return",
"self",
".",
"execute",
"(",
"b'COMMAND'",
",",
"b'GETKEYS'",
",",
"command",
",",
"*",
"args",
",",
"encoding",
"=",
"encoding",
")"
]
| 56.75 | 13.25 |
def get_object(self, cat, **kwargs):
"""
        This method is used for retrieving objects from Facebook. "cat", the category, must be
        passed. When cat is "single", pass the "id" and desired "fields" of the single object. If
        cat is "multiple", only pass the "ids" of the objects to be fetched.
"""
if 'id' not in kwargs.keys():
kwargs['id']=''
res=request.get_object_cat1(self.con, self.token, cat, kwargs)
return res | [
"def",
"get_object",
"(",
"self",
",",
"cat",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'id'",
"not",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"kwargs",
"[",
"'id'",
"]",
"=",
"''",
"res",
"=",
"request",
".",
"get_object_cat1",
"(",
"self",
".",
"con",
",",
"self",
".",
"token",
",",
"cat",
",",
"kwargs",
")",
"return",
"res"
]
| 56 | 23.8 |
def evaluate(self, test_file, save_dir=None, logger=None, num_buckets_test=10, test_batch_size=5000):
"""Run evaluation on test set
Parameters
----------
test_file : str
path to test set
save_dir : str
where to store intermediate results and log
logger : logging.logger
logger for printing results
num_buckets_test : int
number of clusters for sentences from test set
test_batch_size : int
batch size of test set
Returns
-------
tuple
UAS, LAS
"""
parser = self._parser
vocab = self._vocab
with mx.Context(mxnet_prefer_gpu()):
UAS, LAS, speed = evaluate_official_script(parser, vocab, num_buckets_test, test_batch_size,
test_file, os.path.join(save_dir, 'valid_tmp'))
if logger is None:
logger = init_logger(save_dir, 'test.log')
logger.info('Test: UAS %.2f%% LAS %.2f%% %d sents/s' % (UAS, LAS, speed))
return UAS, LAS | [
"def",
"evaluate",
"(",
"self",
",",
"test_file",
",",
"save_dir",
"=",
"None",
",",
"logger",
"=",
"None",
",",
"num_buckets_test",
"=",
"10",
",",
"test_batch_size",
"=",
"5000",
")",
":",
"parser",
"=",
"self",
".",
"_parser",
"vocab",
"=",
"self",
".",
"_vocab",
"with",
"mx",
".",
"Context",
"(",
"mxnet_prefer_gpu",
"(",
")",
")",
":",
"UAS",
",",
"LAS",
",",
"speed",
"=",
"evaluate_official_script",
"(",
"parser",
",",
"vocab",
",",
"num_buckets_test",
",",
"test_batch_size",
",",
"test_file",
",",
"os",
".",
"path",
".",
"join",
"(",
"save_dir",
",",
"'valid_tmp'",
")",
")",
"if",
"logger",
"is",
"None",
":",
"logger",
"=",
"init_logger",
"(",
"save_dir",
",",
"'test.log'",
")",
"logger",
".",
"info",
"(",
"'Test: UAS %.2f%% LAS %.2f%% %d sents/s'",
"%",
"(",
"UAS",
",",
"LAS",
",",
"speed",
")",
")",
"return",
"UAS",
",",
"LAS"
]
| 35.064516 | 21.903226 |
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec):
"""
Add information about an edge linking two critical points.
This actually describes two edges:
from_idx ------ idx ------ to_idx
However, in practice, from_idx and to_idx will typically be
atom nuclei, with the center node (idx) referring to a bond
critical point. Thus, it will be more convenient to model
this as a single edge linking nuclei with the properties
of the bond critical point stored as an edge attribute.
:param idx: index of node
:param from_idx: from index of node
:param from_lvec: vector of lattice image the from node is in
as tuple of ints
:param to_idx: to index of node
:param to_lvec: vector of lattice image the to node is in as
tuple of ints
:return:
"""
self.edges[idx] = {'from_idx': from_idx, 'from_lvec': from_lvec,
'to_idx': to_idx, 'to_lvec': to_lvec} | [
"def",
"_add_edge",
"(",
"self",
",",
"idx",
",",
"from_idx",
",",
"from_lvec",
",",
"to_idx",
",",
"to_lvec",
")",
":",
"self",
".",
"edges",
"[",
"idx",
"]",
"=",
"{",
"'from_idx'",
":",
"from_idx",
",",
"'from_lvec'",
":",
"from_lvec",
",",
"'to_idx'",
":",
"to_idx",
",",
"'to_lvec'",
":",
"to_lvec",
"}"
]
| 40.4 | 20.88 |
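The edge record built above is just a small dict keyed by the node index; an illustrative entry (indices and lattice vectors are made up) linking nucleus 0 in the home cell to nucleus 1 in the neighbouring cell along c looks like:

```python
edges = {}
# node 10 is the bond critical point sitting between the two nuclei
edges[10] = {'from_idx': 0, 'from_lvec': (0, 0, 0),
             'to_idx': 1, 'to_lvec': (0, 0, 1)}
```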
def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
elif isinstance(pic, torch.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
elif pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)
elif isinstance(pic, np.ndarray):
if pic.ndim not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
elif pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
npimg = pic
if isinstance(pic, torch.FloatTensor):
pic = pic.mul(255).byte()
if isinstance(pic, torch.Tensor):
npimg = np.transpose(pic.numpy(), (1, 2, 0))
if not isinstance(npimg, np.ndarray):
raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
'not {}'.format(type(npimg)))
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = 'L'
elif npimg.dtype == np.int16:
expected_mode = 'I;16'
elif npimg.dtype == np.int32:
expected_mode = 'I'
elif npimg.dtype == np.float32:
expected_mode = 'F'
if mode is not None and mode != expected_mode:
raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
.format(mode, np.dtype, expected_mode))
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ['LA']
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'LA'
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGBA'
else:
permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGB'
if mode is None:
raise TypeError('Input type {} is not supported'.format(npimg.dtype))
return Image.fromarray(npimg, mode=mode) | [
"def",
"to_pil_image",
"(",
"pic",
",",
"mode",
"=",
"None",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"Tensor",
")",
"or",
"isinstance",
"(",
"pic",
",",
"np",
".",
"ndarray",
")",
")",
":",
"raise",
"TypeError",
"(",
"'pic should be Tensor or ndarray. Got {}.'",
".",
"format",
"(",
"type",
"(",
"pic",
")",
")",
")",
"elif",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"Tensor",
")",
":",
"if",
"pic",
".",
"ndimension",
"(",
")",
"not",
"in",
"{",
"2",
",",
"3",
"}",
":",
"raise",
"ValueError",
"(",
"'pic should be 2/3 dimensional. Got {} dimensions.'",
".",
"format",
"(",
"pic",
".",
"ndimension",
"(",
")",
")",
")",
"elif",
"pic",
".",
"ndimension",
"(",
")",
"==",
"2",
":",
"# if 2D image, add channel dimension (CHW)",
"pic",
"=",
"pic",
".",
"unsqueeze",
"(",
"0",
")",
"elif",
"isinstance",
"(",
"pic",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"pic",
".",
"ndim",
"not",
"in",
"{",
"2",
",",
"3",
"}",
":",
"raise",
"ValueError",
"(",
"'pic should be 2/3 dimensional. Got {} dimensions.'",
".",
"format",
"(",
"pic",
".",
"ndim",
")",
")",
"elif",
"pic",
".",
"ndim",
"==",
"2",
":",
"# if 2D image, add channel dimension (HWC)",
"pic",
"=",
"np",
".",
"expand_dims",
"(",
"pic",
",",
"2",
")",
"npimg",
"=",
"pic",
"if",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"FloatTensor",
")",
":",
"pic",
"=",
"pic",
".",
"mul",
"(",
"255",
")",
".",
"byte",
"(",
")",
"if",
"isinstance",
"(",
"pic",
",",
"torch",
".",
"Tensor",
")",
":",
"npimg",
"=",
"np",
".",
"transpose",
"(",
"pic",
".",
"numpy",
"(",
")",
",",
"(",
"1",
",",
"2",
",",
"0",
")",
")",
"if",
"not",
"isinstance",
"(",
"npimg",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"TypeError",
"(",
"'Input pic must be a torch.Tensor or NumPy ndarray, '",
"+",
"'not {}'",
".",
"format",
"(",
"type",
"(",
"npimg",
")",
")",
")",
"if",
"npimg",
".",
"shape",
"[",
"2",
"]",
"==",
"1",
":",
"expected_mode",
"=",
"None",
"npimg",
"=",
"npimg",
"[",
":",
",",
":",
",",
"0",
"]",
"if",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"expected_mode",
"=",
"'L'",
"elif",
"npimg",
".",
"dtype",
"==",
"np",
".",
"int16",
":",
"expected_mode",
"=",
"'I;16'",
"elif",
"npimg",
".",
"dtype",
"==",
"np",
".",
"int32",
":",
"expected_mode",
"=",
"'I'",
"elif",
"npimg",
".",
"dtype",
"==",
"np",
".",
"float32",
":",
"expected_mode",
"=",
"'F'",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"!=",
"expected_mode",
":",
"raise",
"ValueError",
"(",
"\"Incorrect mode ({}) supplied for input type {}. Should be {}\"",
".",
"format",
"(",
"mode",
",",
"np",
".",
"dtype",
",",
"expected_mode",
")",
")",
"mode",
"=",
"expected_mode",
"elif",
"npimg",
".",
"shape",
"[",
"2",
"]",
"==",
"2",
":",
"permitted_2_channel_modes",
"=",
"[",
"'LA'",
"]",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"not",
"in",
"permitted_2_channel_modes",
":",
"raise",
"ValueError",
"(",
"\"Only modes {} are supported for 2D inputs\"",
".",
"format",
"(",
"permitted_2_channel_modes",
")",
")",
"if",
"mode",
"is",
"None",
"and",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"mode",
"=",
"'LA'",
"elif",
"npimg",
".",
"shape",
"[",
"2",
"]",
"==",
"4",
":",
"permitted_4_channel_modes",
"=",
"[",
"'RGBA'",
",",
"'CMYK'",
",",
"'RGBX'",
"]",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"not",
"in",
"permitted_4_channel_modes",
":",
"raise",
"ValueError",
"(",
"\"Only modes {} are supported for 4D inputs\"",
".",
"format",
"(",
"permitted_4_channel_modes",
")",
")",
"if",
"mode",
"is",
"None",
"and",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"mode",
"=",
"'RGBA'",
"else",
":",
"permitted_3_channel_modes",
"=",
"[",
"'RGB'",
",",
"'YCbCr'",
",",
"'HSV'",
"]",
"if",
"mode",
"is",
"not",
"None",
"and",
"mode",
"not",
"in",
"permitted_3_channel_modes",
":",
"raise",
"ValueError",
"(",
"\"Only modes {} are supported for 3D inputs\"",
".",
"format",
"(",
"permitted_3_channel_modes",
")",
")",
"if",
"mode",
"is",
"None",
"and",
"npimg",
".",
"dtype",
"==",
"np",
".",
"uint8",
":",
"mode",
"=",
"'RGB'",
"if",
"mode",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'Input type {} is not supported'",
".",
"format",
"(",
"npimg",
".",
"dtype",
")",
")",
"return",
"Image",
".",
"fromarray",
"(",
"npimg",
",",
"mode",
"=",
"mode",
")"
]
| 39.164706 | 23.611765 |
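Two hedged usage examples for the conversion above, assuming `to_pil_image` from the record is in scope (the array shapes are illustrative): a float CHW tensor is rescaled to uint8 and becomes an RGB image, and a 2-D uint8 array gets a channel dimension and becomes a grayscale ('L') image.

```python
import numpy as np
import torch

chw = torch.rand(3, 32, 32)                        # float CHW in [0, 1]
rgb = to_pil_image(chw)                            # mul(255).byte(), HWC, mode 'RGB'
gray = to_pil_image(np.zeros((32, 32), np.uint8))  # channel dim added, mode 'L'
print(rgb.mode, rgb.size, gray.mode)               # RGB (32, 32) L
```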
def ratio(self, ratio):
"""
The split ratio of the corporate action - i.e. the ratio of new shares to old shares
:param ratio: A tuple representing (original_count, new_count). For example (1, 2) is a doubling stock split.
(3, 1) is a 3:1 reverse stock split.
:return:
"""
if isinstance(ratio, tuple):
self._ratio = ratio
else:
raise TypeError('Invalid ratio type: %s' % type(ratio)) | [
"def",
"ratio",
"(",
"self",
",",
"ratio",
")",
":",
"if",
"isinstance",
"(",
"ratio",
",",
"tuple",
")",
":",
"self",
".",
"_ratio",
"=",
"ratio",
"else",
":",
"raise",
"TypeError",
"(",
"'Invalid ratio type: %s'",
"%",
"type",
"(",
"ratio",
")",
")"
]
| 42 | 22 |
def create_stack(Name=None, Region=None, VpcId=None, Attributes=None, ServiceRoleArn=None, DefaultInstanceProfileArn=None, DefaultOs=None, HostnameTheme=None, DefaultAvailabilityZone=None, DefaultSubnetId=None, CustomJson=None, ConfigurationManager=None, ChefConfiguration=None, UseCustomCookbooks=None, UseOpsworksSecurityGroups=None, CustomCookbooksSource=None, DefaultSshKeyName=None, DefaultRootDeviceType=None, AgentVersion=None):
"""
Creates a new stack. For more information, see Create a New Stack .
See also: AWS API Documentation
:example: response = client.create_stack(
Name='string',
Region='string',
VpcId='string',
Attributes={
'string': 'string'
},
ServiceRoleArn='string',
DefaultInstanceProfileArn='string',
DefaultOs='string',
HostnameTheme='string',
DefaultAvailabilityZone='string',
DefaultSubnetId='string',
CustomJson='string',
ConfigurationManager={
'Name': 'string',
'Version': 'string'
},
ChefConfiguration={
'ManageBerkshelf': True|False,
'BerkshelfVersion': 'string'
},
UseCustomCookbooks=True|False,
UseOpsworksSecurityGroups=True|False,
CustomCookbooksSource={
'Type': 'git'|'svn'|'archive'|'s3',
'Url': 'string',
'Username': 'string',
'Password': 'string',
'SshKey': 'string',
'Revision': 'string'
},
DefaultSshKeyName='string',
DefaultRootDeviceType='ebs'|'instance-store',
AgentVersion='string'
)
:type Name: string
:param Name: [REQUIRED]
The stack name.
:type Region: string
:param Region: [REQUIRED]
The stack's AWS region, such as 'ap-south-1'. For more information about Amazon regions, see Regions and Endpoints .
:type VpcId: string
:param VpcId: The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.
If your account supports EC2-Classic, the default value is no VPC .
If your account does not support EC2-Classic, the default value is the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively.
If you specify a nondefault VPC ID, note the following:
It must belong to a VPC in your account that is in the specified region.
You must specify a value for DefaultSubnetId .
For more information on how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC . For more information on default VPC and EC2-Classic, see Supported Platforms .
:type Attributes: dict
:param Attributes: One or more user-defined key-value pairs to be added to the stack attributes.
(string) --
(string) --
:type ServiceRoleArn: string
:param ServiceRoleArn: [REQUIRED]
The stack's AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers .
:type DefaultInstanceProfileArn: string
:param DefaultInstanceProfileArn: [REQUIRED]
The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers .
:type DefaultOs: string
:param DefaultOs: The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.
A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2016.09 , Amazon Linux 2016.03 , Amazon Linux 2015.09 , or Amazon Linux 2015.03 .
A supported Ubuntu operating system, such as Ubuntu 16.04 LTS , Ubuntu 14.04 LTS , or Ubuntu 12.04 LTS .
CentOS Linux 7
Red Hat Enterprise Linux 7
A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base , Microsoft Windows Server 2012 R2 with SQL Server Express , Microsoft Windows Server 2012 R2 with SQL Server Standard , or Microsoft Windows Server 2012 R2 with SQL Server Web .
A custom AMI: Custom . You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs .
The default option is the current Amazon Linux version. For more information on the supported operating systems, see AWS OpsWorks Stacks Operating Systems .
:type HostnameTheme: string
:param HostnameTheme: The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent , which creates host names by appending integers to the layer's short name. The other themes are:
Baked_Goods
Clouds
Europe_Cities
Fruits
Greek_Deities
Legendary_creatures_from_Japan
Planets_and_Moons
Roman_Deities
Scottish_Islands
US_Cities
Wild_Cats
To obtain a generated host name, call GetHostNameSuggestion , which returns a host name based on the current theme.
:type DefaultAvailabilityZone: string
:param DefaultAvailabilityZone: The stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints . If you also specify a value for DefaultSubnetId , the subnet must be in the same zone. For more information, see the VpcId parameter description.
:type DefaultSubnetId: string
:param DefaultSubnetId: The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone , the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description.
:type CustomJson: string
:param CustomJson: A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:
'{\'key1\': \'value1\', \'key2\': \'value2\',...}'
For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes .
:type ConfigurationManager: dict
:param ConfigurationManager: The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 11.4.
Name (string) --The name. This parameter must be set to 'Chef'.
Version (string) --The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4.
:type ChefConfiguration: dict
:param ChefConfiguration: A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack .
ManageBerkshelf (boolean) --Whether to enable Berkshelf.
BerkshelfVersion (string) --The Berkshelf version.
:type UseCustomCookbooks: boolean
:param UseCustomCookbooks: Whether the stack uses custom cookbooks.
:type UseOpsworksSecurityGroups: boolean
:param UseOpsworksSecurityGroups: Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.
AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings:
True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.
For more information, see Create a New Stack .
:type CustomCookbooksSource: dict
:param CustomCookbooksSource: Contains the information required to retrieve an app or cookbook from a repository. For more information, see Creating Apps or Custom Recipes and Cookbooks .
Type (string) --The repository type.
Url (string) --The source URL.
Username (string) --This parameter depends on the repository type.
For Amazon S3 bundles, set Username to the appropriate IAM access key ID.
For HTTP bundles, Git repositories, and Subversion repositories, set Username to the user name.
Password (string) --When included in a request, the parameter depends on the repository type.
For Amazon S3 bundles, set Password to the appropriate IAM secret access key.
For HTTP bundles and Subversion repositories, set Password to the password.
For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html .
In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.
SshKey (string) --In requests, the repository's SSH key.
In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.
Revision (string) --The application's version. AWS OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed.
:type DefaultSshKeyName: string
:param DefaultSshKeyName: A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access . You can override this setting by specifying a different key pair, or no key pair, when you create an instance .
:type DefaultRootDeviceType: string
:param DefaultRootDeviceType: The default root device type. This value is the default for all instances in the stack, but you can override it when you create an instance. The default option is instance-store . For more information, see Storage for the Root Device .
:type AgentVersion: string
:param AgentVersion: The default AWS OpsWorks Stacks agent version. You have the following options:
Auto-update - Set this parameter to LATEST . AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.
Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances.
The default setting is the most recent release of the agent. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions . AgentVersion cannot be set to Chef 12.2.
Note
You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.
:rtype: dict
:return: {
'StackId': 'string'
}
"""
pass | [
"def",
"create_stack",
"(",
"Name",
"=",
"None",
",",
"Region",
"=",
"None",
",",
"VpcId",
"=",
"None",
",",
"Attributes",
"=",
"None",
",",
"ServiceRoleArn",
"=",
"None",
",",
"DefaultInstanceProfileArn",
"=",
"None",
",",
"DefaultOs",
"=",
"None",
",",
"HostnameTheme",
"=",
"None",
",",
"DefaultAvailabilityZone",
"=",
"None",
",",
"DefaultSubnetId",
"=",
"None",
",",
"CustomJson",
"=",
"None",
",",
"ConfigurationManager",
"=",
"None",
",",
"ChefConfiguration",
"=",
"None",
",",
"UseCustomCookbooks",
"=",
"None",
",",
"UseOpsworksSecurityGroups",
"=",
"None",
",",
"CustomCookbooksSource",
"=",
"None",
",",
"DefaultSshKeyName",
"=",
"None",
",",
"DefaultRootDeviceType",
"=",
"None",
",",
"AgentVersion",
"=",
"None",
")",
":",
"pass"
]
| 71.147541 | 56.961749 |
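A minimal call sketch using boto3, with only the parameters the docstring above marks as [REQUIRED]; the stack name, region and ARNs are placeholders.

```python
import boto3

client = boto3.client('opsworks')
response = client.create_stack(
    Name='my-stack',
    Region='us-east-1',
    ServiceRoleArn='arn:aws:iam::123456789012:role/aws-opsworks-service-role',
    DefaultInstanceProfileArn=(
        'arn:aws:iam::123456789012:instance-profile/aws-opsworks-ec2-role'),
)
print(response['StackId'])
```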
def load_config(self, config=None):
''' loads a config file
Parameters:
config (str):
Optional name of manual config file to load
'''
# Read the config file
cfgname = (config or self.config_name)
cfgname = 'sdsswork' if cfgname is None else cfgname
assert isinstance(cfgname, six.string_types), 'config name must be a string'
config_name = cfgname if cfgname.endswith('.cfg') else '{0}.cfg'.format(cfgname)
self.configfile = os.path.join(self.treedir, 'data', config_name)
assert os.path.isfile(self.configfile) is True, 'configfile {0} must exist in the proper directory'.format(self.configfile)
self._cfg = SafeConfigParser()
try:
self._cfg.read(self.configfile.decode('utf-8'))
except AttributeError:
self._cfg.read(self.configfile)
# create the local tree environment
self.environ = OrderedDict()
self.environ['default'] = self._cfg.defaults()
# set the filesystem envvar to sas_base_dir
self._file_replace = '@FILESYSTEM@'
if self.environ['default']['filesystem'] == self._file_replace:
self.environ['default']['filesystem'] = self.sasbasedir | [
"def",
"load_config",
"(",
"self",
",",
"config",
"=",
"None",
")",
":",
"# Read the config file",
"cfgname",
"=",
"(",
"config",
"or",
"self",
".",
"config_name",
")",
"cfgname",
"=",
"'sdsswork'",
"if",
"cfgname",
"is",
"None",
"else",
"cfgname",
"assert",
"isinstance",
"(",
"cfgname",
",",
"six",
".",
"string_types",
")",
",",
"'config name must be a string'",
"config_name",
"=",
"cfgname",
"if",
"cfgname",
".",
"endswith",
"(",
"'.cfg'",
")",
"else",
"'{0}.cfg'",
".",
"format",
"(",
"cfgname",
")",
"self",
".",
"configfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"treedir",
",",
"'data'",
",",
"config_name",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"configfile",
")",
"is",
"True",
",",
"'configfile {0} must exist in the proper directory'",
".",
"format",
"(",
"self",
".",
"configfile",
")",
"self",
".",
"_cfg",
"=",
"SafeConfigParser",
"(",
")",
"try",
":",
"self",
".",
"_cfg",
".",
"read",
"(",
"self",
".",
"configfile",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"except",
"AttributeError",
":",
"self",
".",
"_cfg",
".",
"read",
"(",
"self",
".",
"configfile",
")",
"# create the local tree environment",
"self",
".",
"environ",
"=",
"OrderedDict",
"(",
")",
"self",
".",
"environ",
"[",
"'default'",
"]",
"=",
"self",
".",
"_cfg",
".",
"defaults",
"(",
")",
"# set the filesystem envvar to sas_base_dir",
"self",
".",
"_file_replace",
"=",
"'@FILESYSTEM@'",
"if",
"self",
".",
"environ",
"[",
"'default'",
"]",
"[",
"'filesystem'",
"]",
"==",
"self",
".",
"_file_replace",
":",
"self",
".",
"environ",
"[",
"'default'",
"]",
"[",
"'filesystem'",
"]",
"=",
"self",
".",
"sasbasedir"
]
| 39.967742 | 24.032258 |
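A self-contained sketch of the defaults-handling step above using the standard-library ConfigParser (SafeConfigParser is its older alias); the config contents are illustrative, not the real sdsswork.cfg, and the replacement value stands in for sas_base_dir.

```python
from collections import OrderedDict
from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("[DEFAULT]\nfilesystem = @FILESYSTEM@\nsas_base_dir = /sas\n")

environ = OrderedDict()
environ['default'] = dict(cfg.defaults())
# mirror the @FILESYSTEM@ replacement done at the end of load_config
if environ['default']['filesystem'] == '@FILESYSTEM@':
    environ['default']['filesystem'] = environ['default']['sas_base_dir']
print(environ['default']['filesystem'])   # /sas
```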
def nsmap(self):
"""
Returns the current namespace mapping as a dictionary
there are several problems with the map and we try to guess a few
things here:
    1) a URI can be mapped by many prefixes, so we have to decide which one to take
2) a prefix might map to an empty string (some packers)
3) uri+prefix mappings might be included several times
4) prefix might be empty
"""
NSMAP = dict()
# solve 3) by using a set
for k, v in set(self.namespaces):
s_prefix = self.sb[k]
s_uri = self.sb[v]
# Solve 2) & 4) by not including
if s_uri != "" and s_prefix != "":
# solve 1) by using the last one in the list
NSMAP[s_prefix] = s_uri
return NSMAP | [
"def",
"nsmap",
"(",
"self",
")",
":",
"NSMAP",
"=",
"dict",
"(",
")",
"# solve 3) by using a set",
"for",
"k",
",",
"v",
"in",
"set",
"(",
"self",
".",
"namespaces",
")",
":",
"s_prefix",
"=",
"self",
".",
"sb",
"[",
"k",
"]",
"s_uri",
"=",
"self",
".",
"sb",
"[",
"v",
"]",
"# Solve 2) & 4) by not including",
"if",
"s_uri",
"!=",
"\"\"",
"and",
"s_prefix",
"!=",
"\"\"",
":",
"# solve 1) by using the last one in the list",
"NSMAP",
"[",
"s_prefix",
"]",
"=",
"s_uri",
"return",
"NSMAP"
]
| 33.416667 | 18.75 |
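A standalone sketch of the same resolution rules applied to plain (prefix, uri) string pairs — the real method resolves string-pool indices via self.sb; the pairs below are illustrative.

```python
pairs = [('android', 'http://schemas.android.com/apk/res/android'),
         ('android', 'http://schemas.android.com/apk/res/android'),  # 3) duplicate mapping
         ('', 'urn:some-uri'),                                       # 4) empty prefix
         ('app', '')]                                                # 2) empty uri

nsmap = {}
for prefix, uri in set(pairs):        # the set drops exact duplicates
    if uri != '' and prefix != '':    # skip empty prefixes and uris
        nsmap[prefix] = uri           # last one seen wins for a shared prefix
print(nsmap)  # {'android': 'http://schemas.android.com/apk/res/android'}
```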
def on_menu_clear_interpretation(self, event):
'''
clear all current interpretations.
'''
# delete all previous interpretation
for sp in list(self.Data.keys()):
del self.Data[sp]['pars']
self.Data[sp]['pars'] = {}
self.Data[sp]['pars']['lab_dc_field'] = self.Data[sp]['lab_dc_field']
self.Data[sp]['pars']['er_specimen_name'] = self.Data[sp]['er_specimen_name']
self.Data[sp]['pars']['er_sample_name'] = self.Data[sp]['er_sample_name']
self.Data_samples = {}
self.Data_sites = {}
self.tmin_box.SetValue("")
self.tmax_box.SetValue("")
self.clear_boxes()
self.draw_figure(self.s) | [
"def",
"on_menu_clear_interpretation",
"(",
"self",
",",
"event",
")",
":",
"# delete all previous interpretation",
"for",
"sp",
"in",
"list",
"(",
"self",
".",
"Data",
".",
"keys",
"(",
")",
")",
":",
"del",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'pars'",
"]",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'pars'",
"]",
"=",
"{",
"}",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'pars'",
"]",
"[",
"'lab_dc_field'",
"]",
"=",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'lab_dc_field'",
"]",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'pars'",
"]",
"[",
"'er_specimen_name'",
"]",
"=",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'er_specimen_name'",
"]",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'pars'",
"]",
"[",
"'er_sample_name'",
"]",
"=",
"self",
".",
"Data",
"[",
"sp",
"]",
"[",
"'er_sample_name'",
"]",
"self",
".",
"Data_samples",
"=",
"{",
"}",
"self",
".",
"Data_sites",
"=",
"{",
"}",
"self",
".",
"tmin_box",
".",
"SetValue",
"(",
"\"\"",
")",
"self",
".",
"tmax_box",
".",
"SetValue",
"(",
"\"\"",
")",
"self",
".",
"clear_boxes",
"(",
")",
"self",
".",
"draw_figure",
"(",
"self",
".",
"s",
")"
]
| 39.444444 | 17.111111 |
def compare(self, statement_a, statement_b):
"""
Compare the two input statements.
        :return: The similarity of the two statements, as computed by spaCy's document similarity.
:rtype: float
"""
document_a = self.nlp(statement_a.text)
document_b = self.nlp(statement_b.text)
return document_a.similarity(document_b) | [
"def",
"compare",
"(",
"self",
",",
"statement_a",
",",
"statement_b",
")",
":",
"document_a",
"=",
"self",
".",
"nlp",
"(",
"statement_a",
".",
"text",
")",
"document_b",
"=",
"self",
".",
"nlp",
"(",
"statement_b",
".",
"text",
")",
"return",
"document_a",
".",
"similarity",
"(",
"document_b",
")"
]
| 31.727273 | 15 |
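A hedged usage sketch of the underlying spaCy call; it assumes a model with word vectors (e.g. en_core_web_md) is installed, and the sentences are illustrative.

```python
import spacy

nlp = spacy.load('en_core_web_md')
doc_a = nlp('It is a beautiful day.')
doc_b = nlp('The weather is lovely today.')
print(doc_a.similarity(doc_b))   # a float similarity score
```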
def _get_attributes(schema, location):
"""Return the schema's children, filtered by location."""
schema = DottedNameResolver(__name__).maybe_resolve(schema)
def _filter(attr):
if not hasattr(attr, "location"):
valid_location = 'body' in location
else:
valid_location = attr.location in to_list(location)
return valid_location
return list(filter(_filter, schema().children)) | [
"def",
"_get_attributes",
"(",
"schema",
",",
"location",
")",
":",
"schema",
"=",
"DottedNameResolver",
"(",
"__name__",
")",
".",
"maybe_resolve",
"(",
"schema",
")",
"def",
"_filter",
"(",
"attr",
")",
":",
"if",
"not",
"hasattr",
"(",
"attr",
",",
"\"location\"",
")",
":",
"valid_location",
"=",
"'body'",
"in",
"location",
"else",
":",
"valid_location",
"=",
"attr",
".",
"location",
"in",
"to_list",
"(",
"location",
")",
"return",
"valid_location",
"return",
"list",
"(",
"filter",
"(",
"_filter",
",",
"schema",
"(",
")",
".",
"children",
")",
")"
]
| 38.666667 | 17.583333 |
def create_transaction(self, outputs, fee=None, leftover=None, combine=True,
message=None, unspents=None, custom_pushdata=False): # pragma: no cover
"""Creates a signed P2PKH transaction.
:param outputs: A sequence of outputs you wish to send in the form
``(destination, amount, currency)``. The amount can
be either an int, float, or string as long as it is
a valid input to ``decimal.Decimal``. The currency
must be :ref:`supported <supported currencies>`.
:type outputs: ``list`` of ``tuple``
:param fee: The number of satoshi per byte to pay to miners. By default
Bitcash will poll `<https://bitcoincashfees.earn.com>`_ and use a fee
that will allow your transaction to be confirmed as soon as
possible.
:type fee: ``int``
:param leftover: The destination that will receive any change from the
transaction. By default Bitcash will send any change to
the same address you sent from.
:type leftover: ``str``
:param combine: Whether or not Bitcash should use all available UTXOs to
make future transactions smaller and therefore reduce
fees. By default Bitcash will consolidate UTXOs.
:type combine: ``bool``
:param message: A message to include in the transaction. This will be
stored in the blockchain forever. Due to size limits,
each message will be stored in chunks of 220 bytes.
:type message: ``str``
:param unspents: The UTXOs to use as the inputs. By default Bitcash will
communicate with the blockchain itself.
:type unspents: ``list`` of :class:`~bitcash.network.meta.Unspent`
:returns: The signed transaction as hex.
:rtype: ``str``
"""
unspents, outputs = sanitize_tx_data(
unspents or self.unspents,
outputs,
fee or get_fee(),
leftover or self.address,
combine=combine,
message=message,
compressed=self.is_compressed(),
custom_pushdata=custom_pushdata
)
return create_p2pkh_transaction(self, unspents, outputs, custom_pushdata=custom_pushdata) | [
"def",
"create_transaction",
"(",
"self",
",",
"outputs",
",",
"fee",
"=",
"None",
",",
"leftover",
"=",
"None",
",",
"combine",
"=",
"True",
",",
"message",
"=",
"None",
",",
"unspents",
"=",
"None",
",",
"custom_pushdata",
"=",
"False",
")",
":",
"# pragma: no cover",
"unspents",
",",
"outputs",
"=",
"sanitize_tx_data",
"(",
"unspents",
"or",
"self",
".",
"unspents",
",",
"outputs",
",",
"fee",
"or",
"get_fee",
"(",
")",
",",
"leftover",
"or",
"self",
".",
"address",
",",
"combine",
"=",
"combine",
",",
"message",
"=",
"message",
",",
"compressed",
"=",
"self",
".",
"is_compressed",
"(",
")",
",",
"custom_pushdata",
"=",
"custom_pushdata",
")",
"return",
"create_p2pkh_transaction",
"(",
"self",
",",
"unspents",
",",
"outputs",
",",
"custom_pushdata",
"=",
"custom_pushdata",
")"
]
| 52.586957 | 25.304348 |
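A hedged usage sketch for the method above, assuming it is exposed on a bitcash-style `Key` object; the destination address and amount are placeholders, and the call itself requires a funded key plus network access.

```python
# Usage sketch only: requires a funded key and network access.
from bitcash import Key  # assumption: the class that exposes create_transaction

key = Key()  # fresh random key; replace with Key("<WIF>") for a funded wallet
outputs = [
    # (destination, amount, currency) -- amount may be an int, float or Decimal-compatible string
    ("bitcoincash:qq_placeholder_destination", "0.25", "usd"),
]

tx_hex = key.create_transaction(outputs, fee=1, combine=True, message="test payment")
print(tx_hex)  # signed transaction hex, ready to broadcast separately
```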
def gene_tree(
self,
scale_to=None,
population_size=1,
trim_names=True,
):
""" Using the current tree object as a species tree, generate a gene
tree using the constrained Kingman coalescent process from dendropy. The
species tree should probably be a valid, ultrametric tree, generated by
some pure birth, birth-death or coalescent process, but no checks are
made. Optional kwargs are: -- scale_to, which is a floating point value
to scale the total tree tip-to-root length to, -- population_size, which
is a floating point value which all branch lengths will be divided by to
convert them to coalescent units, and -- trim_names, boolean, defaults
to true, trims off the number which dendropy appends to the sequence
name """
tree = self.template or self.yule()
for leaf in tree._tree.leaf_node_iter():
leaf.num_genes = 1
dfr = tree._tree.seed_node.distance_from_root()
dft = tree._tree.seed_node.distance_from_tip()
tree_height = dfr + dft
if scale_to:
population_size = tree_height / scale_to
for edge in tree._tree.preorder_edge_iter():
edge.pop_size = population_size
gene_tree = dpy.simulate.treesim.constrained_kingman_tree(tree._tree)[0]
if trim_names:
for leaf in gene_tree.leaf_node_iter():
leaf.taxon.label = leaf.taxon.label.replace('\'', '').split('_')[0]
# Dendropy changed its API
return {'gene_tree': tree.__class__(gene_tree.as_string('newick', suppress_rooting=True).strip(';\n') + ';'),
'species_tree': tree} | [
"def",
"gene_tree",
"(",
"self",
",",
"scale_to",
"=",
"None",
",",
"population_size",
"=",
"1",
",",
"trim_names",
"=",
"True",
",",
")",
":",
"tree",
"=",
"self",
".",
"template",
"or",
"self",
".",
"yule",
"(",
")",
"for",
"leaf",
"in",
"tree",
".",
"_tree",
".",
"leaf_node_iter",
"(",
")",
":",
"leaf",
".",
"num_genes",
"=",
"1",
"dfr",
"=",
"tree",
".",
"_tree",
".",
"seed_node",
".",
"distance_from_root",
"(",
")",
"dft",
"=",
"tree",
".",
"_tree",
".",
"seed_node",
".",
"distance_from_tip",
"(",
")",
"tree_height",
"=",
"dfr",
"+",
"dft",
"if",
"scale_to",
":",
"population_size",
"=",
"tree_height",
"/",
"scale_to",
"for",
"edge",
"in",
"tree",
".",
"_tree",
".",
"preorder_edge_iter",
"(",
")",
":",
"edge",
".",
"pop_size",
"=",
"population_size",
"gene_tree",
"=",
"dpy",
".",
"simulate",
".",
"treesim",
".",
"constrained_kingman_tree",
"(",
"tree",
".",
"_tree",
")",
"[",
"0",
"]",
"if",
"trim_names",
":",
"for",
"leaf",
"in",
"gene_tree",
".",
"leaf_node_iter",
"(",
")",
":",
"leaf",
".",
"taxon",
".",
"label",
"=",
"leaf",
".",
"taxon",
".",
"label",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"# Dendropy changed its API",
"return",
"{",
"'gene_tree'",
":",
"tree",
".",
"__class__",
"(",
"gene_tree",
".",
"as_string",
"(",
"'newick'",
",",
"suppress_rooting",
"=",
"True",
")",
".",
"strip",
"(",
"';\\n'",
")",
"+",
"';'",
")",
",",
"'species_tree'",
":",
"tree",
"}"
]
| 41.317073 | 25.658537 |
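The scaling logic in the body reduces to simple arithmetic: the population size is chosen so that dividing branch lengths by it yields the requested tip-to-root height in coalescent units. A tiny sketch with placeholder numbers:

```python
# Placeholder numbers only: how scale_to turns into a population size.
tree_height = 2.5        # tip-to-root distance of the species tree
scale_to = 1.0           # desired height in coalescent units
population_size = tree_height / scale_to

print(tree_height / population_size)  # branch lengths / pop size -> 1.0
```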
def expand(matrix, power):
"""
Apply cluster expansion to the given matrix by raising
the matrix to the given power.
:param matrix: The matrix to be expanded
:param power: Cluster expansion parameter
:returns: The expanded matrix
"""
if isspmatrix(matrix):
return matrix ** power
return np.linalg.matrix_power(matrix, power) | [
"def",
"expand",
"(",
"matrix",
",",
"power",
")",
":",
"if",
"isspmatrix",
"(",
"matrix",
")",
":",
"return",
"matrix",
"**",
"power",
"return",
"np",
".",
"linalg",
".",
"matrix_power",
"(",
"matrix",
",",
"power",
")"
]
| 27.846154 | 12.461538 |
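A worked example of the expansion step on a small dense matrix; the values are arbitrary.

```python
# Expansion on a toy stochastic matrix; raising it to `power` simulates
# that many steps of the random walk.
import numpy as np

M = np.array([[0.5, 0.5, 0.0],
              [0.25, 0.5, 0.25],
              [0.0, 0.5, 0.5]])

print(np.linalg.matrix_power(M, 2))
```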
def forwards(self, orm):
"Write your forwards methods here."
# Project labels
names = list(orm['samples.Project'].objects.values_list('label', flat=True))
orm['samples.Cohort'].objects.filter(name__in=names).update(published=True)
# World cohort
orm['samples.Cohort'].objects.filter(name=DEFAULT_COHORT_NAME).update(published=True) | [
"def",
"forwards",
"(",
"self",
",",
"orm",
")",
":",
"# Project labels",
"names",
"=",
"list",
"(",
"orm",
"[",
"'samples.Project'",
"]",
".",
"objects",
".",
"values_list",
"(",
"'label'",
",",
"flat",
"=",
"True",
")",
")",
"orm",
"[",
"'samples.Cohort'",
"]",
".",
"objects",
".",
"filter",
"(",
"name__in",
"=",
"names",
")",
".",
"update",
"(",
"published",
"=",
"True",
")",
"# World cohort",
"orm",
"[",
"'samples.Cohort'",
"]",
".",
"objects",
".",
"filter",
"(",
"name",
"=",
"DEFAULT_COHORT_NAME",
")",
".",
"update",
"(",
"published",
"=",
"True",
")"
]
| 46.625 | 29.125 |
def version_option(f):
"""
Largely a custom clone of click.version_option -- almost identical, but
prints our special output.
"""
def callback(ctx, param, value):
# copied from click.decorators.version_option
# no idea what resilient_parsing means, but...
if not value or ctx.resilient_parsing:
return
print_version()
ctx.exit(0)
return click.option(
"--version",
is_flag=True,
expose_value=False,
is_eager=True,
callback=callback,
cls=HiddenOption,
)(f) | [
"def",
"version_option",
"(",
"f",
")",
":",
"def",
"callback",
"(",
"ctx",
",",
"param",
",",
"value",
")",
":",
"# copied from click.decorators.version_option",
"# no idea what resilient_parsing means, but...",
"if",
"not",
"value",
"or",
"ctx",
".",
"resilient_parsing",
":",
"return",
"print_version",
"(",
")",
"ctx",
".",
"exit",
"(",
"0",
")",
"return",
"click",
".",
"option",
"(",
"\"--version\"",
",",
"is_flag",
"=",
"True",
",",
"expose_value",
"=",
"False",
",",
"is_eager",
"=",
"True",
",",
"callback",
"=",
"callback",
",",
"cls",
"=",
"HiddenOption",
",",
")",
"(",
"f",
")"
]
| 24.478261 | 18.565217 |
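A self-contained sketch of the same eager-callback pattern using plain click, with the project-specific `HiddenOption` class and `print_version()` helper replaced by placeholders.

```python
# Same eager --version pattern with plain click; the echoed string stands in
# for the project's print_version() output.
import click

def version_option(f):
    def callback(ctx, param, value):
        if not value or ctx.resilient_parsing:
            return
        click.echo("mytool 1.2.3")  # placeholder version output
        ctx.exit(0)
    return click.option(
        "--version",
        is_flag=True,
        expose_value=False,
        is_eager=True,
        callback=callback,
    )(f)

@click.command()
@version_option
def cli():
    click.echo("running")

if __name__ == "__main__":
    cli()
```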
def _on_wid_changed(self, wid):
"""Called when the widget is changed"""
if self._itsme: return
self.update_model(self._get_idx_from_widget(wid))
return | [
"def",
"_on_wid_changed",
"(",
"self",
",",
"wid",
")",
":",
"if",
"self",
".",
"_itsme",
":",
"return",
"self",
".",
"update_model",
"(",
"self",
".",
"_get_idx_from_widget",
"(",
"wid",
")",
")",
"return"
]
| 35.8 | 12.4 |
def read_code(self, name):
"""Reads code from a python file called 'name'"""
file_path = self.gen_file_path(name)
with open(file_path) as f:
code = f.read()
return code | [
"def",
"read_code",
"(",
"self",
",",
"name",
")",
":",
"file_path",
"=",
"self",
".",
"gen_file_path",
"(",
"name",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"f",
":",
"code",
"=",
"f",
".",
"read",
"(",
")",
"return",
"code"
]
| 29.571429 | 14 |
def delete_row(self, index):
"""
        Deletes a row by its index
:param int index: the index of the row. zero indexed
:return bool: Success or Failure
"""
url = self.build_url(self._endpoints.get('delete_row').format(id=index))
return bool(self.session.post(url)) | [
"def",
"delete_row",
"(",
"self",
",",
"index",
")",
":",
"url",
"=",
"self",
".",
"build_url",
"(",
"self",
".",
"_endpoints",
".",
"get",
"(",
"'delete_row'",
")",
".",
"format",
"(",
"id",
"=",
"index",
")",
")",
"return",
"bool",
"(",
"self",
".",
"session",
".",
"post",
"(",
"url",
")",
")"
]
| 38.5 | 10 |
def getAssociation(self, server_url, handle=None):
"""Retrieve an association. If no handle is specified, return
the association with the latest expiration.
(str, str or NoneType) -> Association or NoneType
"""
if handle is None:
handle = ''
# The filename with the empty handle is a prefix of all other
# associations for the given server URL.
filename = self.getAssociationFilename(server_url, handle)
if handle:
return self._getAssociation(filename)
else:
association_files = os.listdir(self.association_dir)
matching_files = []
# strip off the path to do the comparison
name = os.path.basename(filename)
for association_file in association_files:
if association_file.startswith(name):
matching_files.append(association_file)
matching_associations = []
# read the matching files and sort by time issued
for name in matching_files:
full_name = os.path.join(self.association_dir, name)
association = self._getAssociation(full_name)
if association is not None:
matching_associations.append(
(association.issued, association))
matching_associations.sort()
# return the most recently issued one.
if matching_associations:
(_, assoc) = matching_associations[-1]
return assoc
else:
return None | [
"def",
"getAssociation",
"(",
"self",
",",
"server_url",
",",
"handle",
"=",
"None",
")",
":",
"if",
"handle",
"is",
"None",
":",
"handle",
"=",
"''",
"# The filename with the empty handle is a prefix of all other",
"# associations for the given server URL.",
"filename",
"=",
"self",
".",
"getAssociationFilename",
"(",
"server_url",
",",
"handle",
")",
"if",
"handle",
":",
"return",
"self",
".",
"_getAssociation",
"(",
"filename",
")",
"else",
":",
"association_files",
"=",
"os",
".",
"listdir",
"(",
"self",
".",
"association_dir",
")",
"matching_files",
"=",
"[",
"]",
"# strip off the path to do the comparison",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"for",
"association_file",
"in",
"association_files",
":",
"if",
"association_file",
".",
"startswith",
"(",
"name",
")",
":",
"matching_files",
".",
"append",
"(",
"association_file",
")",
"matching_associations",
"=",
"[",
"]",
"# read the matching files and sort by time issued",
"for",
"name",
"in",
"matching_files",
":",
"full_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"association_dir",
",",
"name",
")",
"association",
"=",
"self",
".",
"_getAssociation",
"(",
"full_name",
")",
"if",
"association",
"is",
"not",
"None",
":",
"matching_associations",
".",
"append",
"(",
"(",
"association",
".",
"issued",
",",
"association",
")",
")",
"matching_associations",
".",
"sort",
"(",
")",
"# return the most recently issued one.",
"if",
"matching_associations",
":",
"(",
"_",
",",
"assoc",
")",
"=",
"matching_associations",
"[",
"-",
"1",
"]",
"return",
"assoc",
"else",
":",
"return",
"None"
]
| 38.512195 | 17.195122 |
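The selection at the end of the method boils down to sorting `(issued, association)` pairs and taking the last one; a minimal sketch with placeholder data:

```python
# Placeholder data: pick the association with the latest `issued` timestamp.
candidates = [
    (1609459200, "assoc-a"),
    (1612137600, "assoc-b"),
    (1610668800, "assoc-c"),
]
candidates.sort()                 # sorts by issued time
_, newest = candidates[-1]        # most recently issued
print(newest)                     # -> assoc-b
```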
def format_name(format, name, target_type, prop_set):
""" Given a target, as given to a custom tag rule, returns a string formatted
according to the passed format. Format is a list of properties that is
represented in the result. For each element of format the corresponding target
information is obtained and added to the result string. For all, but the
    literal, the format value is taken as the string to prepend to the output
to join the item to the rest of the result. If not given "-" is used as a
joiner.
The format options can be:
<base>[joiner]
:: The basename of the target name.
<toolset>[joiner]
:: The abbreviated toolset tag being used to build the target.
<threading>[joiner]
:: Indication of a multi-threaded build.
<runtime>[joiner]
:: Collective tag of the build runtime.
<version:/version-feature | X.Y[.Z]/>[joiner]
:: Short version tag taken from the given "version-feature"
in the build properties. Or if not present, the literal
value as the version number.
<property:/property-name/>[joiner]
:: Direct lookup of the given property-name value in the
build properties. /property-name/ is a regular expression.
e.g. <property:toolset-.*:flavor> will match every toolset.
/otherwise/
:: The literal value of the format argument.
For example this format:
boost_ <base> <toolset> <threading> <runtime> <version:boost-version>
Might return:
boost_thread-vc80-mt-gd-1_33.dll, or
boost_regex-vc80-gd-1_33.dll
The returned name also has the target type specific prefix and suffix which
puts it in a ready form to use as the value from a custom tag rule.
"""
if __debug__:
from ..build.property_set import PropertySet
assert is_iterable_typed(format, basestring)
assert isinstance(name, basestring)
assert isinstance(target_type, basestring)
assert isinstance(prop_set, PropertySet)
# assert(isinstance(prop_set, property_set.PropertySet))
if type.is_derived(target_type, 'LIB'):
result = "" ;
for f in format:
grist = get_grist(f)
if grist == '<base>':
result += os.path.basename(name)
elif grist == '<toolset>':
result += join_tag(get_value(f),
toolset_tag(name, target_type, prop_set))
elif grist == '<threading>':
result += join_tag(get_value(f),
threading_tag(name, target_type, prop_set))
elif grist == '<runtime>':
result += join_tag(get_value(f),
runtime_tag(name, target_type, prop_set))
elif grist.startswith('<version:'):
key = grist[len('<version:'):-1]
version = prop_set.get('<' + key + '>')
if not version:
version = key
version = __re_version.match(version)
result += join_tag(get_value(f), version[1] + '_' + version[2])
elif grist.startswith('<property:'):
key = grist[len('<property:'):-1]
property_re = re.compile('<(' + key + ')>')
p0 = None
for prop in prop_set.raw():
match = property_re.match(prop)
if match:
p0 = match[1]
break
if p0:
p = prop_set.get('<' + p0 + '>')
if p:
assert(len(p) == 1)
result += join_tag(ungrist(f), p)
else:
result += f
result = b2.build.virtual_target.add_prefix_and_suffix(
''.join(result), target_type, prop_set)
return result | [
"def",
"format_name",
"(",
"format",
",",
"name",
",",
"target_type",
",",
"prop_set",
")",
":",
"if",
"__debug__",
":",
"from",
".",
".",
"build",
".",
"property_set",
"import",
"PropertySet",
"assert",
"is_iterable_typed",
"(",
"format",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"target_type",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"prop_set",
",",
"PropertySet",
")",
"# assert(isinstance(prop_set, property_set.PropertySet))",
"if",
"type",
".",
"is_derived",
"(",
"target_type",
",",
"'LIB'",
")",
":",
"result",
"=",
"\"\"",
"for",
"f",
"in",
"format",
":",
"grist",
"=",
"get_grist",
"(",
"f",
")",
"if",
"grist",
"==",
"'<base>'",
":",
"result",
"+=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"elif",
"grist",
"==",
"'<toolset>'",
":",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"toolset_tag",
"(",
"name",
",",
"target_type",
",",
"prop_set",
")",
")",
"elif",
"grist",
"==",
"'<threading>'",
":",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"threading_tag",
"(",
"name",
",",
"target_type",
",",
"prop_set",
")",
")",
"elif",
"grist",
"==",
"'<runtime>'",
":",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"runtime_tag",
"(",
"name",
",",
"target_type",
",",
"prop_set",
")",
")",
"elif",
"grist",
".",
"startswith",
"(",
"'<version:'",
")",
":",
"key",
"=",
"grist",
"[",
"len",
"(",
"'<version:'",
")",
":",
"-",
"1",
"]",
"version",
"=",
"prop_set",
".",
"get",
"(",
"'<'",
"+",
"key",
"+",
"'>'",
")",
"if",
"not",
"version",
":",
"version",
"=",
"key",
"version",
"=",
"__re_version",
".",
"match",
"(",
"version",
")",
"result",
"+=",
"join_tag",
"(",
"get_value",
"(",
"f",
")",
",",
"version",
"[",
"1",
"]",
"+",
"'_'",
"+",
"version",
"[",
"2",
"]",
")",
"elif",
"grist",
".",
"startswith",
"(",
"'<property:'",
")",
":",
"key",
"=",
"grist",
"[",
"len",
"(",
"'<property:'",
")",
":",
"-",
"1",
"]",
"property_re",
"=",
"re",
".",
"compile",
"(",
"'<('",
"+",
"key",
"+",
"')>'",
")",
"p0",
"=",
"None",
"for",
"prop",
"in",
"prop_set",
".",
"raw",
"(",
")",
":",
"match",
"=",
"property_re",
".",
"match",
"(",
"prop",
")",
"if",
"match",
":",
"p0",
"=",
"match",
"[",
"1",
"]",
"break",
"if",
"p0",
":",
"p",
"=",
"prop_set",
".",
"get",
"(",
"'<'",
"+",
"p0",
"+",
"'>'",
")",
"if",
"p",
":",
"assert",
"(",
"len",
"(",
"p",
")",
"==",
"1",
")",
"result",
"+=",
"join_tag",
"(",
"ungrist",
"(",
"f",
")",
",",
"p",
")",
"else",
":",
"result",
"+=",
"f",
"result",
"=",
"b2",
".",
"build",
".",
"virtual_target",
".",
"add_prefix_and_suffix",
"(",
"''",
".",
"join",
"(",
"result",
")",
",",
"target_type",
",",
"prop_set",
")",
"return",
"result"
]
| 43.67033 | 18.373626 |
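The tag fragments described in the docstring are simply joined (with "-" as the default joiner) and then given the type-specific prefix and suffix; the fragments below are written out by hand rather than derived from real build properties.

```python
# Hand-written fragments only, illustrating how the joined tags read.
parts = ["boost_regex", "-vc80", "-mt", "-gd", "-1_33"]
print("".join(parts) + ".dll")   # -> boost_regex-vc80-mt-gd-1_33.dll
```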
def _cost_func(x, kernel_options, tuning_options, runner, results, cache):
""" Cost function used by minimize """
error_time = 1e20
logging.debug('_cost_func called')
logging.debug('x: ' + str(x))
x_key = ",".join([str(i) for i in x])
if x_key in cache:
return cache[x_key]
#snap values in x to nearest actual value for each parameter unscale x if needed
if tuning_options.scaling:
params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps)
else:
params = snap_to_nearest_config(x, tuning_options.tune_params)
logging.debug('params ' + str(params))
x_int = ",".join([str(i) for i in params])
if x_int in cache:
return cache[x_int]
#check if this is a legal (non-restricted) parameter instance
if tuning_options.restrictions:
legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose)
if not legal:
cache[x_int] = error_time
cache[x_key] = error_time
return error_time
#compile and benchmark this instance
res, _ = runner.run([params], kernel_options, tuning_options)
#append to tuning results
if res:
results.append(res[0])
cache[x_int] = res[0]['time']
cache[x_key] = res[0]['time']
return res[0]['time']
cache[x_int] = error_time
cache[x_key] = error_time
return error_time | [
"def",
"_cost_func",
"(",
"x",
",",
"kernel_options",
",",
"tuning_options",
",",
"runner",
",",
"results",
",",
"cache",
")",
":",
"error_time",
"=",
"1e20",
"logging",
".",
"debug",
"(",
"'_cost_func called'",
")",
"logging",
".",
"debug",
"(",
"'x: '",
"+",
"str",
"(",
"x",
")",
")",
"x_key",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"x",
"]",
")",
"if",
"x_key",
"in",
"cache",
":",
"return",
"cache",
"[",
"x_key",
"]",
"#snap values in x to nearest actual value for each parameter unscale x if needed",
"if",
"tuning_options",
".",
"scaling",
":",
"params",
"=",
"unscale_and_snap_to_nearest",
"(",
"x",
",",
"tuning_options",
".",
"tune_params",
",",
"tuning_options",
".",
"eps",
")",
"else",
":",
"params",
"=",
"snap_to_nearest_config",
"(",
"x",
",",
"tuning_options",
".",
"tune_params",
")",
"logging",
".",
"debug",
"(",
"'params '",
"+",
"str",
"(",
"params",
")",
")",
"x_int",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"params",
"]",
")",
"if",
"x_int",
"in",
"cache",
":",
"return",
"cache",
"[",
"x_int",
"]",
"#check if this is a legal (non-restricted) parameter instance",
"if",
"tuning_options",
".",
"restrictions",
":",
"legal",
"=",
"util",
".",
"check_restrictions",
"(",
"tuning_options",
".",
"restrictions",
",",
"params",
",",
"tuning_options",
".",
"tune_params",
".",
"keys",
"(",
")",
",",
"tuning_options",
".",
"verbose",
")",
"if",
"not",
"legal",
":",
"cache",
"[",
"x_int",
"]",
"=",
"error_time",
"cache",
"[",
"x_key",
"]",
"=",
"error_time",
"return",
"error_time",
"#compile and benchmark this instance",
"res",
",",
"_",
"=",
"runner",
".",
"run",
"(",
"[",
"params",
"]",
",",
"kernel_options",
",",
"tuning_options",
")",
"#append to tuning results",
"if",
"res",
":",
"results",
".",
"append",
"(",
"res",
"[",
"0",
"]",
")",
"cache",
"[",
"x_int",
"]",
"=",
"res",
"[",
"0",
"]",
"[",
"'time'",
"]",
"cache",
"[",
"x_key",
"]",
"=",
"res",
"[",
"0",
"]",
"[",
"'time'",
"]",
"return",
"res",
"[",
"0",
"]",
"[",
"'time'",
"]",
"cache",
"[",
"x_int",
"]",
"=",
"error_time",
"cache",
"[",
"x_key",
"]",
"=",
"error_time",
"return",
"error_time"
]
| 32.704545 | 21.75 |
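A self-contained sketch of the caching pattern above: the parameter vector is serialised into a string key so repeated or illegal configurations are not re-benchmarked; the benchmark and restriction functions are placeholders.

```python
# Caching sketch with placeholder benchmark/restriction callables.
cache = {}
ERROR_TIME = 1e20

def cost(params, benchmark, is_legal):
    key = ",".join(str(p) for p in params)
    if key in cache:
        return cache[key]
    if not is_legal(params):
        cache[key] = ERROR_TIME
        return ERROR_TIME
    time = benchmark(params)      # expensive measurement
    cache[key] = time
    return time

bench = lambda p: p[0] * 0.01
legal = lambda p: p[0] <= 64
print(cost([32, 4], bench, legal))   # benchmarked
print(cost([32, 4], bench, legal))   # served from cache
print(cost([128, 4], bench, legal))  # illegal -> ERROR_TIME
```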
def redraw(self, col=0):
"""redraw image, applying the following:
rotation, flips, log scale
max/min values from sliders or explicit intensity ranges
color map
interpolation
"""
conf = self.conf
# note: rotation re-calls display(), to reset the image
# other transformations will just do .set_data() on image
if conf.rot:
if self.xdata is not None:
self.xdata = self.xdata[::-1]
if self.ydata is not None:
self.ydata = self.ydata[:]
self.display(np.rot90(conf.data),
x=self.ydata, xlabel=self.ylab,
y=self.xdata, ylabel=self.xlab)
# flips, log scales
img = conf.data
if img is None: return
if len(img.shape) == 2:
col = 0
if self.conf.style == 'image':
if conf.flip_ud: img = np.flipud(img)
if conf.flip_lr: img = np.fliplr(img)
if conf.log_scale:
img = np.log10(1 + 9.0*img)
# apply intensity scale for current limited (zoomed) image
if len(img.shape) == 2:
# apply clipped color scale, as from sliders
imin = float(conf.int_lo[col])
imax = float(conf.int_hi[col])
if conf.log_scale:
imin = np.log10(1 + 9.0*imin)
imax = np.log10(1 + 9.0*imax)
(xmin, xmax, ymin, ymax) = self.conf.datalimits
if xmin is None: xmin = 0
if xmax is None: xmax = img.shape[1]
if ymin is None: ymin = 0
if ymax is None: ymax = img.shape[0]
img = (img - imin)/(imax - imin + 1.e-8)
mlo = conf.cmap_lo[0]/(1.0*conf.cmap_range)
mhi = conf.cmap_hi[0]/(1.0*conf.cmap_range)
if self.conf.style == 'image':
conf.image.set_data(np.clip((img - mlo)/(mhi - mlo + 1.e-8), 0, 1))
conf.image.set_interpolation(conf.interp)
else:
r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]
rmin = float(conf.int_lo[0])
rmax = float(conf.int_hi[0])
gmin = float(conf.int_lo[1])
gmax = float(conf.int_hi[1])
bmin = float(conf.int_lo[2])
bmax = float(conf.int_hi[2])
if conf.log_scale:
rmin = np.log10(1 + 9.0*rmin)
rmax = np.log10(1 + 9.0*rmax)
gmin = np.log10(1 + 9.0*gmin)
gmax = np.log10(1 + 9.0*gmax)
bmin = np.log10(1 + 9.0*bmin)
bmax = np.log10(1 + 9.0*bmax)
rlo = conf.cmap_lo[0]/(1.0*conf.cmap_range)
rhi = conf.cmap_hi[0]/(1.0*conf.cmap_range)
glo = conf.cmap_lo[1]/(1.0*conf.cmap_range)
ghi = conf.cmap_hi[1]/(1.0*conf.cmap_range)
blo = conf.cmap_lo[2]/(1.0*conf.cmap_range)
bhi = conf.cmap_hi[2]/(1.0*conf.cmap_range)
r = (r - rmin)/(rmax - rmin + 1.e-8)
g = (g - gmin)/(gmax - gmin + 1.e-8)
b = (b - bmin)/(bmax - bmin + 1.e-8)
inew = img*1.0
inew[:,:,0] = np.clip((r - rlo)/(rhi - rlo + 1.e-8), 0, 1)
inew[:,:,1] = np.clip((g - glo)/(ghi - glo + 1.e-8), 0, 1)
inew[:,:,2] = np.clip((b - blo)/(bhi - blo + 1.e-8), 0, 1)
whitebg = conf.tricolor_bg.startswith('wh')
if whitebg:
inew = conf.tricolor_white_bg(inew)
if self.conf.style == 'image':
conf.image.set_data(inew)
conf.image.set_interpolation(conf.interp)
self.canvas.draw()
if callable(self.redraw_callback):
self.redraw_callback(wid=self.GetId()) | [
"def",
"redraw",
"(",
"self",
",",
"col",
"=",
"0",
")",
":",
"conf",
"=",
"self",
".",
"conf",
"# note: rotation re-calls display(), to reset the image",
"# other transformations will just do .set_data() on image",
"if",
"conf",
".",
"rot",
":",
"if",
"self",
".",
"xdata",
"is",
"not",
"None",
":",
"self",
".",
"xdata",
"=",
"self",
".",
"xdata",
"[",
":",
":",
"-",
"1",
"]",
"if",
"self",
".",
"ydata",
"is",
"not",
"None",
":",
"self",
".",
"ydata",
"=",
"self",
".",
"ydata",
"[",
":",
"]",
"self",
".",
"display",
"(",
"np",
".",
"rot90",
"(",
"conf",
".",
"data",
")",
",",
"x",
"=",
"self",
".",
"ydata",
",",
"xlabel",
"=",
"self",
".",
"ylab",
",",
"y",
"=",
"self",
".",
"xdata",
",",
"ylabel",
"=",
"self",
".",
"xlab",
")",
"# flips, log scales",
"img",
"=",
"conf",
".",
"data",
"if",
"img",
"is",
"None",
":",
"return",
"if",
"len",
"(",
"img",
".",
"shape",
")",
"==",
"2",
":",
"col",
"=",
"0",
"if",
"self",
".",
"conf",
".",
"style",
"==",
"'image'",
":",
"if",
"conf",
".",
"flip_ud",
":",
"img",
"=",
"np",
".",
"flipud",
"(",
"img",
")",
"if",
"conf",
".",
"flip_lr",
":",
"img",
"=",
"np",
".",
"fliplr",
"(",
"img",
")",
"if",
"conf",
".",
"log_scale",
":",
"img",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"img",
")",
"# apply intensity scale for current limited (zoomed) image",
"if",
"len",
"(",
"img",
".",
"shape",
")",
"==",
"2",
":",
"# apply clipped color scale, as from sliders",
"imin",
"=",
"float",
"(",
"conf",
".",
"int_lo",
"[",
"col",
"]",
")",
"imax",
"=",
"float",
"(",
"conf",
".",
"int_hi",
"[",
"col",
"]",
")",
"if",
"conf",
".",
"log_scale",
":",
"imin",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"imin",
")",
"imax",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"imax",
")",
"(",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
")",
"=",
"self",
".",
"conf",
".",
"datalimits",
"if",
"xmin",
"is",
"None",
":",
"xmin",
"=",
"0",
"if",
"xmax",
"is",
"None",
":",
"xmax",
"=",
"img",
".",
"shape",
"[",
"1",
"]",
"if",
"ymin",
"is",
"None",
":",
"ymin",
"=",
"0",
"if",
"ymax",
"is",
"None",
":",
"ymax",
"=",
"img",
".",
"shape",
"[",
"0",
"]",
"img",
"=",
"(",
"img",
"-",
"imin",
")",
"/",
"(",
"imax",
"-",
"imin",
"+",
"1.e-8",
")",
"mlo",
"=",
"conf",
".",
"cmap_lo",
"[",
"0",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"mhi",
"=",
"conf",
".",
"cmap_hi",
"[",
"0",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"if",
"self",
".",
"conf",
".",
"style",
"==",
"'image'",
":",
"conf",
".",
"image",
".",
"set_data",
"(",
"np",
".",
"clip",
"(",
"(",
"img",
"-",
"mlo",
")",
"/",
"(",
"mhi",
"-",
"mlo",
"+",
"1.e-8",
")",
",",
"0",
",",
"1",
")",
")",
"conf",
".",
"image",
".",
"set_interpolation",
"(",
"conf",
".",
"interp",
")",
"else",
":",
"r",
",",
"g",
",",
"b",
"=",
"img",
"[",
":",
",",
":",
",",
"0",
"]",
",",
"img",
"[",
":",
",",
":",
",",
"1",
"]",
",",
"img",
"[",
":",
",",
":",
",",
"2",
"]",
"rmin",
"=",
"float",
"(",
"conf",
".",
"int_lo",
"[",
"0",
"]",
")",
"rmax",
"=",
"float",
"(",
"conf",
".",
"int_hi",
"[",
"0",
"]",
")",
"gmin",
"=",
"float",
"(",
"conf",
".",
"int_lo",
"[",
"1",
"]",
")",
"gmax",
"=",
"float",
"(",
"conf",
".",
"int_hi",
"[",
"1",
"]",
")",
"bmin",
"=",
"float",
"(",
"conf",
".",
"int_lo",
"[",
"2",
"]",
")",
"bmax",
"=",
"float",
"(",
"conf",
".",
"int_hi",
"[",
"2",
"]",
")",
"if",
"conf",
".",
"log_scale",
":",
"rmin",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"rmin",
")",
"rmax",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"rmax",
")",
"gmin",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"gmin",
")",
"gmax",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"gmax",
")",
"bmin",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"bmin",
")",
"bmax",
"=",
"np",
".",
"log10",
"(",
"1",
"+",
"9.0",
"*",
"bmax",
")",
"rlo",
"=",
"conf",
".",
"cmap_lo",
"[",
"0",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"rhi",
"=",
"conf",
".",
"cmap_hi",
"[",
"0",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"glo",
"=",
"conf",
".",
"cmap_lo",
"[",
"1",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"ghi",
"=",
"conf",
".",
"cmap_hi",
"[",
"1",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"blo",
"=",
"conf",
".",
"cmap_lo",
"[",
"2",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"bhi",
"=",
"conf",
".",
"cmap_hi",
"[",
"2",
"]",
"/",
"(",
"1.0",
"*",
"conf",
".",
"cmap_range",
")",
"r",
"=",
"(",
"r",
"-",
"rmin",
")",
"/",
"(",
"rmax",
"-",
"rmin",
"+",
"1.e-8",
")",
"g",
"=",
"(",
"g",
"-",
"gmin",
")",
"/",
"(",
"gmax",
"-",
"gmin",
"+",
"1.e-8",
")",
"b",
"=",
"(",
"b",
"-",
"bmin",
")",
"/",
"(",
"bmax",
"-",
"bmin",
"+",
"1.e-8",
")",
"inew",
"=",
"img",
"*",
"1.0",
"inew",
"[",
":",
",",
":",
",",
"0",
"]",
"=",
"np",
".",
"clip",
"(",
"(",
"r",
"-",
"rlo",
")",
"/",
"(",
"rhi",
"-",
"rlo",
"+",
"1.e-8",
")",
",",
"0",
",",
"1",
")",
"inew",
"[",
":",
",",
":",
",",
"1",
"]",
"=",
"np",
".",
"clip",
"(",
"(",
"g",
"-",
"glo",
")",
"/",
"(",
"ghi",
"-",
"glo",
"+",
"1.e-8",
")",
",",
"0",
",",
"1",
")",
"inew",
"[",
":",
",",
":",
",",
"2",
"]",
"=",
"np",
".",
"clip",
"(",
"(",
"b",
"-",
"blo",
")",
"/",
"(",
"bhi",
"-",
"blo",
"+",
"1.e-8",
")",
",",
"0",
",",
"1",
")",
"whitebg",
"=",
"conf",
".",
"tricolor_bg",
".",
"startswith",
"(",
"'wh'",
")",
"if",
"whitebg",
":",
"inew",
"=",
"conf",
".",
"tricolor_white_bg",
"(",
"inew",
")",
"if",
"self",
".",
"conf",
".",
"style",
"==",
"'image'",
":",
"conf",
".",
"image",
".",
"set_data",
"(",
"inew",
")",
"conf",
".",
"image",
".",
"set_interpolation",
"(",
"conf",
".",
"interp",
")",
"self",
".",
"canvas",
".",
"draw",
"(",
")",
"if",
"callable",
"(",
"self",
".",
"redraw_callback",
")",
":",
"self",
".",
"redraw_callback",
"(",
"wid",
"=",
"self",
".",
"GetId",
"(",
")",
")"
]
| 38.479167 | 14.760417 |
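The per-channel intensity handling reduces to two normalisations followed by a clip; a small numpy sketch with placeholder values:

```python
# Placeholder values: normalise to the intensity window, then clip the
# slider-selected contrast range into [0, 1].
import numpy as np

img = np.linspace(0, 100, 12).reshape(3, 4)   # stand-in image data
imin, imax = 10.0, 90.0                        # explicit intensity range
mlo, mhi = 0.1, 0.9                            # slider positions as fractions

scaled = (img - imin) / (imax - imin + 1.e-8)
display = np.clip((scaled - mlo) / (mhi - mlo + 1.e-8), 0, 1)
print(display)
```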