text (string, 89 to 104k chars) | code_tokens (list) | avg_line_len (float64, 7.91 to 980) | score (float64, 0 to 630) |
---|---|---|---|
def handle_page_location_changed(self, timeout=None):
    '''
    If the chrome tab has internally redirected (generally because of javascript), this
    will walk the page navigation responses and attempt to fetch the response body for
    the tab's latest location.
    '''
    # In general, this is often called after other mechanisms have confirmed
    # that the tab has already navigated. As such, we don't want to wait a while
    # to discover something went wrong, so use a timeout that basically just
    # results in checking the available buffer, and nothing else.
    if not timeout:
        timeout = 0.1
    self.log.debug("We may have redirected. Checking.")
    messages = self.transport.recv_all_filtered(filter_funcs.capture_loading_events, tab_key=self.tab_id)
    if not messages:
        raise ChromeError("Couldn't track redirect! No idea what to do!")
    last_message = messages[-1]
    self.log.info("Probably a redirect! New content url: '%s'", last_message['params']['documentURL'])
    resp = self.transport.recv_filtered(filter_funcs.network_response_recieved_for_url(last_message['params']['documentURL'], last_message['params']['frameId']), tab_key=self.tab_id)
    resp = resp['params']
    ctype = 'application/unknown'
    resp_response = resp['response']
    if 'mimeType' in resp_response:
        ctype = resp_response['mimeType']
    if 'headers' in resp_response and 'content-type' in resp_response['headers']:
        ctype = resp_response['headers']['content-type'].split(";")[0]
    # We assume the last document request was the redirect.
    # This is /probably/ kind of a poor practice, but what the hell.
    # I have no idea what this would do if there are non-html documents (or if that can even happen.)
    return self.get_unpacked_response_body(last_message['params']['requestId'], mimetype=ctype)
| 44.769231 | 33.74359 |
def get(self, sid):
    """
    Constructs a FactorContext

    :param sid: A string that uniquely identifies this Factor.

    :returns: twilio.rest.authy.v1.service.entity.factor.FactorContext
    :rtype: twilio.rest.authy.v1.service.entity.factor.FactorContext
    """
    return FactorContext(
        self._version,
        service_sid=self._solution['service_sid'],
        identity=self._solution['identity'],
        sid=sid,
    )
| 31.533333 | 19.8 |
def lineReceived(self, line):
    """
    A line was received.
    """
    if line.startswith(b"#"):  # ignore it
        return
    if line.startswith(b"OK"):
        # if no command issued, then just 'ready'
        if self._ready:
            self._dq.pop(0).callback(self._currentResponse(line))
        else:
            self._ready = True
    if line.startswith(b"D "):
        self._bufferedData.append(line[2:].replace(b"%0A", b"\r")
                                          .replace(b"%0D", b"\n")
                                          .replace(b"%25", b"%"))
    if line.startswith(b"ERR"):
        self._dq.pop(0).errback(AssuanError(line))
| 38.333333 | 13.111111 |
def verification_list(self, limit=10):
    """
    Get list of verifications. Uses GET to /verifications interface.

    :Returns: (list) Verification list as specified `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Get_verification_list>`_.
    """
    # TODO add arguments for paging and stuff
    params = {}
    params["limit"] = limit
    response = self._get(url.verifications, params=params)
    self._check_response(response, 200)
    return self._create_response(response).get("verifications")
| 39.071429 | 24.928571 |
def set_data(self, data, addr=0):
    '''
    Sets data for outgoing stream
    '''
    if self._mem_bytes < len(data):
        raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
    self._intf.write(self._conf['base_addr'] + self._spi_mem_offset + addr, data)
| 47.857143 | 27.571429 |
def group_update(auth=None, **kwargs):
    '''
    Update a group

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.group_update name=group1 description='new description'
        salt '*' keystoneng.group_create name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e new_name=newgroupname
        salt '*' keystoneng.group_create name=0e4febc2a5ab4f2c8f374b054162506d new_name=newgroupname
    '''
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    if 'new_name' in kwargs:
        kwargs['name'] = kwargs.pop('new_name')
    return cloud.update_group(**kwargs)
| 35 | 26.882353 |
def _slice_weights(self, arr, li, lh):
    """slice fused rnn weights"""
    args = {}
    gate_names = self._gate_names
    directions = self._directions

    b = len(directions)
    p = 0
    for layer in range(self._num_layers):
        for direction in directions:
            for gate in gate_names:
                name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
                if layer > 0:
                    size = b*lh*lh
                    args[name] = arr[p:p+size].reshape((lh, b*lh))
                else:
                    size = li*lh
                    args[name] = arr[p:p+size].reshape((lh, li))
                p += size
            for gate in gate_names:
                name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
                size = lh**2
                args[name] = arr[p:p+size].reshape((lh, lh))
                p += size

    for layer in range(self._num_layers):
        for direction in directions:
            for gate in gate_names:
                name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
                args[name] = arr[p:p+lh]
                p += lh
            for gate in gate_names:
                name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
                args[name] = arr[p:p+lh]
                p += lh

    assert p == arr.size, "Invalid parameters size for FusedRNNCell"
    return args
| 40.342105 | 16.210526 |
def run(name, chip_bam, input_bam, genome_build, out_dir, method, resources, data):
    """
    Run macs2 for chip and input samples, avoiding
    errors due to samples.
    """
    # output file name needs to have the caller name
    config = dd.get_config(data)
    out_file = os.path.join(out_dir, name + "_peaks_macs2.xls")
    macs2_file = os.path.join(out_dir, name + "_peaks.xls")
    if utils.file_exists(out_file):
        _compres_bdg_files(out_dir)
        return _get_output_files(out_dir)
    macs2 = config_utils.get_program("macs2", config)
    options = " ".join(resources.get("macs2", {}).get("options", ""))
    genome_size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
    genome_size = "" if options.find("-g") > -1 else "-g %s" % genome_size
    paired = "-f BAMPE" if bam.is_paired(chip_bam) else ""
    with utils.chdir(out_dir):
        cmd = _macs2_cmd(method)
        try:
            do.run(cmd.format(**locals()), "macs2 for %s" % name)
            utils.move_safe(macs2_file, out_file)
        except subprocess.CalledProcessError:
            raise RuntimeWarning("macs2 terminated with an error.\n"
                                 "Please, check the message and report "
                                 "error if it is related to bcbio.\n"
                                 "You can add specific options for the sample "
                                 "setting resources as explained in docs: "
                                 "https://bcbio-nextgen.readthedocs.org/en/latest/contents/configuration.html#sample-specific-resources")
    _compres_bdg_files(out_dir)
    return _get_output_files(out_dir)
| 52.096774 | 20.032258 |
def get_num_features(estimator):
    """ Return size of a feature vector estimator expects as an input. """
    if hasattr(estimator, 'coef_'):  # linear models
        if len(estimator.coef_.shape) == 0:
            return 1
        return estimator.coef_.shape[-1]
    elif hasattr(estimator, 'feature_importances_'):  # ensembles
        return estimator.feature_importances_.shape[-1]
    elif hasattr(estimator, 'feature_count_'):  # naive bayes
        return estimator.feature_count_.shape[-1]
    elif hasattr(estimator, 'theta_'):
        return estimator.theta_.shape[-1]
    elif hasattr(estimator, 'estimators_') and len(estimator.estimators_):
        # OvR
        return get_num_features(estimator.estimators_[0])
    else:
        raise ValueError("Can't figure out feature vector size for %s" %
                         estimator)
| 46.111111 | 14.555556 |
def run(self, request, tempdir, opts):
    """
    Constructs a command to run a cwl/json from requests and opts,
    runs it, and deposits the outputs in outdir.

    Runner:
        opts.getopt("runner", default="cwl-runner")

    CWL (url):
        request["workflow_url"] == a url to a cwl file
    or
        request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)

    JSON File:
        request["workflow_params"] == input json text (to be written to a file)

    :param dict request: A dictionary containing the cwl/json information.
    :param wes_service.util.WESBackend opts: contains the user's arguments;
                                             specifically the runner and runner options
    :return: {"run_id": self.run_id, "state": state}
    """
    with open(os.path.join(self.workdir, "request.json"), "w") as f:
        json.dump(request, f)
    with open(os.path.join(self.workdir, "cwl.input.json"), "w") as inputtemp:
        json.dump(request["workflow_params"], inputtemp)
    workflow_url = request.get("workflow_url")  # Will always be local path to descriptor cwl, or url.

    output = open(os.path.join(self.workdir, "cwl.output.json"), "w")
    stderr = open(os.path.join(self.workdir, "stderr"), "w")

    runner = opts.getopt("runner", default="cwl-runner")
    extra = opts.getoptlist("extra")

    # replace any locally specified outdir with the default
    for e in extra:
        if e.startswith('--outdir='):
            extra.remove(e)
    extra.append('--outdir=' + self.outdir)

    # link the cwl and json into the tempdir/cwd
    if workflow_url.startswith('file://'):
        os.symlink(workflow_url[7:], os.path.join(tempdir, "wes_workflow.cwl"))
        workflow_url = os.path.join(tempdir, "wes_workflow.cwl")
    os.symlink(inputtemp.name, os.path.join(tempdir, "cwl.input.json"))
    jsonpath = os.path.join(tempdir, "cwl.input.json")

    # build args and run
    command_args = [runner] + extra + [workflow_url, jsonpath]
    proc = subprocess.Popen(command_args,
                            stdout=output,
                            stderr=stderr,
                            close_fds=True,
                            cwd=tempdir)
    output.close()
    stderr.close()
    with open(os.path.join(self.workdir, "pid"), "w") as pid:
        pid.write(str(proc.pid))
    return self.getstatus()
| 41.278689 | 24.360656 |
def edit(directory=None, revision='current'):
    """Edit current revision."""
    if alembic_version >= (0, 8, 0):
        config = current_app.extensions['migrate'].migrate.get_config(
            directory)
        command.edit(config, revision)
    else:
        raise RuntimeError('Alembic 0.8.0 or greater is required')
| 39.75 | 14.5 |
def _render_extended_error_message_list(self, extended_error):
    """Parse the ExtendedError object and return the messages.

    Build a list of decoded messages from the extended_error using the
    message registries. An ExtendedError JSON object is a response
    with its own schema. This function knows how to parse the
    ExtendedError object and, using any loaded message registries,
    render an array of plain language strings that represent
    the response.
    """
    messages = []
    if isinstance(extended_error, dict):
        if ('Type' in extended_error and
                extended_error['Type'].startswith('ExtendedError.')):
            for msg in extended_error['Messages']:
                message_id = msg['MessageID']
                x = message_id.split('.')
                registry = x[0]
                msgkey = x[len(x) - 1]
                # if the correct message registry is loaded,
                # do string resolution
                if (registry in self.message_registries and msgkey in
                        self.message_registries[registry]['Messages']):
                    rmsgs = self.message_registries[registry]['Messages']
                    msg_dict = rmsgs[msgkey]
                    msg_str = message_id + ': ' + msg_dict['Message']
                    for argn in range(0, msg_dict['NumberOfArgs']):
                        subst = '%' + str(argn+1)
                        m = str(msg['MessageArgs'][argn])
                        msg_str = msg_str.replace(subst, m)
                    if ('Resolution' in msg_dict and
                            msg_dict['Resolution'] != 'None'):
                        msg_str += ' ' + msg_dict['Resolution']
                    messages.append(msg_str)
                else:
                    # no message registry, simply return the msg object
                    # in string form
                    messages.append(str(message_id))
    return messages
| 47.227273 | 21.613636 |
def unpack_sver_response_version(packet):
    """For internal use. Unpack the version-related parts of an sver (aka
    CMD_VERSION) response.

    Parameters
    ----------
    packet : :py:class:`~rig.machine_control.packets.SCPPacket`
        The packet received in response to the version command.

    Returns
    -------
    software_name : string
        The name of the software running on the remote machine.
    (major, minor, patch) : (int, int, int)
        The numerical part of the semantic version number.
    labels : string
        Any labels in the version number (e.g. '-dev'). May be an empty string.
    """
    software_name = packet.data.decode("utf-8")

    legacy_version_field = packet.arg2 >> 16

    if legacy_version_field != 0xFFFF:
        # Legacy version encoding: just encoded in decimal fixed-point in the
        # integer.
        major = legacy_version_field // 100
        minor = legacy_version_field % 100
        patch = 0
        labels = ""
    else:
        # Semantic Version encoding: packed after the null-terminator of the
        # software name in the version string.
        software_name, _, version_number = software_name.partition("\0")
        match = VERSION_NUMBER_REGEX.match(version_number.rstrip("\0"))
        assert match, "Malformed version number: {}".format(version_number)
        major = int(match.group(1))
        minor = int(match.group(2))
        patch = int(match.group(3))
        labels = match.group(4) or ""

    return (software_name.rstrip("\0"), (major, minor, patch), labels)
| 35.27907 | 21.139535 |
def _update_capacity(self, data):
    """ Update the consumed capacity metrics """
    if 'ConsumedCapacity' in data:
        # This is all for backwards compatibility
        consumed = data['ConsumedCapacity']
        if not isinstance(consumed, list):
            consumed = [consumed]
        for cap in consumed:
            self.capacity += cap.get('CapacityUnits', 0)
            self.table_capacity += cap.get('Table',
                                           {}).get('CapacityUnits', 0)
            local_indexes = cap.get('LocalSecondaryIndexes', {})
            for k, v in six.iteritems(local_indexes):
                self.indexes.setdefault(k, 0)
                self.indexes[k] += v['CapacityUnits']
            global_indexes = cap.get('GlobalSecondaryIndexes', {})
            for k, v in six.iteritems(global_indexes):
                self.global_indexes.setdefault(k, 0)
                self.global_indexes[k] += v['CapacityUnits']
| 52.947368 | 14.421053 |
def mkrngs(self):
    """
    Transform boolean arrays into list of limit pairs.

    Gets Time limits of signal/background boolean arrays and stores them as
    sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in
    the analyse object.
    """
    bbool = bool_2_indices(self.bkg)
    if bbool is not None:
        self.bkgrng = self.Time[bbool]
    else:
        self.bkgrng = [[np.nan, np.nan]]
    sbool = bool_2_indices(self.sig)
    if sbool is not None:
        self.sigrng = self.Time[sbool]
    else:
        self.sigrng = [[np.nan, np.nan]]
    tbool = bool_2_indices(self.trn)
    if tbool is not None:
        self.trnrng = self.Time[tbool]
    else:
        self.trnrng = [[np.nan, np.nan]]

    self.ns = np.zeros(self.Time.size)
    n = 1
    for i in range(len(self.sig) - 1):
        if self.sig[i]:
            self.ns[i] = n
        if self.sig[i] and ~self.sig[i + 1]:
            n += 1
    self.n = int(max(self.ns))  # record number of traces

    return
| 31.911765 | 15.029412 |
def to_struct(self, value):
    """Cast `date` object to string."""
    if self.str_format:
        return value.strftime(self.str_format)
    return value.strftime(self.default_format)
| 39.4 | 9.2 |
def rotate_quat(attitude, roll, pitch, yaw):
    '''
    Returns rotated quaternion

    :param attitude: quaternion [w, x, y, z]
    :param roll: rotation in rad
    :param pitch: rotation in rad
    :param yaw: rotation in rad
    :returns: quaternion [w, x, y, z]
    '''
    quat = Quaternion(attitude)
    rotation = Quaternion([roll, pitch, yaw])
    res = rotation * quat
    return res.q
| 25.714286 | 15.714286 |
def _axis(self, axis):
    """
    Return the corresponding labels taking into account the axis.

    The axis could be horizontal (0) or vertical (1).
    """
    return self.df.columns if axis == 0 else self.df.index
| 34 | 18 |
def is_identity(self):
    """If `self` is I, returns True, otherwise False."""
    if not self.terms:
        return True
    return len(self.terms) == 1 and not self.terms[0].ops and self.terms[0].coeff == 1.0
| 44.6 | 20.2 |
def json_qs_parser(body):
    """
    Parses response body from JSON, XML or query string.

    :param body:
        string

    :returns:
        :class:`dict`, :class:`list` if input is JSON or query string,
        :class:`xml.etree.ElementTree.Element` if XML.
    """
    try:
        # Try JSON first.
        return json.loads(body)
    except (OverflowError, TypeError, ValueError):
        pass
    try:
        # Then XML.
        return ElementTree.fromstring(body)
    except (ElementTree.ParseError, TypeError, ValueError):
        pass
    # Finally query string.
    return dict(parse.parse_qsl(body))
| 22.846154 | 21.692308 |
def is_connected(H, source_node, target_node):
    """Checks if a target node is connected to a source node. That is,
    this method determines if a target node can be visited from the source
    node in the sense of the 'Visit' algorithm.

    Refer to 'visit's documentation for more details.

    :param H: the hypergraph to check connectedness on.
    :param source_node: the node to check connectedness to.
    :param target_node: the node to check connectedness of.
    :returns: bool -- whether target_node can be visited from source_node.
    """
    visited_nodes, Pv, Pe = visit(H, source_node)
    return target_node in visited_nodes
| 42.133333 | 18.466667 |
def _read_from_folder(self, dirname):
    """
    Internal folder reader.

    :type dirname: str
    :param dirname: Folder to read from.
    """
    templates = _par_read(dirname=dirname, compressed=False)
    t_files = glob.glob(dirname + os.sep + '*.ms')
    tribe_cat_file = glob.glob(os.path.join(dirname, "tribe_cat.*"))
    if len(tribe_cat_file) != 0:
        tribe_cat = read_events(tribe_cat_file[0])
    else:
        tribe_cat = Catalog()
    previous_template_names = [t.name for t in self.templates]
    for template in templates:
        if template.name in previous_template_names:
            # Don't read in for templates that we already have.
            continue
        for event in tribe_cat:
            for comment in event.comments:
                if comment.text == 'eqcorrscan_template_' + template.name:
                    template.event = event
        t_file = [t for t in t_files
                  if t.split(os.sep)[-1] == template.name + '.ms']
        if len(t_file) == 0:
            print('No waveform for template: ' + template.name)
            templates.remove(template)
            continue
        elif len(t_file) > 1:
            print('Multiple waveforms found, using: ' + t_file[0])
        template.st = read(t_file[0])
    self.templates.extend(templates)
    return
| 41.5 | 14.264706 |
def get_system_data() -> typing.Union[None, dict]:
    """
    Returns information about the system in which Cauldron is running.
    If the information cannot be found, None is returned instead.

    :return:
        Dictionary containing information about the Cauldron system, which
        includes:
         * name
         * location
         * version
    """
    site_packages = get_site_packages()
    path_prefixes = [('[SP]', p) for p in site_packages]
    path_prefixes.append(('[CORE]', sys.exec_prefix))

    packages = [
        module_to_package_data(name, entry, path_prefixes)
        for name, entry in list(sys.modules.items())
    ]

    python_data = dict(
        version=list(sys.version_info),
        executable=simplify_path(sys.executable),
        directory=simplify_path(sys.exec_prefix),
        site_packages=[simplify_path(sp) for sp in site_packages]
    )

    return dict(
        python=python_data,
        packages=[p for p in packages if p is not None]
    )
| 29.242424 | 21.787879 |
def copy(self, h5file=None):
    """Create a copy of the current instance

    This is done by recursively copying the underlying hdf5 data.

    Parameters
    ----------
    h5file: str, h5py.File, h5py.Group, or None
        see `QPImage.__init__`
    """
    h5 = copyh5(self.h5, h5file)
    return QPImage(h5file=h5, h5dtype=self.h5dtype)
| 30.666667 | 16.75 |
def CA_code_header(fname_out, Nca):
    """
    Write 1023 bit CA (Gold) Code Header Files

    Mark Wickert February 2015
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    ca = loadtxt(dir_path + '/ca1thru37.txt', dtype=int16, usecols=(Nca - 1,), unpack=True)

    M = 1023  # code period
    N = 23  # code bits per line
    Sca = 'ca' + str(Nca)
    f = open(fname_out, 'wt')
    f.write('//define a CA code\n\n')
    f.write('#include <stdint.h>\n\n')
    f.write('#ifndef N_CA\n')
    f.write('#define N_CA %d\n' % M)
    f.write('#endif\n')
    f.write('/*******************************************************************/\n')
    f.write('/* 1023 Bit CA Gold Code %2d */\n' % Nca)
    f.write('int8_t ca%d[N_CA] = {' % Nca)
    kk = 0
    for k in range(M):
        # k_mod = k % M
        if (kk < N - 1) and (k < M - 1):
            f.write('%d,' % ca[k])
            kk += 1
        elif (kk == N - 1) & (k < M - 1):
            f.write('%d,\n' % ca[k])
            if k < M:
                if Nca < 10:
                    f.write(' ')
                else:
                    f.write(' ')
            kk = 0
        else:
            f.write('%d' % ca[k])
    f.write('};\n')
    f.write('/*******************************************************************/\n')
    f.close()
| 34.414634 | 16.365854 |
def css(self, path):
    """
    Link/embed CSS file.
    """
    if self.settings.embed_content:
        content = codecs.open(path, 'r', encoding='utf8').read()
        tag = Style(content, type="text/css")
    else:
        tag = Link(href=path, rel="stylesheet", type_="text/css")
    self.head.append(tag)
| 33.7 | 13.7 |
def accepts_valid_urls(func):
    """ Return a wrapper that runs given method only for valid URLs.

    :param func: a method to be wrapped
    :returns: a wrapper that adds argument validation
    """
    @functools.wraps(func)
    def wrapper(obj, urls, *args, **kwargs):
        """Run the function and return a value for valid URLs.

        :param obj: an object in whose class f is defined
        :param urls: an iterable containing URLs
        :returns: a return value of the function f
        :raises InvalidURLError: if the iterable contains invalid URLs
        """
        invalid_urls = [u for u in urls if not is_valid_url(u)]
        if invalid_urls:
            msg_tpl = 'The values: {} are not valid URLs'
            msg = msg_tpl.format(','.join(invalid_urls))
            raise InvalidURLError(msg)
        return func(obj, urls, *args, **kwargs)
    return wrapper
| 39.363636 | 13.227273 |
def _generateEncoderStringsV2(includedFields, options):
    """ Generate and return the following encoder related substitution variables:

    encoderSpecsStr:
        For the base description file, this string defines the default
        encoding dicts for each encoder. For example:

            __gym_encoder = { 'fieldname': 'gym',
                'n': 13,
                'name': 'gym',
                'type': 'SDRCategoryEncoder',
                'w': 7},
            __address_encoder = { 'fieldname': 'address',
                'n': 13,
                'name': 'address',
                'type': 'SDRCategoryEncoder',
                'w': 7}

    permEncoderChoicesStr:
        For the permutations file, this defines the possible
        encoder dicts for each encoder. For example:

            '__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7,
                n=100),
            '__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder',
                w=7, n=100),
            '__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp',
                'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])),
            '__consumption_encoder': PermuteEncoder('consumption', 'AdaptiveScalarEncoder',
                w=7, n=PermuteInt(13, 500, 20), minval=0,
                maxval=PermuteInt(100, 300, 25)),

    Parameters:
    --------------------------------------------------
    includedFields: item from the 'includedFields' section of the
        description JSON object. This is a list of dicts, each
        dict defining the field name, type, and optional min
        and max values.

    retval: (encoderSpecsStr, permEncoderChoicesStr)
    """
    width = 21
    encoderDictsList = []

    # If this is a NontemporalClassification experiment, then the
    # the "predicted" field (the classification value) should be marked to ONLY
    # go to the classifier
    if options['inferenceType'] in ["NontemporalClassification",
                                    "NontemporalMultiStep",
                                    "TemporalMultiStep",
                                    "MultiStep"]:
        classifierOnlyField = options['inferenceArgs']['predictedField']
    else:
        classifierOnlyField = None

    # ==========================================================================
    # For each field, generate the default encoding dict and PermuteEncoder
    # constructor arguments
    for fieldInfo in includedFields:
        fieldName = fieldInfo['fieldName']
        fieldType = fieldInfo['fieldType']

        # ---------
        # Scalar?
        if fieldType in ['float', 'int']:
            # n=100 is reasonably hardcoded value for n when used by description.py
            # The swarming will use PermuteEncoder below, where n is variable and
            # depends on w
            runDelta = fieldInfo.get("runDelta", False)
            if runDelta or "space" in fieldInfo:
                encoderDict = dict(type='ScalarSpaceEncoder', name=fieldName,
                                   fieldname=fieldName, n=100, w=width, clipInput=True)
                if runDelta:
                    encoderDict["runDelta"] = True
            else:
                encoderDict = dict(type='AdaptiveScalarEncoder', name=fieldName,
                                   fieldname=fieldName, n=100, w=width, clipInput=True)

            if 'minValue' in fieldInfo:
                encoderDict['minval'] = fieldInfo['minValue']
            if 'maxValue' in fieldInfo:
                encoderDict['maxval'] = fieldInfo['maxValue']

            # If both min and max were specified, use a non-adaptive encoder
            if ('minValue' in fieldInfo and 'maxValue' in fieldInfo) \
                    and (encoderDict['type'] == 'AdaptiveScalarEncoder'):
                encoderDict['type'] = 'ScalarEncoder'

            # Defaults may have been over-ridden by specifying an encoder type
            if 'encoderType' in fieldInfo:
                encoderDict['type'] = fieldInfo['encoderType']

            if 'space' in fieldInfo:
                encoderDict['space'] = fieldInfo['space']
            encoderDictsList.append(encoderDict)

        # ---------
        # String?
        elif fieldType == 'string':
            encoderDict = dict(type='SDRCategoryEncoder', name=fieldName,
                               fieldname=fieldName, n=100+width, w=width)
            if 'encoderType' in fieldInfo:
                encoderDict['type'] = fieldInfo['encoderType']
            encoderDictsList.append(encoderDict)

        # ---------
        # Datetime?
        elif fieldType == 'datetime':
            # First, the time of day representation
            encoderDict = dict(type='DateEncoder', name='%s_timeOfDay' % (fieldName),
                               fieldname=fieldName, timeOfDay=(width, 1))
            if 'encoderType' in fieldInfo:
                encoderDict['type'] = fieldInfo['encoderType']
            encoderDictsList.append(encoderDict)

            # Now, the day of week representation
            encoderDict = dict(type='DateEncoder', name='%s_dayOfWeek' % (fieldName),
                               fieldname=fieldName, dayOfWeek=(width, 1))
            if 'encoderType' in fieldInfo:
                encoderDict['type'] = fieldInfo['encoderType']
            encoderDictsList.append(encoderDict)

            # Now, the day of week representation
            encoderDict = dict(type='DateEncoder', name='%s_weekend' % (fieldName),
                               fieldname=fieldName, weekend=(width))
            if 'encoderType' in fieldInfo:
                encoderDict['type'] = fieldInfo['encoderType']
            encoderDictsList.append(encoderDict)

        else:
            raise RuntimeError("Unsupported field type '%s'" % (fieldType))

        # -----------------------------------------------------------------------
        # If this was the predicted field, insert another encoder that sends it
        # to the classifier only
        if fieldName == classifierOnlyField:
            clEncoderDict = dict(encoderDict)
            clEncoderDict['classifierOnly'] = True
            clEncoderDict['name'] = '_classifierInput'
            encoderDictsList.append(clEncoderDict)

            # If the predicted field needs to be excluded, take it out of the encoder
            # lists
            if options["inferenceArgs"]["inputPredictedField"] == "no":
                encoderDictsList.remove(encoderDict)

    # Remove any encoders not in fixedFields
    if options.get('fixedFields') is not None:
        tempList = []
        for encoderDict in encoderDictsList:
            if encoderDict['name'] in options['fixedFields']:
                tempList.append(encoderDict)
        encoderDictsList = tempList

    # ==========================================================================
    # Now generate the encoderSpecsStr and permEncoderChoicesStr strings from
    # encoderDictsList and constructorStringList
    encoderSpecsList = []
    permEncoderChoicesList = []
    for encoderDict in encoderDictsList:
        if encoderDict['name'].find('\\') >= 0:
            raise _ExpGeneratorException("Illegal character in field: '\\'")

        # Check for bad characters
        for c in _ILLEGAL_FIELDNAME_CHARACTERS:
            if encoderDict['name'].find(c) >= 0:
                raise _ExpGeneratorException("Illegal character %s in field %r" % (c, encoderDict['name']))

        constructorStr = _generatePermEncoderStr(options, encoderDict)

        encoderKey = _quoteAndEscape(encoderDict['name'])
        encoderSpecsList.append("%s: %s%s" % (
            encoderKey,
            2*_ONE_INDENT,
            pprint.pformat(encoderDict, indent=2*_INDENT_STEP)))

        # Each permEncoderChoicesStr is of the form:
        #   PermuteEncoder('gym', 'SDRCategoryEncoder',
        #                  w=7, n=100),
        permEncoderChoicesList.append("%s: %s," % (encoderKey, constructorStr))

    # Join into strings
    encoderSpecsStr = ',\n '.join(encoderSpecsList)
    permEncoderChoicesStr = '\n'.join(permEncoderChoicesList)
    permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, 1,
                                         indentFirstLine=True)

    # Return results
    return (encoderSpecsStr, permEncoderChoicesStr)
|
"(",
"\"Illegal character in field: '\\\\'\"",
")",
"# Check for bad characters",
"for",
"c",
"in",
"_ILLEGAL_FIELDNAME_CHARACTERS",
":",
"if",
"encoderDict",
"[",
"'name'",
"]",
".",
"find",
"(",
"c",
")",
">=",
"0",
":",
"raise",
"_ExpGeneratorException",
"(",
"\"Illegal character %s in field %r\"",
"%",
"(",
"c",
",",
"encoderDict",
"[",
"'name'",
"]",
")",
")",
"constructorStr",
"=",
"_generatePermEncoderStr",
"(",
"options",
",",
"encoderDict",
")",
"encoderKey",
"=",
"_quoteAndEscape",
"(",
"encoderDict",
"[",
"'name'",
"]",
")",
"encoderSpecsList",
".",
"append",
"(",
"\"%s: %s%s\"",
"%",
"(",
"encoderKey",
",",
"2",
"*",
"_ONE_INDENT",
",",
"pprint",
".",
"pformat",
"(",
"encoderDict",
",",
"indent",
"=",
"2",
"*",
"_INDENT_STEP",
")",
")",
")",
"# Each permEncoderChoicesStr is of the form:",
"# PermuteEncoder('gym', 'SDRCategoryEncoder',",
"# w=7, n=100),",
"permEncoderChoicesList",
".",
"append",
"(",
"\"%s: %s,\"",
"%",
"(",
"encoderKey",
",",
"constructorStr",
")",
")",
"# Join into strings",
"encoderSpecsStr",
"=",
"',\\n '",
".",
"join",
"(",
"encoderSpecsList",
")",
"permEncoderChoicesStr",
"=",
"'\\n'",
".",
"join",
"(",
"permEncoderChoicesList",
")",
"permEncoderChoicesStr",
"=",
"_indentLines",
"(",
"permEncoderChoicesStr",
",",
"1",
",",
"indentFirstLine",
"=",
"True",
")",
"# Return results",
"return",
"(",
"encoderSpecsStr",
",",
"permEncoderChoicesStr",
")"
] | 34.252294 | 23.206422 |
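For reference, the sketch below (not part of the dataset row above) shows the kind of encoder dict the function builds for a scalar field and how pprint renders it into an encoder spec entry. The field name 'consumption' is hypothetical, and the literal '  ' and 4 stand in for the module constants _ONE_INDENT and _INDENT_STEP, whose real values are not shown here.

import pprint

width = 21
# Defaults mirror the code above: AdaptiveScalarEncoder with n=100, w=width, clipInput=True
encoderDict = dict(type='AdaptiveScalarEncoder', name='consumption',
                   fieldname='consumption', n=100, w=width, clipInput=True)

encoderKey = "'consumption'"
# '  ' and 4 are hypothetical stand-ins for _ONE_INDENT and _INDENT_STEP
specEntry = "%s: %s%s" % (encoderKey, 2 * '  ', pprint.pformat(encoderDict, indent=4))
print(specEntry)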
def run_steps(args: argparse.Namespace):
"""Run all steps required to complete task. Called directly from main."""
logging.basicConfig(level=logging.INFO, format="sockeye.autopilot: %(message)s")
# (1) Establish task
logging.info("=== Start Autopilot ===")
# Listed task
if args.task:
task = TASKS[args.task]
logging.info("Task: %s", task.description)
logging.info("URL: %s", task.url)
def report_data(file_sets):
for file_set in file_sets:
for fname in file_set[:2]:
logging.info(" %s", fname)
logging.info(" Train:")
report_data(task.train)
logging.info(" Dev:")
report_data(task.dev)
logging.info(" Test:")
report_data(task.test)
# Custom task
else:
logging.info("Task: custom")
# Source and target language codes
lang_codes = (task.src_lang, task.trg_lang) if args.task else args.custom_lang
# (2) Establish workspace and task directories
logging.info("=== Establish working directories ===")
logging.info("Workspace: %s", args.workspace)
special_fname = os.path.join(args.workspace, FILE_WORKSPACE)
if not os.path.exists(args.workspace):
logging.info("Create: %s", args.workspace)
os.makedirs(args.workspace)
touch_file(special_fname)
else:
if not os.path.exists(special_fname):
raise RuntimeError("Directory %s exists but %s does not, stopping to avoid overwriting files in non-workspace directory"
% (args.workspace, special_fname))
dir_third_party = os.path.join(args.workspace, third_party.DIR_THIRD_PARTY)
dir_cache = os.path.join(args.workspace, DIR_CACHE)
dir_logs = os.path.join(args.workspace, DIR_LOGS)
dir_systems = os.path.join(args.workspace, DIR_SYSTEMS)
task_name = args.task if args.task else args.custom_task
if args.test:
task_name += SUFFIX_TEST
dir_task = os.path.join(dir_systems, task_name)
for dirname in (dir_third_party, dir_cache, dir_logs, dir_systems, dir_task):
if os.path.exists(dirname):
logging.info("Exists: %s", dirname)
else:
logging.info("Create: %s", dirname)
os.makedirs(dirname)
# (3) Checkout necessary tools
logging.info("=== Checkout third-party tools ===")
# Requires tokenization?
if args.task or args.custom_text_type == CUSTOM_UTF8_RAW:
third_party.checkout_moses_tokenizer(args.workspace)
# Requires byte-pair encoding?
if args.task or args.custom_text_type in (CUSTOM_UTF8_RAW, CUSTOM_UTF8_TOK):
third_party.checkout_subword_nmt(args.workspace)
# (4) Populate train/dev/test data
# This step also normalizes whitespace on data population or copy, ensuring
# that for all input data, only ASCII newlines are considered line breaks.
logging.info("=== Populate train/dev/test data ===")
step_dir_raw = os.path.join(dir_task, DIR_DATA, DIR_RAW)
complete_fname = os.path.join(step_dir_raw, FILE_COMPLETE)
if os.path.exists(complete_fname):
logging.info("Re-use completed step: %s", step_dir_raw)
else:
# Listed task
if args.task:
raw_files = identify_raw_files(task, test_mode=args.test)
with tempfile.TemporaryDirectory(prefix="raw.", dir=dir_task) as raw_dir:
# Download (or locate in cache) and extract raw files to temp directory
logging.info("=== Download and extract raw files ===")
download_extract_raw_files(raw_files, dir_cache, raw_dir)
# Copy required files to train/dev/test
logging.info("=== Create input data files ===")
renew_step_dir(step_dir_raw)
# Test mode uses the full test set as training data and the
# first line of the test set as dev and test data
populate_parallel_text(raw_dir,
task.test if args.test else task.train,
os.path.join(step_dir_raw, PREFIX_TRAIN),
False)
populate_parallel_text(raw_dir,
task.test if args.test else task.dev,
os.path.join(step_dir_raw, PREFIX_DEV),
False,
head_n=1 if args.test else 0)
populate_parallel_text(raw_dir,
task.test,
os.path.join(step_dir_raw, PREFIX_TEST),
True,
head_n=1 if args.test else 0)
# Custom task
else:
logging.info("=== Copy input data files ===")
renew_step_dir(step_dir_raw)
copy_parallel_text(args.custom_train, os.path.join(step_dir_raw, PREFIX_TRAIN))
copy_parallel_text(args.custom_dev, os.path.join(step_dir_raw, PREFIX_DEV))
copy_parallel_text(args.custom_test, os.path.join(step_dir_raw, PREFIX_TEST))
# Record success
touch_file(complete_fname)
logging.info("Step complete: %s", step_dir_raw)
# (5) Tokenize train/dev/test data
# Task requires tokenization if _any_ raw file is not already tokenized
requires_tokenization = False
if args.task:
for file_sets in (task.train, task.dev, task.test):
for _, _, text_type in file_sets:
if text_type in TEXT_REQUIRES_TOKENIZATION:
requires_tokenization = True
else:
if args.custom_text_type == CUSTOM_UTF8_RAW:
requires_tokenization = True
logging.info("=== Tokenize train/dev/test data ===")
step_dir_tok = os.path.join(dir_task, DIR_DATA, DIR_TOK)
complete_fname = os.path.join(step_dir_tok, FILE_COMPLETE)
if os.path.exists(complete_fname):
logging.info("Re-use completed step: %s", step_dir_tok)
else:
renew_step_dir(step_dir_tok)
# Tokenize each data file using the appropriate language code OR link
# raw file if already tokenized.
for fname in os.listdir(step_dir_raw):
if fname.startswith("."):
continue
input_fname = os.path.join(step_dir_raw, fname)
output_fname = os.path.join(step_dir_tok, fname)
if requires_tokenization:
lang_code = lang_codes[0] if fname.endswith(SUFFIX_SRC_GZ) else lang_codes[1]
logging.info("Tokenize (%s): %s -> %s", lang_code, input_fname, output_fname)
third_party.call_moses_tokenizer(workspace_dir=args.workspace,
input_fname=input_fname,
output_fname=output_fname,
lang_code=lang_code)
else:
logging.info("Link pre-tokenized: %s -> %s", input_fname, output_fname)
os.symlink(os.path.join("..", DIR_RAW, fname), output_fname)
# Record success
touch_file(complete_fname)
logging.info("Step complete: %s", step_dir_tok)
# (6) Learn byte-pair encoding model
# Task requires byte-pair encoding unless using pre-encoded custom data
skip_bpe = (not args.task) and args.custom_text_type == CUSTOM_UTF8_BPE
logging.info("=== Learn byte-pair encoding model ===")
step_dir_bpe_model = os.path.join(dir_task, DIR_BPE_MODEL)
complete_fname = os.path.join(step_dir_bpe_model, FILE_COMPLETE)
if os.path.exists(complete_fname):
logging.info("Re-use completed step: %s", step_dir_bpe_model)
else:
renew_step_dir(step_dir_bpe_model)
if skip_bpe:
logging.info("BPE model not required for pre-encoded data")
else:
source_fname = os.path.join(step_dir_tok, PREFIX_TRAIN + SUFFIX_SRC_GZ)
target_fname = os.path.join(step_dir_tok, PREFIX_TRAIN + SUFFIX_TRG_GZ)
codes_fname = os.path.join(step_dir_bpe_model, FILE_BPE_CODES)
num_ops = task.bpe_op if args.task else args.custom_bpe_op
if args.test:
num_ops = TEST_BPE_OPS
logging.info("BPE Learn (%s): %s + %s -> %s", num_ops, source_fname, target_fname, codes_fname)
third_party.call_learn_bpe(workspace_dir=args.workspace,
source_fname=source_fname,
target_fname=target_fname,
model_fname=codes_fname,
num_ops=num_ops)
# Record success
touch_file(complete_fname)
logging.info("Step complete: %s", step_dir_bpe_model)
# (7) Byte-pair encode data
logging.info("=== Byte-pair encode train/dev/test data ===")
step_dir_bpe = os.path.join(dir_task, DIR_DATA, DIR_BPE)
complete_fname = os.path.join(step_dir_bpe, FILE_COMPLETE)
if os.path.exists(complete_fname):
logging.info("Re-use completed step: %s", step_dir_bpe)
else:
renew_step_dir(step_dir_bpe)
# Encode each data file
for fname in os.listdir(step_dir_tok):
if fname.startswith("."):
continue
input_fname = os.path.join(step_dir_tok, fname)
output_fname = os.path.join(step_dir_bpe, fname)
if skip_bpe:
logging.info("Link pre-encoded: %s -> %s", input_fname, output_fname)
os.symlink(os.path.join("..", DIR_TOK, fname), output_fname)
else:
codes_fname = os.path.join(step_dir_bpe_model, FILE_BPE_CODES)
logging.info("BPE: %s -> %s", input_fname, output_fname)
third_party.call_apply_bpe(workspace_dir=args.workspace,
input_fname=input_fname,
output_fname=output_fname,
model_fname=codes_fname)
# Record success
touch_file(complete_fname)
logging.info("Step complete: %s", step_dir_bpe)
# Done if only running data preparation steps
if args.model == MODEL_NONE:
return
# (8) Run Sockeye training
logging.info("=== Train translation model ===")
logging.info("Model: %s", args.model)
if args.model == MODEL_GNMT:
logging.info("NOTE: This is an 8 layer LSTM model similar (but not exactly identical) to the 'GNMT' architecture.")
step_dir_model = os.path.join(dir_task, DIR_PREFIX_MODEL + args.model)
complete_fname = os.path.join(step_dir_model, FILE_COMPLETE)
if os.path.exists(complete_fname):
logging.info("Re-use completed step: %s", step_dir_model)
else:
log_fname = os.path.join(args.workspace,
DIR_LOGS,
"sockeye.{{}}.{}.{}.{}.log".format(task_name, args.model, os.getpid()))
call_sockeye_train(args.model,
step_dir_bpe,
step_dir_model,
log_fname.format("train"),
args.gpus,
test_mode=args.test)
call_sockeye_average(step_dir_model, log_fname.format("average"))
# Record success
touch_file(complete_fname)
logging.info("Step complete: %s", step_dir_model)
# (9) Decode test sets
logging.info("=== Decode test sets ===")
logging.info("Settings: %s", args.decode_settings)
step_dir_results = os.path.join(dir_task, DIR_RESULTS)
if not os.path.exists(step_dir_results):
logging.info("Create: %s", step_dir_results)
os.makedirs(step_dir_results)
# To collect BPE output names
output_fnames_bpe = []
# For each test file
for fname in os.listdir(step_dir_bpe):
if fname.startswith(PREFIX_TEST) and fname.endswith(SUFFIX_SRC_GZ):
input_fname = os.path.join(step_dir_bpe, fname)
# /path/to/results/test[.N].<model>.<settings>
output_fname = os.path.join(step_dir_results, "{}.{}.{}.{}".format(args.model,
args.decode_settings,
fname[:-len(SUFFIX_SRC_GZ) - 1],
SUFFIX_BPE))
output_fnames_bpe.append(output_fname)
# For the shared results directory, a command file indicates that
# the step has completed successfully.
command_fname = output_fname + "." + SUFFIX_COMMAND
if os.path.exists(command_fname):
logging.info("Re-use output: %s", output_fname)
else:
log_fname = os.path.join(args.workspace,
DIR_LOGS,
"sockeye.translate.{}.{}.{}.{}.log".format(task_name,
args.model,
fname[:-len(SUFFIX_SRC_GZ) - 1],
os.getpid()))
call_sockeye_translate(args=DECODE_ARGS[args.decode_settings],
input_fname=input_fname,
output_fname=output_fname,
model_dir=step_dir_model,
log_fname=log_fname,
use_cpu=(args.gpus == 0))
# (10) Evaluate test sets (bpe/tok/detok)
lang_code = lang_codes[1] if lang_codes else None
logging.info("=== Score outputs ===")
# For each output file
for fname_bpe in output_fnames_bpe:
# Score byte-pair encoded
fname_base = os.path.basename(fname_bpe)[:-len(SUFFIX_BPE)].split(".", 2)[2]
fname_ref_bpe = os.path.join(step_dir_bpe, fname_base + SUFFIX_TRG_GZ)
fname_bleu_bpe = fname_bpe + "." + SUFFIX_BLEU
if os.path.exists(fname_bleu_bpe):
logging.info("Re-use output: %s", fname_bleu_bpe)
else:
fname_log = os.path.join(args.workspace,
DIR_LOGS,
"sacrebleu.sacrebleu.{}.{}.{}.{}.log".format(task_name,
args.model,
fname_base + SUFFIX_BPE,
os.getpid()))
call_sacrebleu(input_fname=fname_bpe,
ref_fname=fname_ref_bpe,
output_fname=fname_bleu_bpe,
log_fname=fname_log,
tokenized=True)
# Score tokenized
fname_tok = fname_bpe[:-len(SUFFIX_BPE)] + SUFFIX_TOK
fname_ref_tok = os.path.join(step_dir_tok, fname_base + SUFFIX_TRG_GZ)
fname_bleu_tok = fname_tok + "." + SUFFIX_BLEU
if os.path.exists(fname_bleu_tok):
logging.info("Re-use output: %s", fname_bleu_tok)
else:
# Merge BPE
logging.info("Merge BPE: %s -> %s", fname_bpe, fname_tok)
third_party.merge_bpe(input_fname=fname_bpe, output_fname=fname_tok)
fname_log = os.path.join(args.workspace,
DIR_LOGS,
"sacrebleu.sacrebleu.{}.{}.{}.{}.log".format(task_name,
args.model,
fname_base + SUFFIX_TOK,
os.getpid()))
call_sacrebleu(input_fname=fname_tok,
ref_fname=fname_ref_tok,
output_fname=fname_bleu_tok,
log_fname=fname_log,
tokenized=True)
# Score detokenized (WMT-compatible BLEU)
fname_detok = fname_bpe[:-len(SUFFIX_BPE)] + SUFFIX_DETOK
fname_ref_raw = os.path.join(step_dir_raw, fname_base + SUFFIX_TRG_GZ)
fname_bleu_detok = fname_detok + "." + SUFFIX_SACREBLEU
if os.path.exists(fname_bleu_detok):
logging.info("Re-use output: %s", fname_bleu_detok)
else:
if not requires_tokenization:
logging.info(
"WARNING: Task uses pre-tokenized data, cannot reliably detokenize to compute WMT-compatible scores")
continue
# Detokenize
logging.info("Detokenize (%s): %s -> %s", lang_code, fname_tok, fname_detok)
third_party.call_moses_detokenizer(workspace_dir=args.workspace,
input_fname=fname_tok,
output_fname=fname_detok,
lang_code=lang_code)
fname_log = os.path.join(args.workspace,
DIR_LOGS,
"sacrebleu.sacrebleu.{}.{}.{}.{}.log".format(task_name,
args.model,
fname_base + SUFFIX_DETOK,
os.getpid()))
call_sacrebleu(input_fname=fname_detok,
ref_fname=fname_ref_raw,
output_fname=fname_bleu_detok,
log_fname=fname_log,
tokenized=False)
|
[
"def",
"run_steps",
"(",
"args",
":",
"argparse",
".",
"Namespace",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"format",
"=",
"\"sockeye.autopilot: %(message)s\"",
")",
"# (1) Establish task",
"logging",
".",
"info",
"(",
"\"=== Start Autopilot ===\"",
")",
"# Listed task",
"if",
"args",
".",
"task",
":",
"task",
"=",
"TASKS",
"[",
"args",
".",
"task",
"]",
"logging",
".",
"info",
"(",
"\"Task: %s\"",
",",
"task",
".",
"description",
")",
"logging",
".",
"info",
"(",
"\"URL: %s\"",
",",
"task",
".",
"url",
")",
"def",
"report_data",
"(",
"file_sets",
")",
":",
"for",
"file_set",
"in",
"file_sets",
":",
"for",
"fname",
"in",
"file_set",
"[",
":",
"2",
"]",
":",
"logging",
".",
"info",
"(",
"\" %s\"",
",",
"fname",
")",
"logging",
".",
"info",
"(",
"\" Train:\"",
")",
"report_data",
"(",
"task",
".",
"train",
")",
"logging",
".",
"info",
"(",
"\" Dev:\"",
")",
"report_data",
"(",
"task",
".",
"dev",
")",
"logging",
".",
"info",
"(",
"\" Test:\"",
")",
"report_data",
"(",
"task",
".",
"test",
")",
"# Custom task",
"else",
":",
"logging",
".",
"info",
"(",
"\"Task: custom\"",
")",
"# Source and target language codes",
"lang_codes",
"=",
"(",
"task",
".",
"src_lang",
",",
"task",
".",
"trg_lang",
")",
"if",
"args",
".",
"task",
"else",
"args",
".",
"custom_lang",
"# (2) Establish workspace and task directories",
"logging",
".",
"info",
"(",
"\"=== Establish working directories ===\"",
")",
"logging",
".",
"info",
"(",
"\"Workspace: %s\"",
",",
"args",
".",
"workspace",
")",
"special_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"FILE_WORKSPACE",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"args",
".",
"workspace",
")",
":",
"logging",
".",
"info",
"(",
"\"Create: %s\"",
",",
"args",
".",
"workspace",
")",
"os",
".",
"makedirs",
"(",
"args",
".",
"workspace",
")",
"touch_file",
"(",
"special_fname",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"special_fname",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Directory %s exists but %s does not, stopping to avoid overwriting files in non-workspace directory\"",
"%",
"(",
"args",
".",
"workspace",
",",
"special_fname",
")",
")",
"dir_third_party",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"third_party",
".",
"DIR_THIRD_PARTY",
")",
"dir_cache",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_CACHE",
")",
"dir_logs",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_LOGS",
")",
"dir_systems",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_SYSTEMS",
")",
"task_name",
"=",
"args",
".",
"task",
"if",
"args",
".",
"task",
"else",
"args",
".",
"custom_task",
"if",
"args",
".",
"test",
":",
"task_name",
"+=",
"SUFFIX_TEST",
"dir_task",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_systems",
",",
"task_name",
")",
"for",
"dirname",
"in",
"(",
"dir_third_party",
",",
"dir_cache",
",",
"dir_logs",
",",
"dir_systems",
",",
"dir_task",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"logging",
".",
"info",
"(",
"\"Exists: %s\"",
",",
"dirname",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\"Create: %s\"",
",",
"dirname",
")",
"os",
".",
"makedirs",
"(",
"dirname",
")",
"# (3) Checkout necessary tools",
"logging",
".",
"info",
"(",
"\"=== Checkout third-party tools ===\"",
")",
"# Requires tokenization?",
"if",
"args",
".",
"task",
"or",
"args",
".",
"custom_text_type",
"==",
"CUSTOM_UTF8_RAW",
":",
"third_party",
".",
"checkout_moses_tokenizer",
"(",
"args",
".",
"workspace",
")",
"# Requires byte-pair encoding?",
"if",
"args",
".",
"task",
"or",
"args",
".",
"custom_text_type",
"in",
"(",
"CUSTOM_UTF8_RAW",
",",
"CUSTOM_UTF8_TOK",
")",
":",
"third_party",
".",
"checkout_subword_nmt",
"(",
"args",
".",
"workspace",
")",
"# (4) Populate train/dev/test data",
"# This step also normalizes whitespace on data population or copy, ensuring",
"# that for all input data, only ASCII newlines are considered line breaks.",
"logging",
".",
"info",
"(",
"\"=== Populate train/dev/test data ===\"",
")",
"step_dir_raw",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_task",
",",
"DIR_DATA",
",",
"DIR_RAW",
")",
"complete_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"FILE_COMPLETE",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"complete_fname",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use completed step: %s\"",
",",
"step_dir_raw",
")",
"else",
":",
"# Listed task",
"if",
"args",
".",
"task",
":",
"raw_files",
"=",
"identify_raw_files",
"(",
"task",
",",
"test_mode",
"=",
"args",
".",
"test",
")",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
"prefix",
"=",
"\"raw.\"",
",",
"dir",
"=",
"dir_task",
")",
"as",
"raw_dir",
":",
"# Download (or locate in cache) and extract raw files to temp directory",
"logging",
".",
"info",
"(",
"\"=== Download and extract raw files ===\"",
")",
"download_extract_raw_files",
"(",
"raw_files",
",",
"dir_cache",
",",
"raw_dir",
")",
"# Copy required files to train/dev/test",
"logging",
".",
"info",
"(",
"\"=== Create input data files ===\"",
")",
"renew_step_dir",
"(",
"step_dir_raw",
")",
"# Test mode uses the full test set as training data and the",
"# first line of the test set as dev and test data",
"populate_parallel_text",
"(",
"raw_dir",
",",
"task",
".",
"test",
"if",
"args",
".",
"test",
"else",
"task",
".",
"train",
",",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"PREFIX_TRAIN",
")",
",",
"False",
")",
"populate_parallel_text",
"(",
"raw_dir",
",",
"task",
".",
"test",
"if",
"args",
".",
"test",
"else",
"task",
".",
"dev",
",",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"PREFIX_DEV",
")",
",",
"False",
",",
"head_n",
"=",
"1",
"if",
"args",
".",
"test",
"else",
"0",
")",
"populate_parallel_text",
"(",
"raw_dir",
",",
"task",
".",
"test",
",",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"PREFIX_TEST",
")",
",",
"True",
",",
"head_n",
"=",
"1",
"if",
"args",
".",
"test",
"else",
"0",
")",
"# Custom task",
"else",
":",
"logging",
".",
"info",
"(",
"\"=== Copy input data files ===\"",
")",
"renew_step_dir",
"(",
"step_dir_raw",
")",
"copy_parallel_text",
"(",
"args",
".",
"custom_train",
",",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"PREFIX_TRAIN",
")",
")",
"copy_parallel_text",
"(",
"args",
".",
"custom_dev",
",",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"PREFIX_DEV",
")",
")",
"copy_parallel_text",
"(",
"args",
".",
"custom_test",
",",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"PREFIX_TEST",
")",
")",
"# Record success",
"touch_file",
"(",
"complete_fname",
")",
"logging",
".",
"info",
"(",
"\"Step complete: %s\"",
",",
"step_dir_raw",
")",
"# (5) Tokenize train/dev/test data",
"# Task requires tokenization if _any_ raw file is not already tokenized",
"requires_tokenization",
"=",
"False",
"if",
"args",
".",
"task",
":",
"for",
"file_sets",
"in",
"(",
"task",
".",
"train",
",",
"task",
".",
"dev",
",",
"task",
".",
"test",
")",
":",
"for",
"_",
",",
"_",
",",
"text_type",
"in",
"file_sets",
":",
"if",
"text_type",
"in",
"TEXT_REQUIRES_TOKENIZATION",
":",
"requires_tokenization",
"=",
"True",
"else",
":",
"if",
"args",
".",
"custom_text_type",
"==",
"CUSTOM_UTF8_RAW",
":",
"requires_tokenization",
"=",
"True",
"logging",
".",
"info",
"(",
"\"=== Tokenize train/dev/test data ===\"",
")",
"step_dir_tok",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_task",
",",
"DIR_DATA",
",",
"DIR_TOK",
")",
"complete_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_tok",
",",
"FILE_COMPLETE",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"complete_fname",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use completed step: %s\"",
",",
"step_dir_tok",
")",
"else",
":",
"renew_step_dir",
"(",
"step_dir_tok",
")",
"# Tokenize each data file using the appropriate language code OR link",
"# raw file if already tokenized.",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"step_dir_raw",
")",
":",
"if",
"fname",
".",
"startswith",
"(",
"\".\"",
")",
":",
"continue",
"input_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"fname",
")",
"output_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_tok",
",",
"fname",
")",
"if",
"requires_tokenization",
":",
"lang_code",
"=",
"lang_codes",
"[",
"0",
"]",
"if",
"fname",
".",
"endswith",
"(",
"SUFFIX_SRC_GZ",
")",
"else",
"lang_codes",
"[",
"1",
"]",
"logging",
".",
"info",
"(",
"\"Tokenize (%s): %s -> %s\"",
",",
"lang_code",
",",
"input_fname",
",",
"output_fname",
")",
"third_party",
".",
"call_moses_tokenizer",
"(",
"workspace_dir",
"=",
"args",
".",
"workspace",
",",
"input_fname",
"=",
"input_fname",
",",
"output_fname",
"=",
"output_fname",
",",
"lang_code",
"=",
"lang_code",
")",
"else",
":",
"logging",
".",
"info",
"(",
"\"Link pre-tokenized: %s -> %s\"",
",",
"input_fname",
",",
"output_fname",
")",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"..\"",
",",
"DIR_RAW",
",",
"fname",
")",
",",
"output_fname",
")",
"# Record success",
"touch_file",
"(",
"complete_fname",
")",
"logging",
".",
"info",
"(",
"\"Step complete: %s\"",
",",
"step_dir_tok",
")",
"# (6) Learn byte-pair encoding model",
"# Task requires byte-pair encoding unless using pre-encoded custom data",
"skip_bpe",
"=",
"(",
"not",
"args",
".",
"task",
")",
"and",
"args",
".",
"custom_text_type",
"==",
"CUSTOM_UTF8_BPE",
"logging",
".",
"info",
"(",
"\"=== Learn byte-pair encoding model ===\"",
")",
"step_dir_bpe_model",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_task",
",",
"DIR_BPE_MODEL",
")",
"complete_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_bpe_model",
",",
"FILE_COMPLETE",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"complete_fname",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use completed step: %s\"",
",",
"step_dir_bpe_model",
")",
"else",
":",
"renew_step_dir",
"(",
"step_dir_bpe_model",
")",
"if",
"skip_bpe",
":",
"logging",
".",
"info",
"(",
"\"BPE model not required for pre-encoded data\"",
")",
"else",
":",
"source_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_tok",
",",
"PREFIX_TRAIN",
"+",
"SUFFIX_SRC_GZ",
")",
"target_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_tok",
",",
"PREFIX_TRAIN",
"+",
"SUFFIX_TRG_GZ",
")",
"codes_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_bpe_model",
",",
"FILE_BPE_CODES",
")",
"num_ops",
"=",
"task",
".",
"bpe_op",
"if",
"args",
".",
"task",
"else",
"args",
".",
"custom_bpe_op",
"if",
"args",
".",
"test",
":",
"num_ops",
"=",
"TEST_BPE_OPS",
"logging",
".",
"info",
"(",
"\"BPE Learn (%s): %s + %s -> %s\"",
",",
"num_ops",
",",
"source_fname",
",",
"target_fname",
",",
"codes_fname",
")",
"third_party",
".",
"call_learn_bpe",
"(",
"workspace_dir",
"=",
"args",
".",
"workspace",
",",
"source_fname",
"=",
"source_fname",
",",
"target_fname",
"=",
"target_fname",
",",
"model_fname",
"=",
"codes_fname",
",",
"num_ops",
"=",
"num_ops",
")",
"# Record success",
"touch_file",
"(",
"complete_fname",
")",
"logging",
".",
"info",
"(",
"\"Step complete: %s\"",
",",
"step_dir_bpe_model",
")",
"# (7) Byte-pair encode data",
"logging",
".",
"info",
"(",
"\"=== Byte-pair encode train/dev/test data ===\"",
")",
"step_dir_bpe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_task",
",",
"DIR_DATA",
",",
"DIR_BPE",
")",
"complete_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_bpe",
",",
"FILE_COMPLETE",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"complete_fname",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use completed step: %s\"",
",",
"step_dir_bpe",
")",
"else",
":",
"renew_step_dir",
"(",
"step_dir_bpe",
")",
"# Encode each data file",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"step_dir_tok",
")",
":",
"if",
"fname",
".",
"startswith",
"(",
"\".\"",
")",
":",
"continue",
"input_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_tok",
",",
"fname",
")",
"output_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_bpe",
",",
"fname",
")",
"if",
"skip_bpe",
":",
"logging",
".",
"info",
"(",
"\"Link pre-encoded: %s -> %s\"",
",",
"input_fname",
",",
"output_fname",
")",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"..\"",
",",
"DIR_TOK",
",",
"fname",
")",
",",
"output_fname",
")",
"else",
":",
"codes_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_bpe_model",
",",
"FILE_BPE_CODES",
")",
"logging",
".",
"info",
"(",
"\"BPE: %s -> %s\"",
",",
"input_fname",
",",
"output_fname",
")",
"third_party",
".",
"call_apply_bpe",
"(",
"workspace_dir",
"=",
"args",
".",
"workspace",
",",
"input_fname",
"=",
"input_fname",
",",
"output_fname",
"=",
"output_fname",
",",
"model_fname",
"=",
"codes_fname",
")",
"# Record success",
"touch_file",
"(",
"complete_fname",
")",
"logging",
".",
"info",
"(",
"\"Step complete: %s\"",
",",
"step_dir_bpe",
")",
"# Done if only running data preparation steps",
"if",
"args",
".",
"model",
"==",
"MODEL_NONE",
":",
"return",
"# (8) Run Sockeye training",
"logging",
".",
"info",
"(",
"\"=== Train translation model ===\"",
")",
"logging",
".",
"info",
"(",
"\"Model: %s\"",
",",
"args",
".",
"model",
")",
"if",
"args",
".",
"model",
"==",
"MODEL_GNMT",
":",
"logging",
".",
"info",
"(",
"\"NOTE: This is an 8 layer LSTM model similar (but not exactly identical) to the 'GNMT' architecture.\"",
")",
"step_dir_model",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_task",
",",
"DIR_PREFIX_MODEL",
"+",
"args",
".",
"model",
")",
"complete_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_model",
",",
"FILE_COMPLETE",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"complete_fname",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use completed step: %s\"",
",",
"step_dir_model",
")",
"else",
":",
"log_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_LOGS",
",",
"\"sockeye.{{}}.{}.{}.{}.log\"",
".",
"format",
"(",
"task_name",
",",
"args",
".",
"model",
",",
"os",
".",
"getpid",
"(",
")",
")",
")",
"call_sockeye_train",
"(",
"args",
".",
"model",
",",
"step_dir_bpe",
",",
"step_dir_model",
",",
"log_fname",
".",
"format",
"(",
"\"train\"",
")",
",",
"args",
".",
"gpus",
",",
"test_mode",
"=",
"args",
".",
"test",
")",
"call_sockeye_average",
"(",
"step_dir_model",
",",
"log_fname",
".",
"format",
"(",
"\"average\"",
")",
")",
"# Record success",
"touch_file",
"(",
"complete_fname",
")",
"logging",
".",
"info",
"(",
"\"Step complete: %s\"",
",",
"step_dir_model",
")",
"# (9) Decode test sets",
"logging",
".",
"info",
"(",
"\"=== Decode test sets ===\"",
")",
"logging",
".",
"info",
"(",
"\"Settings: %s\"",
",",
"args",
".",
"decode_settings",
")",
"step_dir_results",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_task",
",",
"DIR_RESULTS",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"step_dir_results",
")",
":",
"logging",
".",
"info",
"(",
"\"Create: %s\"",
",",
"step_dir_results",
")",
"os",
".",
"makedirs",
"(",
"step_dir_results",
")",
"# To collect BPE output names",
"output_fnames_bpe",
"=",
"[",
"]",
"# For each test file",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"step_dir_bpe",
")",
":",
"if",
"fname",
".",
"startswith",
"(",
"PREFIX_TEST",
")",
"and",
"fname",
".",
"endswith",
"(",
"SUFFIX_SRC_GZ",
")",
":",
"input_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_bpe",
",",
"fname",
")",
"# /path/to/results/test[.N].<model>.<settings>",
"output_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_results",
",",
"\"{}.{}.{}.{}\"",
".",
"format",
"(",
"args",
".",
"model",
",",
"args",
".",
"decode_settings",
",",
"fname",
"[",
":",
"-",
"len",
"(",
"SUFFIX_SRC_GZ",
")",
"-",
"1",
"]",
",",
"SUFFIX_BPE",
")",
")",
"output_fnames_bpe",
".",
"append",
"(",
"output_fname",
")",
"# For the shared results directory, a command file indicates that",
"# the step has completed successfully.",
"command_fname",
"=",
"output_fname",
"+",
"\".\"",
"+",
"SUFFIX_COMMAND",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"command_fname",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use output: %s\"",
",",
"output_fname",
")",
"else",
":",
"log_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_LOGS",
",",
"\"sockeye.translate.{}.{}.{}.{}.log\"",
".",
"format",
"(",
"task_name",
",",
"args",
".",
"model",
",",
"fname",
"[",
":",
"-",
"len",
"(",
"SUFFIX_SRC_GZ",
")",
"-",
"1",
"]",
",",
"os",
".",
"getpid",
"(",
")",
")",
")",
"call_sockeye_translate",
"(",
"args",
"=",
"DECODE_ARGS",
"[",
"args",
".",
"decode_settings",
"]",
",",
"input_fname",
"=",
"input_fname",
",",
"output_fname",
"=",
"output_fname",
",",
"model_dir",
"=",
"step_dir_model",
",",
"log_fname",
"=",
"log_fname",
",",
"use_cpu",
"=",
"(",
"args",
".",
"gpus",
"==",
"0",
")",
")",
"# (10) Evaluate test sets (bpe/tok/detok)",
"lang_code",
"=",
"lang_codes",
"[",
"1",
"]",
"if",
"lang_codes",
"else",
"None",
"logging",
".",
"info",
"(",
"\"=== Score outputs ===\"",
")",
"# For each output file",
"for",
"fname_bpe",
"in",
"output_fnames_bpe",
":",
"# Score byte-pair encoded",
"fname_base",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fname_bpe",
")",
"[",
":",
"-",
"len",
"(",
"SUFFIX_BPE",
")",
"]",
".",
"split",
"(",
"\".\"",
",",
"2",
")",
"[",
"2",
"]",
"fname_ref_bpe",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_bpe",
",",
"fname_base",
"+",
"SUFFIX_TRG_GZ",
")",
"fname_bleu_bpe",
"=",
"fname_bpe",
"+",
"\".\"",
"+",
"SUFFIX_BLEU",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fname_bleu_bpe",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use output: %s\"",
",",
"fname_bleu_bpe",
")",
"else",
":",
"fname_log",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_LOGS",
",",
"\"sacrebleu.sacrebleu.{}.{}.{}.{}.log\"",
".",
"format",
"(",
"task_name",
",",
"args",
".",
"model",
",",
"fname_base",
"+",
"SUFFIX_BPE",
",",
"os",
".",
"getpid",
"(",
")",
")",
")",
"call_sacrebleu",
"(",
"input_fname",
"=",
"fname_bpe",
",",
"ref_fname",
"=",
"fname_ref_bpe",
",",
"output_fname",
"=",
"fname_bleu_bpe",
",",
"log_fname",
"=",
"fname_log",
",",
"tokenized",
"=",
"True",
")",
"# Score tokenized",
"fname_tok",
"=",
"fname_bpe",
"[",
":",
"-",
"len",
"(",
"SUFFIX_BPE",
")",
"]",
"+",
"SUFFIX_TOK",
"fname_ref_tok",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_tok",
",",
"fname_base",
"+",
"SUFFIX_TRG_GZ",
")",
"fname_bleu_tok",
"=",
"fname_tok",
"+",
"\".\"",
"+",
"SUFFIX_BLEU",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fname_bleu_tok",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use output: %s\"",
",",
"fname_bleu_tok",
")",
"else",
":",
"# Merge BPE",
"logging",
".",
"info",
"(",
"\"Merge BPE: %s -> %s\"",
",",
"fname_bpe",
",",
"fname_tok",
")",
"third_party",
".",
"merge_bpe",
"(",
"input_fname",
"=",
"fname_bpe",
",",
"output_fname",
"=",
"fname_tok",
")",
"fname_log",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_LOGS",
",",
"\"sacrebleu.sacrebleu.{}.{}.{}.{}.log\"",
".",
"format",
"(",
"task_name",
",",
"args",
".",
"model",
",",
"fname_base",
"+",
"SUFFIX_TOK",
",",
"os",
".",
"getpid",
"(",
")",
")",
")",
"call_sacrebleu",
"(",
"input_fname",
"=",
"fname_tok",
",",
"ref_fname",
"=",
"fname_ref_tok",
",",
"output_fname",
"=",
"fname_bleu_tok",
",",
"log_fname",
"=",
"fname_log",
",",
"tokenized",
"=",
"True",
")",
"# Score detokenized (WMT-compatible BLEU)",
"fname_detok",
"=",
"fname_bpe",
"[",
":",
"-",
"len",
"(",
"SUFFIX_BPE",
")",
"]",
"+",
"SUFFIX_DETOK",
"fname_ref_raw",
"=",
"os",
".",
"path",
".",
"join",
"(",
"step_dir_raw",
",",
"fname_base",
"+",
"SUFFIX_TRG_GZ",
")",
"fname_bleu_detok",
"=",
"fname_detok",
"+",
"\".\"",
"+",
"SUFFIX_SACREBLEU",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fname_bleu_detok",
")",
":",
"logging",
".",
"info",
"(",
"\"Re-use output: %s\"",
",",
"fname_bleu_detok",
")",
"else",
":",
"if",
"not",
"requires_tokenization",
":",
"logging",
".",
"info",
"(",
"\"WARNING: Task uses pre-tokenized data, cannot reliably detokenize to compute WMT-compatible scores\"",
")",
"continue",
"# Detokenize",
"logging",
".",
"info",
"(",
"\"Detokenize (%s): %s -> %s\"",
",",
"lang_code",
",",
"fname_tok",
",",
"fname_detok",
")",
"third_party",
".",
"call_moses_detokenizer",
"(",
"workspace_dir",
"=",
"args",
".",
"workspace",
",",
"input_fname",
"=",
"fname_tok",
",",
"output_fname",
"=",
"fname_detok",
",",
"lang_code",
"=",
"lang_code",
")",
"fname_log",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"workspace",
",",
"DIR_LOGS",
",",
"\"sacrebleu.sacrebleu.{}.{}.{}.{}.log\"",
".",
"format",
"(",
"task_name",
",",
"args",
".",
"model",
",",
"fname_base",
"+",
"SUFFIX_DETOK",
",",
"os",
".",
"getpid",
"(",
")",
")",
")",
"call_sacrebleu",
"(",
"input_fname",
"=",
"fname_detok",
",",
"ref_fname",
"=",
"fname_ref_raw",
",",
"output_fname",
"=",
"fname_bleu_detok",
",",
"log_fname",
"=",
"fname_log",
",",
"tokenized",
"=",
"False",
")"
] | 49.871508 | 22.625698 |
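One design choice worth noting in run_steps is that every step directory carries a completion marker file, so interrupted runs can be restarted and already-finished steps are skipped. The snippet below is a minimal standalone sketch of that idiom with a hypothetical marker name, not the Sockeye implementation itself.

import logging
import os

FILE_COMPLETE = ".complete"  # hypothetical marker name


def run_step(step_dir, build_fn):
    """Run build_fn(step_dir) unless a completion marker says it already finished."""
    complete_fname = os.path.join(step_dir, FILE_COMPLETE)
    if os.path.exists(complete_fname):
        logging.info("Re-use completed step: %s", step_dir)
        return
    os.makedirs(step_dir, exist_ok=True)
    build_fn(step_dir)
    # Record success so later invocations skip this step.
    with open(complete_fname, "w"):
        pass
    logging.info("Step complete: %s", step_dir)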
def advance(self, blocksize):
""" Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
status: boolean
Returns True if all of the status information if valid,
False if any is not.
"""
try:
if self.increment_update_cache:
self.update_cache_by_increment(blocksize)
ts = DataBuffer.advance(self, blocksize)
return self.check_valid(ts)
except RuntimeError:
self.null_advance(blocksize)
return False
|
[
"def",
"advance",
"(",
"self",
",",
"blocksize",
")",
":",
"try",
":",
"if",
"self",
".",
"increment_update_cache",
":",
"self",
".",
"update_cache_by_increment",
"(",
"blocksize",
")",
"ts",
"=",
"DataBuffer",
".",
"advance",
"(",
"self",
",",
"blocksize",
")",
"return",
"self",
".",
"check_valid",
"(",
"ts",
")",
"except",
"RuntimeError",
":",
"self",
".",
"null_advance",
"(",
"blocksize",
")",
"return",
"False"
] | 31.173913 | 16.782609 |
def clinvar_submission_lines(submission_objs, submission_header):
"""Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
submission_header(dict) : as in constants CLINVAR_HEADER and CASEDATA_HEADER, but with required fields only
Returns:
submission_lines(list): a list of strings, each representing a line of the ClinVar csv file to be downloaded
"""
submission_lines = []
for submission_obj in submission_objs: # Loop over the submission objects. Each of these is a line
csv_line = []
for header_key, header_value in submission_header.items(): # header_keys are the same keys as in submission_objs
if header_key in submission_obj: # The field is filled in for this variant/casedata object
csv_line.append('"'+submission_obj.get(header_key)+'"')
else: # Empty field for this variant/casedata object
csv_line.append('""')
submission_lines.append(','.join(csv_line))
return submission_lines
|
[
"def",
"clinvar_submission_lines",
"(",
"submission_objs",
",",
"submission_header",
")",
":",
"submission_lines",
"=",
"[",
"]",
"for",
"submission_obj",
"in",
"submission_objs",
":",
"# Loop over the submission objects. Each of these is a line",
"csv_line",
"=",
"[",
"]",
"for",
"header_key",
",",
"header_value",
"in",
"submission_header",
".",
"items",
"(",
")",
":",
"# header_keys are the same keys as in submission_objs",
"if",
"header_key",
"in",
"submission_obj",
":",
"# The field is filled in for this variant/casedata object",
"csv_line",
".",
"append",
"(",
"'\"'",
"+",
"submission_obj",
".",
"get",
"(",
"header_key",
")",
"+",
"'\"'",
")",
"else",
":",
"# Empty field for this this variant/casedata object",
"csv_line",
".",
"append",
"(",
"'\"\"'",
")",
"submission_lines",
".",
"append",
"(",
"','",
".",
"join",
"(",
"csv_line",
")",
")",
"return",
"submission_lines"
] | 54.5 | 35.636364 |
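The sketch below reproduces the quoting/joining logic of clinvar_submission_lines on two hypothetical submission objects; the real header constants (CLINVAR_HEADER, CASEDATA_HEADER) and field names come from scout and are only stand-ins here.

from collections import OrderedDict

# Hypothetical header and submission objects, just to show the produced csv lines
submission_header = OrderedDict([("##Local ID", "Local ID"),
                                 ("Reference sequence", "Reference sequence")])
submission_objs = [{"##Local ID": "SNV_1", "Reference sequence": "NM_000059.3"},
                   {"##Local ID": "SNV_2"}]  # missing field -> empty quoted cell

lines = []
for obj in submission_objs:
    cells = ['"' + obj[key] + '"' if key in obj else '""' for key in submission_header]
    lines.append(",".join(cells))
print(lines)  # ['"SNV_1","NM_000059.3"', '"SNV_2",""']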
def update_assessment_offered(self, assessment_offered_form):
"""Updates an existing assessment offered.
arg: assessment_offered_form
(osid.assessment.AssessmentOfferedForm): the form
containing the elements to be updated
raise: IllegalState - ``assessment_offered_form`` already used
in an update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``assessment_offered_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_form`` did not originate from
``get_assessment_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.update_resource_template
collection = JSONClientValidated('assessment',
collection='AssessmentOffered',
runtime=self._runtime)
if not isinstance(assessment_offered_form, ABCAssessmentOfferedForm):
raise errors.InvalidArgument('argument type is not an AssessmentOfferedForm')
if not assessment_offered_form.is_for_update():
raise errors.InvalidArgument('the AssessmentOfferedForm is for update only, not create')
try:
if self._forms[assessment_offered_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('assessment_offered_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('assessment_offered_form did not originate from this session')
if not assessment_offered_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(assessment_offered_form._my_map)
self._forms[assessment_offered_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned:
return objects.AssessmentOffered(
osid_object_map=assessment_offered_form._my_map,
runtime=self._runtime,
proxy=self._proxy)
|
[
"def",
"update_assessment_offered",
"(",
"self",
",",
"assessment_offered_form",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceAdminSession.update_resource_template",
"collection",
"=",
"JSONClientValidated",
"(",
"'assessment'",
",",
"collection",
"=",
"'AssessmentOffered'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"if",
"not",
"isinstance",
"(",
"assessment_offered_form",
",",
"ABCAssessmentOfferedForm",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
"'argument type is not an AssessmentOfferedForm'",
")",
"if",
"not",
"assessment_offered_form",
".",
"is_for_update",
"(",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
"'the AssessmentOfferedForm is for update only, not create'",
")",
"try",
":",
"if",
"self",
".",
"_forms",
"[",
"assessment_offered_form",
".",
"get_id",
"(",
")",
".",
"get_identifier",
"(",
")",
"]",
"==",
"UPDATED",
":",
"raise",
"errors",
".",
"IllegalState",
"(",
"'assessment_offered_form already used in an update transaction'",
")",
"except",
"KeyError",
":",
"raise",
"errors",
".",
"Unsupported",
"(",
"'assessment_offered_form did not originate from this session'",
")",
"if",
"not",
"assessment_offered_form",
".",
"is_valid",
"(",
")",
":",
"raise",
"errors",
".",
"InvalidArgument",
"(",
"'one or more of the form elements is invalid'",
")",
"collection",
".",
"save",
"(",
"assessment_offered_form",
".",
"_my_map",
")",
"self",
".",
"_forms",
"[",
"assessment_offered_form",
".",
"get_id",
"(",
")",
".",
"get_identifier",
"(",
")",
"]",
"=",
"UPDATED",
"# Note: this is out of spec. The OSIDs don't require an object to be returned:",
"return",
"objects",
".",
"AssessmentOffered",
"(",
"osid_object_map",
"=",
"assessment_offered_form",
".",
"_my_map",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")"
] | 55.166667 | 26.190476 |
def intersection(self,other):
"""
Return a new Interval with the intersection of the two intervals,
i.e. all elements that are in both self and other.
:param Interval other: Interval to intersect with
:rtype: Interval
"""
if self.bounds[0] < other.bounds[0]:
i1,i2 = self,other
else:
i2,i1 = self,other
if self.is_disjoint(other):
return Interval((1,0),(True,True))
bounds = [None,None]
included = [None,None]
#sets are not disjoint, so i2.bounds[0] in i1:
bounds[0] = i2.bounds[0]
included[0] = i2.included[0]
if i2.bounds[1] in i1:
bounds[1] = i2.bounds[1]
included[1] = i2.included[1]
else:
bounds[1] = i1.bounds[1]
included[1] = i1.included[1]
return Interval(bounds,included)
|
[
"def",
"intersection",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"bounds",
"[",
"0",
"]",
"<",
"other",
".",
"bounds",
"[",
"0",
"]",
":",
"i1",
",",
"i2",
"=",
"self",
",",
"other",
"else",
":",
"i2",
",",
"i1",
"=",
"self",
",",
"other",
"if",
"self",
".",
"is_disjoint",
"(",
"other",
")",
":",
"return",
"Interval",
"(",
"(",
"1",
",",
"0",
")",
",",
"(",
"True",
",",
"True",
")",
")",
"bounds",
"=",
"[",
"None",
",",
"None",
"]",
"included",
"=",
"[",
"None",
",",
"None",
"]",
"#sets are not disjoint, so i2.bounds[0] in i1:",
"bounds",
"[",
"0",
"]",
"=",
"i2",
".",
"bounds",
"[",
"0",
"]",
"included",
"[",
"0",
"]",
"=",
"i2",
".",
"included",
"[",
"0",
"]",
"if",
"i2",
".",
"bounds",
"[",
"1",
"]",
"in",
"i1",
":",
"bounds",
"[",
"1",
"]",
"=",
"i2",
".",
"bounds",
"[",
"1",
"]",
"included",
"[",
"1",
"]",
"=",
"i2",
".",
"included",
"[",
"1",
"]",
"else",
":",
"bounds",
"[",
"1",
"]",
"=",
"i1",
".",
"bounds",
"[",
"1",
"]",
"included",
"[",
"1",
"]",
"=",
"i1",
".",
"included",
"[",
"1",
"]",
"return",
"Interval",
"(",
"bounds",
",",
"included",
")"
] | 29.233333 | 15.033333 |
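As a plain-number illustration of the ordering trick used in intersection (pick the interval that starts first, then clip at the shorter upper bound), here is a standalone sketch for closed intervals only; the class above additionally tracks open/closed endpoints through its included flags.

def closed_intersection(a, b):
    """Intersect two closed intervals given as (lo, hi); return (1, 0) (empty) if disjoint."""
    i1, i2 = (a, b) if a[0] <= b[0] else (b, a)   # i1 is the interval that starts first
    if i2[0] > i1[1]:                             # no overlap
        return (1, 0)
    return (i2[0], min(i1[1], i2[1]))             # overlap starts at i2's lower bound

print(closed_intersection((0, 5), (3, 8)))  # (3, 5)
print(closed_intersection((0, 1), (2, 3)))  # (1, 0) -> empty, mirroring Interval((1,0),(True,True))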
def standard_kinetics(target, quantity, prefactor, exponent):
r"""
"""
X = target[quantity]
A = target[prefactor]
b = target[exponent]
r = A*(X**b)
S1 = A*b*(X**(b - 1))
S2 = A*(1 - b)*(X**b)
values = {'S1': S1, 'S2': S2, 'rate': r}
return values
|
[
"def",
"standard_kinetics",
"(",
"target",
",",
"quantity",
",",
"prefactor",
",",
"exponent",
")",
":",
"X",
"=",
"target",
"[",
"quantity",
"]",
"A",
"=",
"target",
"[",
"prefactor",
"]",
"b",
"=",
"target",
"[",
"exponent",
"]",
"r",
"=",
"A",
"*",
"(",
"X",
"**",
"b",
")",
"S1",
"=",
"A",
"*",
"b",
"*",
"(",
"X",
"**",
"(",
"b",
"-",
"1",
")",
")",
"S2",
"=",
"A",
"*",
"(",
"1",
"-",
"b",
")",
"*",
"(",
"X",
"**",
"b",
")",
"values",
"=",
"{",
"'S1'",
":",
"S1",
",",
"'S2'",
":",
"S2",
",",
"'rate'",
":",
"r",
"}",
"return",
"values"
] | 21.230769 | 17.615385 |
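Since the docstring gives no example, the following standalone sketch (plain numpy, made-up values) mirrors the math of standard_kinetics and checks the linearization identity r = S1*X + S2 that the returned terms satisfy.

import numpy as np

X = np.array([0.5, 1.0, 2.0])   # e.g. concentrations (made-up values)
A, b = 2.0, 1.5                 # prefactor and exponent

r = A * X**b
S1 = A * b * X**(b - 1)
S2 = A * (1 - b) * X**b

# The linearization satisfies r == S1*X + S2 at the evaluation point.
assert np.allclose(r, S1 * X + S2)
print({'rate': r, 'S1': S1, 'S2': S2})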
def add_event_detect(channel, trigger, callback=None, bouncetime=None):
"""
This function is designed to be used in a loop with other things, but unlike
polling it is not going to miss the change in state of an input while the
CPU is busy working on other things. This could be useful when using
something like Pygame or PyQt where there is a main loop listening and
responding to GUI events in a timely basis.
:param channel: the channel based on the numbering system you have specified
(:py:attr:`GPIO.BOARD`, :py:attr:`GPIO.BCM` or :py:attr:`GPIO.SUNXI`).
:param trigger: The event to detect, one of: :py:attr:`GPIO.RISING`,
:py:attr:`GPIO.FALLING` or :py:attr:`GPIO.BOTH`.
:param callback: (optional) a function to be called when the specified edge
    is detected on the channel.
:param bouncetime: (optional) minimum time (in milliseconds) between two
    events, used to debounce the input.
.. code:: python
GPIO.add_event_detect(channel, GPIO.RISING) # add rising edge detection on a channel
do_something()
if GPIO.event_detected(channel):
print('Button pressed')
"""
_check_configured(channel, direction=IN)
if bouncetime is not None:
if _gpio_warnings:
warnings.warn("bouncetime is not (yet) fully supported, continuing anyway. Use GPIO.setwarnings(False) to disable warnings.", stacklevel=2)
pin = get_gpio_pin(_mode, channel)
event.add_edge_detect(pin, trigger, __wrap(callback, channel))
|
[
"def",
"add_event_detect",
"(",
"channel",
",",
"trigger",
",",
"callback",
"=",
"None",
",",
"bouncetime",
"=",
"None",
")",
":",
"_check_configured",
"(",
"channel",
",",
"direction",
"=",
"IN",
")",
"if",
"bouncetime",
"is",
"not",
"None",
":",
"if",
"_gpio_warnings",
":",
"warnings",
".",
"warn",
"(",
"\"bouncetime is not (yet) fully supported, continuing anyway. Use GPIO.setwarnings(False) to disable warnings.\"",
",",
"stacklevel",
"=",
"2",
")",
"pin",
"=",
"get_gpio_pin",
"(",
"_mode",
",",
"channel",
")",
"event",
".",
"add_edge_detect",
"(",
"pin",
",",
"trigger",
",",
"__wrap",
"(",
"callback",
",",
"channel",
")",
")"
] | 45.166667 | 25.966667 |
def reject_sender(self, link_handle, pn_condition=None):
"""Rejects the SenderLink, and destroys the handle."""
link = self._sender_links.get(link_handle)
if not link:
raise Exception("Invalid link_handle: %s" % link_handle)
link.reject(pn_condition)
# note: normally, link.destroy() cannot be called from a callback,
# but this link was never made available to the application so this
# link is only referenced by the connection
link.destroy()
|
[
"def",
"reject_sender",
"(",
"self",
",",
"link_handle",
",",
"pn_condition",
"=",
"None",
")",
":",
"link",
"=",
"self",
".",
"_sender_links",
".",
"get",
"(",
"link_handle",
")",
"if",
"not",
"link",
":",
"raise",
"Exception",
"(",
"\"Invalid link_handle: %s\"",
"%",
"link_handle",
")",
"link",
".",
"reject",
"(",
"pn_condition",
")",
"# note: normally, link.destroy() cannot be called from a callback,",
"# but this link was never made available to the application so this",
"# link is only referenced by the connection",
"link",
".",
"destroy",
"(",
")"
] | 51.1 | 17.9 |
def tracelines(self, xstart, ystart, zstart, hstepmax, vstepfrac=0.2,
tmax=1e12, nstepmax=100, silent='.', color=None, orientation='hor',
win=[-1e30, 1e30, -1e30, 1e30], newfig=False, figsize=None):
"""Draw trace lines
"""
if color is None:
c = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif type(color) is str:
c = self.aq.naq * [color]
elif type(color) is list:
c = color
if len(c) < self.aq.naq:
n = int(np.ceil(self.aq.naq / len(c)))
c = n * c
fig = plt.gcf()
assert len(fig.axes) > 0, 'Error: Need to specify axes in figure before invoking tracelines'
ax1 = None
ax2 = None
if orientation == 'both':
ax1 = fig.axes[0]
ax2 = fig.axes[1]
elif orientation[:3] == 'hor':
ax1 = fig.axes[0]
elif orientation[:3] == 'ver':
ax2 = fig.axes[1]
xyztlist = []
for i in range(len(xstart)):
xyzt, layerlist = timtraceline(self, xstart[i], ystart[i], zstart[i], hstepmax=hstepmax,
vstepfrac=vstepfrac, tmax=tmax, nstepmax=nstepmax,
silent=silent, win=win, returnlayers=True)
if silent == '.':
print('.', end='', flush=True)
if ax1 is not None:
#plt.axes(ax1)
color = [c[self.aq.layernumber[i]] if self.aq.ltype[i] == 'a' else 'k' for i in layerlist]
points = np.array([xyzt[:,0], xyzt[:,1]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, colors=color)
ax1.add_collection(lc)
#ax1.plot(xyzt[:, 0], xyzt[:, 1], color=color)
if ax2 is not None:
color = [c[self.aq.layernumber[i]] if self.aq.ltype[i] == 'a' else 'k' for i in layerlist]
points = np.array([xyzt[:,0], xyzt[:,2]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, colors=color)
ax2.add_collection(lc)
ax2.set_ylim(self.aq.z[-1], self.aq.z[0])
|
[
"def",
"tracelines",
"(",
"self",
",",
"xstart",
",",
"ystart",
",",
"zstart",
",",
"hstepmax",
",",
"vstepfrac",
"=",
"0.2",
",",
"tmax",
"=",
"1e12",
",",
"nstepmax",
"=",
"100",
",",
"silent",
"=",
"'.'",
",",
"color",
"=",
"None",
",",
"orientation",
"=",
"'hor'",
",",
"win",
"=",
"[",
"-",
"1e30",
",",
"1e30",
",",
"-",
"1e30",
",",
"1e30",
"]",
",",
"newfig",
"=",
"False",
",",
"figsize",
"=",
"None",
")",
":",
"if",
"color",
"is",
"None",
":",
"c",
"=",
"plt",
".",
"rcParams",
"[",
"'axes.prop_cycle'",
"]",
".",
"by_key",
"(",
")",
"[",
"'color'",
"]",
"elif",
"type",
"(",
"color",
")",
"is",
"str",
":",
"c",
"=",
"self",
".",
"aq",
".",
"naq",
"*",
"[",
"color",
"]",
"elif",
"type",
"(",
"color",
")",
"is",
"list",
":",
"c",
"=",
"color",
"if",
"len",
"(",
"c",
")",
"<",
"self",
".",
"aq",
".",
"naq",
":",
"n",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"self",
".",
"aq",
".",
"naq",
"/",
"len",
"(",
"c",
")",
")",
")",
"c",
"=",
"n",
"*",
"c",
"fig",
"=",
"plt",
".",
"gcf",
"(",
")",
"assert",
"len",
"(",
"fig",
".",
"axes",
")",
">",
"0",
",",
"'Error: Need to specify axes in figure before invoking tracelines'",
"ax1",
"=",
"None",
"ax2",
"=",
"None",
"if",
"orientation",
"==",
"'both'",
":",
"ax1",
"=",
"fig",
".",
"axes",
"[",
"0",
"]",
"ax2",
"=",
"fig",
".",
"axes",
"[",
"1",
"]",
"elif",
"orientation",
"[",
":",
"3",
"]",
"==",
"'hor'",
":",
"ax1",
"=",
"fig",
".",
"axes",
"[",
"0",
"]",
"elif",
"orientation",
"[",
":",
"3",
"]",
"==",
"'ver'",
":",
"ax2",
"=",
"fig",
".",
"axes",
"[",
"1",
"]",
"xyztlist",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"xstart",
")",
")",
":",
"xyzt",
",",
"layerlist",
"=",
"timtraceline",
"(",
"self",
",",
"xstart",
"[",
"i",
"]",
",",
"ystart",
"[",
"i",
"]",
",",
"zstart",
"[",
"i",
"]",
",",
"hstepmax",
"=",
"hstepmax",
",",
"vstepfrac",
"=",
"vstepfrac",
",",
"tmax",
"=",
"tmax",
",",
"nstepmax",
"=",
"nstepmax",
",",
"silent",
"=",
"silent",
",",
"win",
"=",
"win",
",",
"returnlayers",
"=",
"True",
")",
"if",
"silent",
"==",
"'.'",
":",
"print",
"(",
"'.'",
",",
"end",
"=",
"''",
",",
"flush",
"=",
"True",
")",
"if",
"ax1",
"is",
"not",
"None",
":",
"#plt.axes(ax1)",
"color",
"=",
"[",
"c",
"[",
"self",
".",
"aq",
".",
"layernumber",
"[",
"i",
"]",
"]",
"if",
"self",
".",
"aq",
".",
"ltype",
"[",
"i",
"]",
"==",
"'a'",
"else",
"'k'",
"for",
"i",
"in",
"layerlist",
"]",
"points",
"=",
"np",
".",
"array",
"(",
"[",
"xyzt",
"[",
":",
",",
"0",
"]",
",",
"xyzt",
"[",
":",
",",
"1",
"]",
"]",
")",
".",
"T",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
",",
"2",
")",
"segments",
"=",
"np",
".",
"concatenate",
"(",
"[",
"points",
"[",
":",
"-",
"1",
"]",
",",
"points",
"[",
"1",
":",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"lc",
"=",
"LineCollection",
"(",
"segments",
",",
"colors",
"=",
"color",
")",
"ax1",
".",
"add_collection",
"(",
"lc",
")",
"#ax1.plot(xyzt[:, 0], xyzt[:, 1], color=color)",
"if",
"ax2",
"is",
"not",
"None",
":",
"color",
"=",
"[",
"c",
"[",
"self",
".",
"aq",
".",
"layernumber",
"[",
"i",
"]",
"]",
"if",
"self",
".",
"aq",
".",
"ltype",
"[",
"i",
"]",
"==",
"'a'",
"else",
"'k'",
"for",
"i",
"in",
"layerlist",
"]",
"points",
"=",
"np",
".",
"array",
"(",
"[",
"xyzt",
"[",
":",
",",
"0",
"]",
",",
"xyzt",
"[",
":",
",",
"2",
"]",
"]",
")",
".",
"T",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
",",
"2",
")",
"segments",
"=",
"np",
".",
"concatenate",
"(",
"[",
"points",
"[",
":",
"-",
"1",
"]",
",",
"points",
"[",
"1",
":",
"]",
"]",
",",
"axis",
"=",
"1",
")",
"lc",
"=",
"LineCollection",
"(",
"segments",
",",
"colors",
"=",
"color",
")",
"ax2",
".",
"add_collection",
"(",
"lc",
")",
"ax2",
".",
"set_ylim",
"(",
"self",
".",
"aq",
".",
"z",
"[",
"-",
"1",
"]",
",",
"self",
".",
"aq",
".",
"z",
"[",
"0",
"]",
")"
] | 49.148936 | 20.93617 |
def os_release(package, base='essex', reset_cache=False):
'''
Returns OpenStack release codename from a cached global.
If reset_cache then unset the cached os_release version and return the
freshly determined version.
If the codename can not be determined from either an installed package or
the installation source, the earliest release supported by the charm should
be returned.
'''
global _os_rel
if reset_cache:
reset_os_release()
if _os_rel:
return _os_rel
_os_rel = (
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
return _os_rel
|
[
"def",
"os_release",
"(",
"package",
",",
"base",
"=",
"'essex'",
",",
"reset_cache",
"=",
"False",
")",
":",
"global",
"_os_rel",
"if",
"reset_cache",
":",
"reset_os_release",
"(",
")",
"if",
"_os_rel",
":",
"return",
"_os_rel",
"_os_rel",
"=",
"(",
"get_os_codename_package",
"(",
"package",
",",
"fatal",
"=",
"False",
")",
"or",
"get_os_codename_install_source",
"(",
"config",
"(",
"'openstack-origin'",
")",
")",
"or",
"base",
")",
"return",
"_os_rel"
] | 32.333333 | 25.952381 |
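The caching behaviour above is easy to exercise in isolation. Below is a minimal, self-contained sketch of the same pattern; lookup_from_package and lookup_from_install_source are hypothetical stand-ins for the charm-helpers lookups, and the codenames and origin string are invented for illustration.

_os_rel = None

def lookup_from_package(package):
    # Hypothetical stand-in for get_os_codename_package(..., fatal=False).
    return {"nova-common": "mitaka"}.get(package)

def lookup_from_install_source(origin):
    # Hypothetical stand-in for get_os_codename_install_source(...).
    return origin.split("-")[-1] if origin and "-" in origin else None

def os_release_sketch(package, base="essex", reset_cache=False):
    global _os_rel
    if reset_cache:
        _os_rel = None          # simplified stand-in for reset_os_release()
    if _os_rel:
        return _os_rel
    _os_rel = (lookup_from_package(package)
               or lookup_from_install_source("cloud:xenial-ocata")
               or base)
    return _os_rel

print(os_release_sketch("nova-common"))                     # mitaka (from the package)
print(os_release_sketch("unknown-pkg"))                     # mitaka (cached)
print(os_release_sketch("unknown-pkg", reset_cache=True))   # ocata (from the origin)
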
def expand_dataset(X, y_proba, factor=10, random_state=None, extra_arrays=None):
"""
Convert a dataset with float multiclass probabilities to a dataset
with indicator probabilities by duplicating X rows and sampling
true labels.
"""
rng = check_random_state(random_state)
extra_arrays = extra_arrays or []
n_classes = y_proba.shape[1]
classes = np.arange(n_classes, dtype=int)
for el in zip(X, y_proba, *extra_arrays):
x, probs = el[0:2]
rest = el[2:]
for label in rng.choice(classes, size=factor, p=probs):
yield (x, label) + rest
|
[
"def",
"expand_dataset",
"(",
"X",
",",
"y_proba",
",",
"factor",
"=",
"10",
",",
"random_state",
"=",
"None",
",",
"extra_arrays",
"=",
"None",
")",
":",
"rng",
"=",
"check_random_state",
"(",
"random_state",
")",
"extra_arrays",
"=",
"extra_arrays",
"or",
"[",
"]",
"n_classes",
"=",
"y_proba",
".",
"shape",
"[",
"1",
"]",
"classes",
"=",
"np",
".",
"arange",
"(",
"n_classes",
",",
"dtype",
"=",
"int",
")",
"for",
"el",
"in",
"zip",
"(",
"X",
",",
"y_proba",
",",
"*",
"extra_arrays",
")",
":",
"x",
",",
"probs",
"=",
"el",
"[",
"0",
":",
"2",
"]",
"rest",
"=",
"el",
"[",
"2",
":",
"]",
"for",
"label",
"in",
"rng",
".",
"choice",
"(",
"classes",
",",
"size",
"=",
"factor",
",",
"p",
"=",
"probs",
")",
":",
"yield",
"(",
"x",
",",
"label",
")",
"+",
"rest"
] | 39.533333 | 13.666667 |
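Because expand_dataset is a generator that yields (row, label, ...) tuples, the expanded dataset has to be collected explicitly. A small usage sketch, assuming the function above is in scope (its use of check_random_state suggests sklearn.utils, but that import is an assumption):

import numpy as np

X = np.array([[0.0, 1.0],
              [2.0, 3.0]])
y_proba = np.array([[0.7, 0.2, 0.1],     # soft labels over 3 classes
                    [0.0, 0.5, 0.5]])

# Each row is repeated `factor` times with a hard label sampled from its
# probability vector, so 2 rows * factor=5 -> 10 (x, label) pairs.
pairs = list(expand_dataset(X, y_proba, factor=5, random_state=0))
X_big = np.array([x for x, _ in pairs])
y_big = np.array([label for _, label in pairs])
print(X_big.shape, y_big.shape)           # (10, 2) (10,)
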
def receive(self, x, mesh_axis, source_pcoord):
"""Collective receive in groups.
Each group contains the processors that differ only in mesh_axis.
```python
group_size = self.shape[mesh_axis].size
```
Args:
x: a LaidOutTensor
mesh_axis: an integer
source_pcoord: a list of optional integers. Each element is either None
or an integer in [0, group_size). If source_pcoord[k] is None, then the
output for the k-th processor in each group is a zero tensor. If
source_pcoord[k] is not None, then the output for the k-th processor in
each group is equal to the input for the source_pcoord[k]-th processor
in that group.
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
shape = x.tensor_list[0].shape
dtype = x.tensor_list[0].dtype
def _collective_receive(tensor_list, device_list):
ret = []
for pcoord, device in enumerate(device_list):
with tf.device(device):
if source_pcoord[pcoord] is None:
ret.append(tf.zeros(shape, dtype))
else:
ret.append(tf.identity(tensor_list[source_pcoord[pcoord]]))
return ret
return self._collective_with_groups(
x, [mesh_axis], _collective_receive)
|
[
"def",
"receive",
"(",
"self",
",",
"x",
",",
"mesh_axis",
",",
"source_pcoord",
")",
":",
"x",
"=",
"x",
".",
"to_laid_out_tensor",
"(",
")",
"shape",
"=",
"x",
".",
"tensor_list",
"[",
"0",
"]",
".",
"shape",
"dtype",
"=",
"x",
".",
"tensor_list",
"[",
"0",
"]",
".",
"dtype",
"def",
"_collective_receive",
"(",
"tensor_list",
",",
"device_list",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"pcoord",
",",
"device",
"in",
"enumerate",
"(",
"device_list",
")",
":",
"with",
"tf",
".",
"device",
"(",
"device",
")",
":",
"if",
"source_pcoord",
"[",
"pcoord",
"]",
"is",
"None",
":",
"ret",
".",
"append",
"(",
"tf",
".",
"zeros",
"(",
"shape",
",",
"dtype",
")",
")",
"else",
":",
"ret",
".",
"append",
"(",
"tf",
".",
"identity",
"(",
"tensor_list",
"[",
"source_pcoord",
"[",
"pcoord",
"]",
"]",
")",
")",
"return",
"ret",
"return",
"self",
".",
"_collective_with_groups",
"(",
"x",
",",
"[",
"mesh_axis",
"]",
",",
"_collective_receive",
")"
] | 34.583333 | 20.666667 |
def evaluate(self, s):
r"""Evaluate :math:`B(s)` along the curve.
This method acts as a (partial) inverse to :meth:`locate`.
See :meth:`evaluate_multi` for more details.
.. image:: ../../images/curve_evaluate.png
:align: center
.. doctest:: curve-eval
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 0.625, 1.0],
... [0.0, 0.5 , 0.5],
... ])
>>> curve = bezier.Curve(nodes, degree=2)
>>> curve.evaluate(0.75)
array([[0.796875],
[0.46875 ]])
.. testcleanup:: curve-eval
import make_images
make_images.curve_evaluate(curve)
Args:
s (float): Parameter along the curve.
Returns:
numpy.ndarray: The point on the curve (as a two dimensional
NumPy array with a single column).
"""
return _curve_helpers.evaluate_multi(
self._nodes, np.asfortranarray([s])
)
|
[
"def",
"evaluate",
"(",
"self",
",",
"s",
")",
":",
"return",
"_curve_helpers",
".",
"evaluate_multi",
"(",
"self",
".",
"_nodes",
",",
"np",
".",
"asfortranarray",
"(",
"[",
"s",
"]",
")",
")"
] | 27.891892 | 18.405405 |
def difference(self, other, joinBy=None, exact=False):
"""
*Wrapper of* ``DIFFERENCE``
DIFFERENCE is a binary, non-symmetric operator that produces one sample
in the result for each sample of the first operand, by keeping the same
metadata of the first operand sample and only those regions (with their
schema and values) of the first operand sample which do not intersect with
any region in the second operand sample (also known as negative regions)
:param other: GMQLDataset
:param joinBy: (optional) list of metadata attributes. It is used to extract subsets of samples on which
to apply the operator: only those samples in the current and other dataset that have the same
value for each specified attribute are considered when performing the operation
        :param exact: boolean. If true, the regions are considered as intersecting only if their coordinates
are exactly the same
:return: a new GMQLDataset
Example of usage. We compute the exact difference between Example_Dataset_1 and Example_Dataset_2,
considering only the samples with same `antibody`::
import gmql as gl
d1 = gl.get_example_dataset("Example_Dataset_1")
d2 = gl.get_example_dataset("Example_Dataset_2")
result = d1.difference(other=d2, exact=True, joinBy=['antibody'])
"""
if isinstance(other, GMQLDataset):
other_idx = other.__index
else:
raise TypeError("other must be a GMQLDataset. "
"{} was provided".format(type(other)))
if isinstance(joinBy, list) and \
all([isinstance(x, str) for x in joinBy]):
metaJoinCondition = Some(self.opmng.getMetaJoinCondition(joinBy))
elif joinBy is None:
metaJoinCondition = none()
else:
raise TypeError("joinBy must be a list of strings. "
"{} was provided".format(type(joinBy)))
if not isinstance(exact, bool):
raise TypeError("exact must be a boolean. "
"{} was provided".format(type(exact)))
new_index = self.opmng.difference(self.__index, other_idx, metaJoinCondition, exact)
new_local_sources, new_remote_sources = self.__combine_sources(self, other)
new_location = self.__combine_locations(self, other)
return GMQLDataset(index=new_index, location=new_location,
local_sources=new_local_sources,
remote_sources=new_remote_sources,
meta_profile=self.meta_profile)
|
[
"def",
"difference",
"(",
"self",
",",
"other",
",",
"joinBy",
"=",
"None",
",",
"exact",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"GMQLDataset",
")",
":",
"other_idx",
"=",
"other",
".",
"__index",
"else",
":",
"raise",
"TypeError",
"(",
"\"other must be a GMQLDataset. \"",
"\"{} was provided\"",
".",
"format",
"(",
"type",
"(",
"other",
")",
")",
")",
"if",
"isinstance",
"(",
"joinBy",
",",
"list",
")",
"and",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"str",
")",
"for",
"x",
"in",
"joinBy",
"]",
")",
":",
"metaJoinCondition",
"=",
"Some",
"(",
"self",
".",
"opmng",
".",
"getMetaJoinCondition",
"(",
"joinBy",
")",
")",
"elif",
"joinBy",
"is",
"None",
":",
"metaJoinCondition",
"=",
"none",
"(",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"joinBy must be a list of strings. \"",
"\"{} was provided\"",
".",
"format",
"(",
"type",
"(",
"joinBy",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"exact",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"\"exact must be a boolean. \"",
"\"{} was provided\"",
".",
"format",
"(",
"type",
"(",
"exact",
")",
")",
")",
"new_index",
"=",
"self",
".",
"opmng",
".",
"difference",
"(",
"self",
".",
"__index",
",",
"other_idx",
",",
"metaJoinCondition",
",",
"exact",
")",
"new_local_sources",
",",
"new_remote_sources",
"=",
"self",
".",
"__combine_sources",
"(",
"self",
",",
"other",
")",
"new_location",
"=",
"self",
".",
"__combine_locations",
"(",
"self",
",",
"other",
")",
"return",
"GMQLDataset",
"(",
"index",
"=",
"new_index",
",",
"location",
"=",
"new_location",
",",
"local_sources",
"=",
"new_local_sources",
",",
"remote_sources",
"=",
"new_remote_sources",
",",
"meta_profile",
"=",
"self",
".",
"meta_profile",
")"
] | 47.350877 | 28.473684 |
def getCharAtIndex(self, index):
'''
Used for searching, this function masks the complexity behind retrieving a specific character at a specific index
in our compressed BWT.
@param index - the index to retrieve the character from
        @return - the character in our BWT that's at a particular index (integer format)
'''
#get the bin we should start from
binID = index >> self.bitPower
bwtIndex = self.refFM[binID]
#these are the values that indicate how far in we really are
trueIndex = np.sum(self.partialFM[binID])-self.offsetSum
dist = index-trueIndex
#calculate how big of a region we actually need to 'decompress'
if binID == self.refFM.shape[0]-1:
endRange = self.bwt.shape[0]
else:
endRange = self.refFM[binID+1]+1
while endRange < self.bwt.shape[0] and (self.bwt[endRange] & self.mask) == (self.bwt[endRange-1] & self.mask):
endRange += 1
#extract the symbols and counts associated with each byte
letters = np.bitwise_and(self.bwt[bwtIndex:endRange], self.mask)
counts = np.right_shift(self.bwt[bwtIndex:endRange], self.letterBits, dtype='<u8')
#numpy methods for find the powers
i = 1
same = (letters[0:-1] == letters[1:])
while np.count_nonzero(same) > 0:
(counts[i:])[same] *= self.numPower
i += 1
same = np.bitwise_and(same[0:-1], same[1:])
#these are the true counts after raising to the appropriate power
cs = np.cumsum(counts)
x = np.searchsorted(cs, dist, 'right')
return letters[x]
|
[
"def",
"getCharAtIndex",
"(",
"self",
",",
"index",
")",
":",
"#get the bin we should start from",
"binID",
"=",
"index",
">>",
"self",
".",
"bitPower",
"bwtIndex",
"=",
"self",
".",
"refFM",
"[",
"binID",
"]",
"#these are the values that indicate how far in we really are",
"trueIndex",
"=",
"np",
".",
"sum",
"(",
"self",
".",
"partialFM",
"[",
"binID",
"]",
")",
"-",
"self",
".",
"offsetSum",
"dist",
"=",
"index",
"-",
"trueIndex",
"#calculate how big of a region we actually need to 'decompress'",
"if",
"binID",
"==",
"self",
".",
"refFM",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
":",
"endRange",
"=",
"self",
".",
"bwt",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"endRange",
"=",
"self",
".",
"refFM",
"[",
"binID",
"+",
"1",
"]",
"+",
"1",
"while",
"endRange",
"<",
"self",
".",
"bwt",
".",
"shape",
"[",
"0",
"]",
"and",
"(",
"self",
".",
"bwt",
"[",
"endRange",
"]",
"&",
"self",
".",
"mask",
")",
"==",
"(",
"self",
".",
"bwt",
"[",
"endRange",
"-",
"1",
"]",
"&",
"self",
".",
"mask",
")",
":",
"endRange",
"+=",
"1",
"#extract the symbols and counts associated with each byte",
"letters",
"=",
"np",
".",
"bitwise_and",
"(",
"self",
".",
"bwt",
"[",
"bwtIndex",
":",
"endRange",
"]",
",",
"self",
".",
"mask",
")",
"counts",
"=",
"np",
".",
"right_shift",
"(",
"self",
".",
"bwt",
"[",
"bwtIndex",
":",
"endRange",
"]",
",",
"self",
".",
"letterBits",
",",
"dtype",
"=",
"'<u8'",
")",
"#numpy methods for find the powers",
"i",
"=",
"1",
"same",
"=",
"(",
"letters",
"[",
"0",
":",
"-",
"1",
"]",
"==",
"letters",
"[",
"1",
":",
"]",
")",
"while",
"np",
".",
"count_nonzero",
"(",
"same",
")",
">",
"0",
":",
"(",
"counts",
"[",
"i",
":",
"]",
")",
"[",
"same",
"]",
"*=",
"self",
".",
"numPower",
"i",
"+=",
"1",
"same",
"=",
"np",
".",
"bitwise_and",
"(",
"same",
"[",
"0",
":",
"-",
"1",
"]",
",",
"same",
"[",
"1",
":",
"]",
")",
"#these are the true counts after raising to the appropriate power",
"cs",
"=",
"np",
".",
"cumsum",
"(",
"counts",
")",
"x",
"=",
"np",
".",
"searchsorted",
"(",
"cs",
",",
"dist",
",",
"'right'",
")",
"return",
"letters",
"[",
"x",
"]"
] | 43.820513 | 22.487179 |
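The byte-level decompression above is specific to this BWT encoding, but the final lookup is a generic trick worth isolating: once each run's symbol and decoded length are known, the symbol covering a given offset falls out of a cumulative sum plus searchsorted. A toy illustration with made-up runs:

import numpy as np

letters = np.array([0, 3, 1, 3])   # run symbols (integer alphabet)
counts = np.array([5, 2, 4, 1])    # run lengths: offsets 0-4, 5-6, 7-10, 11
cs = np.cumsum(counts)             # [ 5  7 11 12]
for dist in (0, 4, 5, 7, 11):
    x = np.searchsorted(cs, dist, 'right')
    print(dist, letters[x])        # 0->0, 4->0, 5->3, 7->1, 11->3
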
def line_alignment(self):
"""Alignment, one of `inner`, `outer`, `center`."""
key = self._data.get(b'strokeStyleLineAlignment').enum
return self.STROKE_STYLE_LINE_ALIGNMENTS.get(key, str(key))
|
[
"def",
"line_alignment",
"(",
"self",
")",
":",
"key",
"=",
"self",
".",
"_data",
".",
"get",
"(",
"b'strokeStyleLineAlignment'",
")",
".",
"enum",
"return",
"self",
".",
"STROKE_STYLE_LINE_ALIGNMENTS",
".",
"get",
"(",
"key",
",",
"str",
"(",
"key",
")",
")"
] | 53.25 | 16 |
def new_instance(type, frum, schema=None):
"""
Factory!
"""
if not type2container:
_delayed_imports()
if isinstance(frum, Container):
return frum
elif isinstance(frum, _Cube):
return frum
elif isinstance(frum, _Query):
return _run(frum)
elif is_many(frum):
return _ListContainer(frum)
elif is_text(frum):
# USE DEFAULT STORAGE TO FIND Container
if not config.default.settings:
Log.error("expecting jx_base.container.config.default.settings to contain default elasticsearch connection info")
settings = set_default(
{
"index": join_field(split_field(frum)[:1:]),
"name": frum,
},
config.default.settings
)
settings.type = None # WE DO NOT WANT TO INFLUENCE THE TYPE BECAUSE NONE IS IN THE frum STRING ANYWAY
return type2container["elasticsearch"](settings)
elif is_data(frum):
frum = wrap(frum)
if frum.type and type2container[frum.type]:
return type2container[frum.type](frum.settings)
elif frum["from"]:
frum = copy(frum)
frum["from"] = Container(frum["from"])
return _Query.wrap(frum)
else:
Log.error("Do not know how to handle {{frum|json}}", frum=frum)
else:
Log.error("Do not know how to handle {{type}}", type=frum.__class__.__name__)
|
[
"def",
"new_instance",
"(",
"type",
",",
"frum",
",",
"schema",
"=",
"None",
")",
":",
"if",
"not",
"type2container",
":",
"_delayed_imports",
"(",
")",
"if",
"isinstance",
"(",
"frum",
",",
"Container",
")",
":",
"return",
"frum",
"elif",
"isinstance",
"(",
"frum",
",",
"_Cube",
")",
":",
"return",
"frum",
"elif",
"isinstance",
"(",
"frum",
",",
"_Query",
")",
":",
"return",
"_run",
"(",
"frum",
")",
"elif",
"is_many",
"(",
"frum",
")",
":",
"return",
"_ListContainer",
"(",
"frum",
")",
"elif",
"is_text",
"(",
"frum",
")",
":",
"# USE DEFAULT STORAGE TO FIND Container",
"if",
"not",
"config",
".",
"default",
".",
"settings",
":",
"Log",
".",
"error",
"(",
"\"expecting jx_base.container.config.default.settings to contain default elasticsearch connection info\"",
")",
"settings",
"=",
"set_default",
"(",
"{",
"\"index\"",
":",
"join_field",
"(",
"split_field",
"(",
"frum",
")",
"[",
":",
"1",
":",
"]",
")",
",",
"\"name\"",
":",
"frum",
",",
"}",
",",
"config",
".",
"default",
".",
"settings",
")",
"settings",
".",
"type",
"=",
"None",
"# WE DO NOT WANT TO INFLUENCE THE TYPE BECAUSE NONE IS IN THE frum STRING ANYWAY",
"return",
"type2container",
"[",
"\"elasticsearch\"",
"]",
"(",
"settings",
")",
"elif",
"is_data",
"(",
"frum",
")",
":",
"frum",
"=",
"wrap",
"(",
"frum",
")",
"if",
"frum",
".",
"type",
"and",
"type2container",
"[",
"frum",
".",
"type",
"]",
":",
"return",
"type2container",
"[",
"frum",
".",
"type",
"]",
"(",
"frum",
".",
"settings",
")",
"elif",
"frum",
"[",
"\"from\"",
"]",
":",
"frum",
"=",
"copy",
"(",
"frum",
")",
"frum",
"[",
"\"from\"",
"]",
"=",
"Container",
"(",
"frum",
"[",
"\"from\"",
"]",
")",
"return",
"_Query",
".",
"wrap",
"(",
"frum",
")",
"else",
":",
"Log",
".",
"error",
"(",
"\"Do not know how to handle {{frum|json}}\"",
",",
"frum",
"=",
"frum",
")",
"else",
":",
"Log",
".",
"error",
"(",
"\"Do not know how to handle {{type}}\"",
",",
"type",
"=",
"frum",
".",
"__class__",
".",
"__name__",
")"
] | 38.219512 | 18.073171 |
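The heart of new_instance is registry dispatch: type2container maps a type string to a container class and the settings pick the entry. A stripped-down, self-contained sketch of that pattern (the class and keys here are hypothetical, not the real jx containers):

type2container = {}

class FakeESContainer:
    def __init__(self, settings):
        self.settings = settings

type2container["elasticsearch"] = FakeESContainer

def new_instance_sketch(frum):
    # Dispatch on the "type" key, mirroring the is_data() branch above.
    if isinstance(frum, dict) and frum.get("type") in type2container:
        return type2container[frum["type"]](frum.get("settings"))
    raise ValueError("Do not know how to handle %r" % (frum,))

c = new_instance_sketch({"type": "elasticsearch",
                         "settings": {"index": "logs"}})
print(type(c).__name__, c.settings)   # FakeESContainer {'index': 'logs'}
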
def _get_raw(source, bitarray):
''' Get raw data as integer, based on offset and size '''
offset = int(source['offset'])
size = int(source['size'])
return int(''.join(['1' if digit else '0' for digit in bitarray[offset:offset + size]]), 2)
|
[
"def",
"_get_raw",
"(",
"source",
",",
"bitarray",
")",
":",
"offset",
"=",
"int",
"(",
"source",
"[",
"'offset'",
"]",
")",
"size",
"=",
"int",
"(",
"source",
"[",
"'size'",
"]",
")",
"return",
"int",
"(",
"''",
".",
"join",
"(",
"[",
"'1'",
"if",
"digit",
"else",
"'0'",
"for",
"digit",
"in",
"bitarray",
"[",
"offset",
":",
"offset",
"+",
"size",
"]",
"]",
")",
",",
"2",
")"
] | 53.4 | 20.2 |
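The helper only needs a sequence of booleans, so it is easy to exercise standalone. A copy of the same body with a toy bit list shows how offset and size select a big-endian integer field:

def get_raw(source, bitarray):
    offset = int(source['offset'])
    size = int(source['size'])
    return int(''.join(['1' if digit else '0'
                        for digit in bitarray[offset:offset + size]]), 2)

bits = [False, True, True, False, True, False, False, True]   # 0b01101001
print(get_raw({'offset': 1, 'size': 4}, bits))   # bits 1..4 -> 0b1101 -> 13
print(get_raw({'offset': 4, 'size': 4}, bits))   # bits 4..7 -> 0b1001 -> 9
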
def parse_paragraph(self, markup):
""" Creates a list from lines of text in a paragraph.
Each line of text is a new item in the list,
        except lists and preformatted chunks (<li> and <pre>);
these are kept together as a single chunk.
Lists are formatted using parse_paragraph_list().
Empty lines are stripped from the output.
Indentation (i.e. lines starting with ":") is ignored.
Called from parse_paragraphs() method.
"""
s = self.plain(markup)
# Add an extra linebreak between the last list item
# and the normal line following after it, so they don't stick together, e.g.
# **[[Alin Magic]], magic used in the videogame ''[[Rise of Nations: Rise of Legends]]''
# In '''popular culture''':
# * [[Magic (film)|''Magic'' (film)]], a 1978 film starring Anthony Hopkins and Ann-Margret
s = re.sub(re.compile("\n([*#;].*?)\n([^*#?])", re.DOTALL), "\n\\1\n\n\\2", s)
# This keeps list items with linebreaks
# between them nicely together.
s = re.sub("\n{2,3}([*#;])", "\n\\1", s)
chunks = []
ch = ""
i = 1
for chunk in s.split("\n"):
if chunk.startswith(":"):
chunk = chunk.lstrip(":")
if len(chunk.strip()) > 1:
# Leave out taxoboxes and infoboxes.
if not chunk.startswith("|"):
ch += chunk + "\n"
if ch.strip() != "":
if not re.search("^[ *#;]", chunk):
ch = self.parse_paragraph_list(ch)
chunks.append(ch.rstrip())
ch = ""
if ch.strip() != "":
ch = self.parse_paragraph_list(ch)
chunks.append(ch.strip())
return chunks
|
[
"def",
"parse_paragraph",
"(",
"self",
",",
"markup",
")",
":",
"s",
"=",
"self",
".",
"plain",
"(",
"markup",
")",
"# Add an extra linebreak between the last list item",
"# and the normal line following after it, so they don't stick together, e.g.",
"# **[[Alin Magic]], magic used in the videogame ''[[Rise of Nations: Rise of Legends]]''",
"# In '''popular culture''':",
"# * [[Magic (film)|''Magic'' (film)]], a 1978 film starring Anthony Hopkins and Ann-Margret",
"s",
"=",
"re",
".",
"sub",
"(",
"re",
".",
"compile",
"(",
"\"\\n([*#;].*?)\\n([^*#?])\"",
",",
"re",
".",
"DOTALL",
")",
",",
"\"\\n\\\\1\\n\\n\\\\2\"",
",",
"s",
")",
"# This keeps list items with linebreaks ",
"# between them nicely together.",
"s",
"=",
"re",
".",
"sub",
"(",
"\"\\n{2,3}([*#;])\"",
",",
"\"\\n\\\\1\"",
",",
"s",
")",
"chunks",
"=",
"[",
"]",
"ch",
"=",
"\"\"",
"i",
"=",
"1",
"for",
"chunk",
"in",
"s",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"chunk",
".",
"startswith",
"(",
"\":\"",
")",
":",
"chunk",
"=",
"chunk",
".",
"lstrip",
"(",
"\":\"",
")",
"if",
"len",
"(",
"chunk",
".",
"strip",
"(",
")",
")",
">",
"1",
":",
"# Leave out taxoboxes and infoboxes.",
"if",
"not",
"chunk",
".",
"startswith",
"(",
"\"|\"",
")",
":",
"ch",
"+=",
"chunk",
"+",
"\"\\n\"",
"if",
"ch",
".",
"strip",
"(",
")",
"!=",
"\"\"",
":",
"if",
"not",
"re",
".",
"search",
"(",
"\"^[ *#;]\"",
",",
"chunk",
")",
":",
"ch",
"=",
"self",
".",
"parse_paragraph_list",
"(",
"ch",
")",
"chunks",
".",
"append",
"(",
"ch",
".",
"rstrip",
"(",
")",
")",
"ch",
"=",
"\"\"",
"if",
"ch",
".",
"strip",
"(",
")",
"!=",
"\"\"",
":",
"ch",
"=",
"self",
".",
"parse_paragraph_list",
"(",
"ch",
")",
"chunks",
".",
"append",
"(",
"ch",
".",
"strip",
"(",
")",
")",
"return",
"chunks"
] | 38.5625 | 17.645833 |
def extractInputForTP(self, tm):
"""
    Extract inputs for TP from the state of temporal memory.
    Three pieces of information are extracted:
1. correctly predicted cells
2. all active cells
3. bursting cells (unpredicted input)
"""
# bursting cells in layer 4
burstingColumns = tm.activeState["t"].sum(axis=1)
burstingColumns[ burstingColumns < tm.cellsPerColumn ] = 0
burstingColumns[ burstingColumns == tm.cellsPerColumn ] = 1
# print "Bursting column indices=",burstingColumns.nonzero()[0]
# correctly predicted cells in layer 4
correctlyPredictedCells = numpy.zeros(self._inputDimensions).astype(realDType)
idx = (tm.predictedState["t-1"] + tm.activeState["t"]) == 2
idx = idx.reshape(self._inputDimensions)
correctlyPredictedCells[idx] = 1.0
# print "Predicted->active cell indices=",correctlyPredictedCells.nonzero()[0]
# all currently active cells in layer 4
spInputVector = tm.learnState["t"].reshape(self._inputDimensions)
# spInputVector = tm.activeState["t"].reshape(self._inputDimensions)
return (correctlyPredictedCells, spInputVector, burstingColumns)
|
[
"def",
"extractInputForTP",
"(",
"self",
",",
"tm",
")",
":",
"# bursting cells in layer 4",
"burstingColumns",
"=",
"tm",
".",
"activeState",
"[",
"\"t\"",
"]",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"burstingColumns",
"[",
"burstingColumns",
"<",
"tm",
".",
"cellsPerColumn",
"]",
"=",
"0",
"burstingColumns",
"[",
"burstingColumns",
"==",
"tm",
".",
"cellsPerColumn",
"]",
"=",
"1",
"# print \"Bursting column indices=\",burstingColumns.nonzero()[0]",
"# correctly predicted cells in layer 4",
"correctlyPredictedCells",
"=",
"numpy",
".",
"zeros",
"(",
"self",
".",
"_inputDimensions",
")",
".",
"astype",
"(",
"realDType",
")",
"idx",
"=",
"(",
"tm",
".",
"predictedState",
"[",
"\"t-1\"",
"]",
"+",
"tm",
".",
"activeState",
"[",
"\"t\"",
"]",
")",
"==",
"2",
"idx",
"=",
"idx",
".",
"reshape",
"(",
"self",
".",
"_inputDimensions",
")",
"correctlyPredictedCells",
"[",
"idx",
"]",
"=",
"1.0",
"# print \"Predicted->active cell indices=\",correctlyPredictedCells.nonzero()[0]",
"# all currently active cells in layer 4",
"spInputVector",
"=",
"tm",
".",
"learnState",
"[",
"\"t\"",
"]",
".",
"reshape",
"(",
"self",
".",
"_inputDimensions",
")",
"# spInputVector = tm.activeState[\"t\"].reshape(self._inputDimensions)",
"return",
"(",
"correctlyPredictedCells",
",",
"spInputVector",
",",
"burstingColumns",
")"
] | 41.296296 | 19.222222 |
def to_output(self, value):
"""Convert value to process output format."""
return {self.name: [self.inner.to_output(v)[self.name] for v in value]}
|
[
"def",
"to_output",
"(",
"self",
",",
"value",
")",
":",
"return",
"{",
"self",
".",
"name",
":",
"[",
"self",
".",
"inner",
".",
"to_output",
"(",
"v",
")",
"[",
"self",
".",
"name",
"]",
"for",
"v",
"in",
"value",
"]",
"}"
] | 53 | 17.333333 |
def nma_attribute(self, stmt, p_elem, pset=None):
"""Map `stmt` to a NETMOD-specific attribute.
The name of the attribute is the same as the 'keyword' of
`stmt`.
"""
att = "nma:" + stmt.keyword
if att not in p_elem.attr:
p_elem.attr[att] = stmt.arg
|
[
"def",
"nma_attribute",
"(",
"self",
",",
"stmt",
",",
"p_elem",
",",
"pset",
"=",
"None",
")",
":",
"att",
"=",
"\"nma:\"",
"+",
"stmt",
".",
"keyword",
"if",
"att",
"not",
"in",
"p_elem",
".",
"attr",
":",
"p_elem",
".",
"attr",
"[",
"att",
"]",
"=",
"stmt",
".",
"arg"
] | 33.444444 | 12.333333 |
def classifyParameters(self):
"""Return (arguments, options, outputs) tuple. Together, the
three lists contain all parameters (recursively fetched from
all parameter groups), classified into optional parameters,
required ones (with an index), and simple output parameters
(that would get written to a file using
--returnparameterfile). `arguments` contains the required
arguments, already sorted by index."""
arguments = []
options = []
outputs = []
for parameter in self.parameters():
if parameter.channel == 'output' and not parameter.isExternalType():
outputs.append(parameter)
elif parameter.index is not None:
arguments.append(parameter)
if parameter.flag is not None or parameter.longflag is not None:
logger.warning("Parameter '%s' has both index=%d and flag set." % (
parameter.identifier(), parameter.index))
elif parameter.flag or parameter.longflag:
options.append(parameter)
else:
logger.warning("Parameter '%s' cannot be passed (missing flag, longflag, or index)!" % parameter.name)
arguments.sort(key = lambda parameter: parameter.index)
return (arguments, options, outputs)
|
[
"def",
"classifyParameters",
"(",
"self",
")",
":",
"arguments",
"=",
"[",
"]",
"options",
"=",
"[",
"]",
"outputs",
"=",
"[",
"]",
"for",
"parameter",
"in",
"self",
".",
"parameters",
"(",
")",
":",
"if",
"parameter",
".",
"channel",
"==",
"'output'",
"and",
"not",
"parameter",
".",
"isExternalType",
"(",
")",
":",
"outputs",
".",
"append",
"(",
"parameter",
")",
"elif",
"parameter",
".",
"index",
"is",
"not",
"None",
":",
"arguments",
".",
"append",
"(",
"parameter",
")",
"if",
"parameter",
".",
"flag",
"is",
"not",
"None",
"or",
"parameter",
".",
"longflag",
"is",
"not",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Parameter '%s' has both index=%d and flag set.\"",
"%",
"(",
"parameter",
".",
"identifier",
"(",
")",
",",
"parameter",
".",
"index",
")",
")",
"elif",
"parameter",
".",
"flag",
"or",
"parameter",
".",
"longflag",
":",
"options",
".",
"append",
"(",
"parameter",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Parameter '%s' cannot be passed (missing flag, longflag, or index)!\"",
"%",
"parameter",
".",
"name",
")",
"arguments",
".",
"sort",
"(",
"key",
"=",
"lambda",
"parameter",
":",
"parameter",
".",
"index",
")",
"return",
"(",
"arguments",
",",
"options",
",",
"outputs",
")"
] | 51.615385 | 20.423077 |
def fix(self, param):
"""
Disable parameter optimization.
Parameters
----------
param : str
Possible values are ``"delta"``, ``"beta"``, and ``"scale"``.
"""
if param == "delta":
super()._fix("logistic")
else:
self._fix[param] = True
|
[
"def",
"fix",
"(",
"self",
",",
"param",
")",
":",
"if",
"param",
"==",
"\"delta\"",
":",
"super",
"(",
")",
".",
"_fix",
"(",
"\"logistic\"",
")",
"else",
":",
"self",
".",
"_fix",
"[",
"param",
"]",
"=",
"True"
] | 24.769231 | 15.846154 |
def delete_scan(self, scan_id):
""" Delete a scan if fully finished. """
if self.get_status(scan_id) == ScanStatus.RUNNING:
return False
self.scans_table.pop(scan_id)
if len(self.scans_table) == 0:
del self.data_manager
self.data_manager = None
return True
|
[
"def",
"delete_scan",
"(",
"self",
",",
"scan_id",
")",
":",
"if",
"self",
".",
"get_status",
"(",
"scan_id",
")",
"==",
"ScanStatus",
".",
"RUNNING",
":",
"return",
"False",
"self",
".",
"scans_table",
".",
"pop",
"(",
"scan_id",
")",
"if",
"len",
"(",
"self",
".",
"scans_table",
")",
"==",
"0",
":",
"del",
"self",
".",
"data_manager",
"self",
".",
"data_manager",
"=",
"None",
"return",
"True"
] | 32.4 | 12 |
def _pooling_output_shape(input_shape, pool_size=(2, 2),
strides=None, padding='VALID'):
"""Helper: compute the output shape for the pooling layer."""
dims = (1,) + pool_size + (1,) # NHWC
spatial_strides = strides or (1,) * len(pool_size)
strides = (1,) + spatial_strides + (1,)
pads = padtype_to_pads(input_shape, dims, strides, padding)
operand_padded = onp.add(input_shape, onp.add(*zip(*pads)))
t = onp.floor_divide(onp.subtract(operand_padded, dims), strides) + 1
return tuple(t)
|
[
"def",
"_pooling_output_shape",
"(",
"input_shape",
",",
"pool_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"strides",
"=",
"None",
",",
"padding",
"=",
"'VALID'",
")",
":",
"dims",
"=",
"(",
"1",
",",
")",
"+",
"pool_size",
"+",
"(",
"1",
",",
")",
"# NHWC",
"spatial_strides",
"=",
"strides",
"or",
"(",
"1",
",",
")",
"*",
"len",
"(",
"pool_size",
")",
"strides",
"=",
"(",
"1",
",",
")",
"+",
"spatial_strides",
"+",
"(",
"1",
",",
")",
"pads",
"=",
"padtype_to_pads",
"(",
"input_shape",
",",
"dims",
",",
"strides",
",",
"padding",
")",
"operand_padded",
"=",
"onp",
".",
"add",
"(",
"input_shape",
",",
"onp",
".",
"add",
"(",
"*",
"zip",
"(",
"*",
"pads",
")",
")",
")",
"t",
"=",
"onp",
".",
"floor_divide",
"(",
"onp",
".",
"subtract",
"(",
"operand_padded",
",",
"dims",
")",
",",
"strides",
")",
"+",
"1",
"return",
"tuple",
"(",
"t",
")"
] | 51.9 | 14.2 |
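For 'VALID' padding the pads are all zero, so the arithmetic reduces to floor((input - window) / stride) + 1 per dimension once the pool window and strides are expanded to NHWC. A worked example of just that step (the padtype_to_pads call, which matters for 'SAME' padding, is skipped in this sketch):

import numpy as np

input_shape = (8, 28, 28, 3)    # NHWC
dims = (1, 2, 2, 1)             # pool_size (2, 2) expanded to NHWC
strides = (1, 2, 2, 1)          # spatial strides (2, 2) expanded to NHWC
out = np.floor_divide(np.subtract(input_shape, dims), strides) + 1
print(tuple(int(v) for v in out))   # (8, 14, 14, 3)
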
def is_command(self, text: str) -> bool:
"""
checks for presence of shebang in the first character of the text
"""
if text[0] in self.shebangs:
return True
return False
|
[
"def",
"is_command",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"bool",
":",
"if",
"text",
"[",
"0",
"]",
"in",
"self",
".",
"shebangs",
":",
"return",
"True",
"return",
"False"
] | 26.75 | 14.25 |
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_low_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 7)
self.set_attributes(priority, address, rtr)
self.channel = self.byte_to_channel(data[0])
self.needs_valid_channel(self.channel, 5)
self.timeout = data[1] # Omzetter seconden ????
self.status = data[2]
self.led_status = data[3]
self.blind_position = data[4]
self.locked_inhibit_forced = data[5]
self.alarm_auto_mode_selection = data[6]
|
[
"def",
"populate",
"(",
"self",
",",
"priority",
",",
"address",
",",
"rtr",
",",
"data",
")",
":",
"assert",
"isinstance",
"(",
"data",
",",
"bytes",
")",
"self",
".",
"needs_low_priority",
"(",
"priority",
")",
"self",
".",
"needs_no_rtr",
"(",
"rtr",
")",
"self",
".",
"needs_data",
"(",
"data",
",",
"7",
")",
"self",
".",
"set_attributes",
"(",
"priority",
",",
"address",
",",
"rtr",
")",
"self",
".",
"channel",
"=",
"self",
".",
"byte_to_channel",
"(",
"data",
"[",
"0",
"]",
")",
"self",
".",
"needs_valid_channel",
"(",
"self",
".",
"channel",
",",
"5",
")",
"self",
".",
"timeout",
"=",
"data",
"[",
"1",
"]",
"# Omzetter seconden ????",
"self",
".",
"status",
"=",
"data",
"[",
"2",
"]",
"self",
".",
"led_status",
"=",
"data",
"[",
"3",
"]",
"self",
".",
"blind_position",
"=",
"data",
"[",
"4",
"]",
"self",
".",
"locked_inhibit_forced",
"=",
"data",
"[",
"5",
"]",
"self",
".",
"alarm_auto_mode_selection",
"=",
"data",
"[",
"6",
"]"
] | 37.117647 | 7.588235 |
def search_users(self, username_keyword, limit=10):
"""
Searches for users whose username matches ``username_keyword``, and returns
a list of matched users.
:param str username_keyword: keyword to search with
:param int limit: maximum number of returned users
:return: a list of matched users
:rtype: List[GogsUser]
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
params = {"q": username_keyword, "limit": limit}
response = self.get("/users/search", params=params)
return [GogsUser.from_json(user_json) for user_json in response.json()["data"]]
|
[
"def",
"search_users",
"(",
"self",
",",
"username_keyword",
",",
"limit",
"=",
"10",
")",
":",
"params",
"=",
"{",
"\"q\"",
":",
"username_keyword",
",",
"\"limit\"",
":",
"limit",
"}",
"response",
"=",
"self",
".",
"get",
"(",
"\"/users/search\"",
",",
"params",
"=",
"params",
")",
"return",
"[",
"GogsUser",
".",
"from_json",
"(",
"user_json",
")",
"for",
"user_json",
"in",
"response",
".",
"json",
"(",
")",
"[",
"\"data\"",
"]",
"]"
] | 48 | 19.6 |
def poll(self):
"""Return pairs of run ids and results of finish event loops.
"""
ret = self.communicationChannel.receive_finished()
self.nruns -= len(ret)
return ret
|
[
"def",
"poll",
"(",
"self",
")",
":",
"ret",
"=",
"self",
".",
"communicationChannel",
".",
"receive_finished",
"(",
")",
"self",
".",
"nruns",
"-=",
"len",
"(",
"ret",
")",
"return",
"ret"
] | 33.5 | 12.5 |
def _group_report(self,group,name):
"""Report summary for a given job group.
Return True if the group had any elements."""
if group:
print '%s jobs:' % name
for job in group:
print '%s : %s' % (job.num,job)
print
return True
|
[
"def",
"_group_report",
"(",
"self",
",",
"group",
",",
"name",
")",
":",
"if",
"group",
":",
"print",
"'%s jobs:'",
"%",
"name",
"for",
"job",
"in",
"group",
":",
"print",
"'%s : %s'",
"%",
"(",
"job",
".",
"num",
",",
"job",
")",
"print",
"return",
"True"
] | 27.636364 | 15.545455 |
def set_detail_level(self, detail_levels):
"""
Sets the detail levels from the input dictionary in detail_levels.
"""
if detail_levels is None:
return
self.detail_levels = detail_levels
if 'api' in detail_levels:
self.api_detail_level = detail_levels['api']
if 'http' in detail_levels:
self.http_detail_level = detail_levels['http']
if isinstance(self.api_detail_level, int):
self.api_maxlen = self.api_detail_level
if isinstance(self.http_detail_level, int):
self.http_maxlen = self.http_detail_level
|
[
"def",
"set_detail_level",
"(",
"self",
",",
"detail_levels",
")",
":",
"if",
"detail_levels",
"is",
"None",
":",
"return",
"self",
".",
"detail_levels",
"=",
"detail_levels",
"if",
"'api'",
"in",
"detail_levels",
":",
"self",
".",
"api_detail_level",
"=",
"detail_levels",
"[",
"'api'",
"]",
"if",
"'http'",
"in",
"detail_levels",
":",
"self",
".",
"http_detail_level",
"=",
"detail_levels",
"[",
"'http'",
"]",
"if",
"isinstance",
"(",
"self",
".",
"api_detail_level",
",",
"int",
")",
":",
"self",
".",
"api_maxlen",
"=",
"self",
".",
"api_detail_level",
"if",
"isinstance",
"(",
"self",
".",
"http_detail_level",
",",
"int",
")",
":",
"self",
".",
"http_maxlen",
"=",
"self",
".",
"http_detail_level"
] | 38.6875 | 12.3125 |
def is_match(self, match):
"""Return whether this model is the same as `match`.
Matches if the model is the same as or has the same name as `match`.
"""
result = False
if self == match:
result = True
elif isinstance(match, str) and fnmatchcase(self.name, match):
result = True # Found by instance or name
return result
|
[
"def",
"is_match",
"(",
"self",
",",
"match",
")",
":",
"result",
"=",
"False",
"if",
"self",
"==",
"match",
":",
"result",
"=",
"True",
"elif",
"isinstance",
"(",
"match",
",",
"str",
")",
"and",
"fnmatchcase",
"(",
"self",
".",
"name",
",",
"match",
")",
":",
"result",
"=",
"True",
"# Found by instance or name",
"return",
"result"
] | 35.454545 | 18.272727 |
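The name-matching branch delegates to fnmatchcase, i.e. case-sensitive shell-style globbing against the model name. For reference:

from fnmatch import fnmatchcase

print(fnmatchcase("resnet50", "resnet*"))   # True
print(fnmatchcase("resnet50", "RESNET*"))   # False -- matching is case-sensitive
print(fnmatchcase("resnet50", "*50"))       # True
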
def _check_type(name, obj, expected_type):
""" Raise a TypeError if object is not of expected type """
if not isinstance(obj, expected_type):
raise TypeError(
'"%s" must be an a %s' % (name, expected_type.__name__)
)
|
[
"def",
"_check_type",
"(",
"name",
",",
"obj",
",",
"expected_type",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"expected_type",
")",
":",
"raise",
"TypeError",
"(",
"'\"%s\" must be an a %s'",
"%",
"(",
"name",
",",
"expected_type",
".",
"__name__",
")",
")"
] | 41.166667 | 13 |
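A short demonstration of the guard, redefined here so the snippet runs standalone:

def _check_type(name, obj, expected_type):
    if not isinstance(obj, expected_type):
        raise TypeError(
            '"%s" must be an a %s' % (name, expected_type.__name__)
        )

_check_type("retries", 3, int)       # passes silently
try:
    _check_type("retries", "3", int)
except TypeError as exc:
    print(exc)                       # the formatted message from the helper
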
def fetchJobStoreFiles(jobStore, options):
"""
Takes a list of file names as glob patterns, searches for these within a
given directory, and attempts to take all of the files found and copy them
into options.localFilePath.
:param jobStore: A fileJobStore object.
:param options.fetch: List of file glob patterns to search
for in the jobStore and copy into options.localFilePath.
:param options.localFilePath: Local directory to copy files into.
:param options.jobStore: The path to the jobStore directory.
"""
for jobStoreFile in options.fetch:
jobStoreHits = recursiveGlob(directoryname=options.jobStore,
glob_pattern=jobStoreFile)
for jobStoreFileID in jobStoreHits:
logger.debug("Copying job store file: %s to %s",
jobStoreFileID,
options.localFilePath[0])
jobStore.readFile(jobStoreFileID,
os.path.join(options.localFilePath[0],
os.path.basename(jobStoreFileID)),
symlink=options.useSymlinks)
|
[
"def",
"fetchJobStoreFiles",
"(",
"jobStore",
",",
"options",
")",
":",
"for",
"jobStoreFile",
"in",
"options",
".",
"fetch",
":",
"jobStoreHits",
"=",
"recursiveGlob",
"(",
"directoryname",
"=",
"options",
".",
"jobStore",
",",
"glob_pattern",
"=",
"jobStoreFile",
")",
"for",
"jobStoreFileID",
"in",
"jobStoreHits",
":",
"logger",
".",
"debug",
"(",
"\"Copying job store file: %s to %s\"",
",",
"jobStoreFileID",
",",
"options",
".",
"localFilePath",
"[",
"0",
"]",
")",
"jobStore",
".",
"readFile",
"(",
"jobStoreFileID",
",",
"os",
".",
"path",
".",
"join",
"(",
"options",
".",
"localFilePath",
"[",
"0",
"]",
",",
"os",
".",
"path",
".",
"basename",
"(",
"jobStoreFileID",
")",
")",
",",
"symlink",
"=",
"options",
".",
"useSymlinks",
")"
] | 49.478261 | 16.869565 |
def pmdec(self,*args,**kwargs):
"""
NAME:
pmdec
PURPOSE:
return proper motion in declination (in mas/yr)
INPUT:
t - (optional) time at which to get pmdec
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
OR Orbit object that corresponds to the orbit
of the observer
Y is ignored and always assumed to be zero
ro= distance in kpc corresponding to R=1. (default=Object-wide default)
vo= velocity in km/s corresponding to v=1. (default=Object-wide default)
OUTPUT:
pm_dec(t) in mas/yr
HISTORY:
2011-02-24 - Written - Bovy (NYU)
"""
_check_roSet(self,kwargs,'pmdec')
_check_voSet(self,kwargs,'pmdec')
pmrapmdec= self._pmrapmdec(*args,**kwargs)
return pmrapmdec[:,1]
|
[
"def",
"pmdec",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_check_roSet",
"(",
"self",
",",
"kwargs",
",",
"'pmdec'",
")",
"_check_voSet",
"(",
"self",
",",
"kwargs",
",",
"'pmdec'",
")",
"pmrapmdec",
"=",
"self",
".",
"_pmrapmdec",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"pmrapmdec",
"[",
":",
",",
"1",
"]"
] | 41.25 | 18.666667 |
def _check_min_max_range(self, var, test_ctx):
"""
Checks that either both valid_min and valid_max exist, or valid_range
exists.
"""
if 'valid_range' in var.ncattrs():
test_ctx.assert_true(var.valid_range.dtype == var.dtype and
len(var.valid_range) == 2
and var.valid_range[0] <= var.valid_range[1],
"valid_range must be a two element vector of min followed by max with the same data type as {}".format(var.name)
)
else:
for bound in ('valid_min', 'valid_max'):
v_bound = getattr(var, bound, '')
warn_msg = '{} attribute should exist, have the same type as {}, and not be empty or valid_range should be defined'.format(bound, var.name)
# need to special case str attributes since they aren't directly
# comparable to numpy dtypes
if isinstance(v_bound, six.string_types):
test_ctx.assert_true(v_bound != '' and
var.dtype.char == 'S', warn_msg)
# otherwise compare the numpy types directly
else:
test_ctx.assert_true(v_bound.dtype == var.dtype, warn_msg)
return test_ctx
|
[
"def",
"_check_min_max_range",
"(",
"self",
",",
"var",
",",
"test_ctx",
")",
":",
"if",
"'valid_range'",
"in",
"var",
".",
"ncattrs",
"(",
")",
":",
"test_ctx",
".",
"assert_true",
"(",
"var",
".",
"valid_range",
".",
"dtype",
"==",
"var",
".",
"dtype",
"and",
"len",
"(",
"var",
".",
"valid_range",
")",
"==",
"2",
"and",
"var",
".",
"valid_range",
"[",
"0",
"]",
"<=",
"var",
".",
"valid_range",
"[",
"1",
"]",
",",
"\"valid_range must be a two element vector of min followed by max with the same data type as {}\"",
".",
"format",
"(",
"var",
".",
"name",
")",
")",
"else",
":",
"for",
"bound",
"in",
"(",
"'valid_min'",
",",
"'valid_max'",
")",
":",
"v_bound",
"=",
"getattr",
"(",
"var",
",",
"bound",
",",
"''",
")",
"warn_msg",
"=",
"'{} attribute should exist, have the same type as {}, and not be empty or valid_range should be defined'",
".",
"format",
"(",
"bound",
",",
"var",
".",
"name",
")",
"# need to special case str attributes since they aren't directly",
"# comparable to numpy dtypes",
"if",
"isinstance",
"(",
"v_bound",
",",
"six",
".",
"string_types",
")",
":",
"test_ctx",
".",
"assert_true",
"(",
"v_bound",
"!=",
"''",
"and",
"var",
".",
"dtype",
".",
"char",
"==",
"'S'",
",",
"warn_msg",
")",
"# otherwise compare the numpy types directly",
"else",
":",
"test_ctx",
".",
"assert_true",
"(",
"v_bound",
".",
"dtype",
"==",
"var",
".",
"dtype",
",",
"warn_msg",
")",
"return",
"test_ctx"
] | 56.291667 | 26.541667 |
def discard_config(self):
"""Discard changes (rollback 0)."""
self.device.cu.rollback(rb_id=0)
if not self.config_lock:
self._unlock()
|
[
"def",
"discard_config",
"(",
"self",
")",
":",
"self",
".",
"device",
".",
"cu",
".",
"rollback",
"(",
"rb_id",
"=",
"0",
")",
"if",
"not",
"self",
".",
"config_lock",
":",
"self",
".",
"_unlock",
"(",
")"
] | 33.2 | 7.4 |
def update(self):
"""Update repository from its remote.
Calling this method, the repository will be synchronized with
the remote repository using 'fetch' command for 'heads' refs.
Any commit stored in the local copy will be removed; refs
will be overwritten.
:raises RepositoryError: when an error occurs updating the
repository
"""
cmd_update = ['git', 'fetch', 'origin', '+refs/heads/*:refs/heads/*', '--prune']
self._exec(cmd_update, cwd=self.dirpath, env=self.gitenv)
logger.debug("Git %s repository updated into %s",
self.uri, self.dirpath)
|
[
"def",
"update",
"(",
"self",
")",
":",
"cmd_update",
"=",
"[",
"'git'",
",",
"'fetch'",
",",
"'origin'",
",",
"'+refs/heads/*:refs/heads/*'",
",",
"'--prune'",
"]",
"self",
".",
"_exec",
"(",
"cmd_update",
",",
"cwd",
"=",
"self",
".",
"dirpath",
",",
"env",
"=",
"self",
".",
"gitenv",
")",
"logger",
".",
"debug",
"(",
"\"Git %s repository updated into %s\"",
",",
"self",
".",
"uri",
",",
"self",
".",
"dirpath",
")"
] | 40.375 | 23.5 |
def update_mapping_meta(self, doc_type, values, indices=None):
"""
Update mapping meta
:param doc_type: a doc type or a list of doctypes
:param values: the dict of meta
:param indices: a list of indices
:return:
"""
indices = self._validate_indices(indices)
for index in indices:
mapping = self.mappings.get_doctype(index, doc_type)
if mapping is None:
continue
meta = mapping.get_meta()
meta.update(values)
mapping = {doc_type: {"_meta": meta}}
self.indices.put_mapping(doc_type=doc_type, mapping=mapping, indices=indices)
|
[
"def",
"update_mapping_meta",
"(",
"self",
",",
"doc_type",
",",
"values",
",",
"indices",
"=",
"None",
")",
":",
"indices",
"=",
"self",
".",
"_validate_indices",
"(",
"indices",
")",
"for",
"index",
"in",
"indices",
":",
"mapping",
"=",
"self",
".",
"mappings",
".",
"get_doctype",
"(",
"index",
",",
"doc_type",
")",
"if",
"mapping",
"is",
"None",
":",
"continue",
"meta",
"=",
"mapping",
".",
"get_meta",
"(",
")",
"meta",
".",
"update",
"(",
"values",
")",
"mapping",
"=",
"{",
"doc_type",
":",
"{",
"\"_meta\"",
":",
"meta",
"}",
"}",
"self",
".",
"indices",
".",
"put_mapping",
"(",
"doc_type",
"=",
"doc_type",
",",
"mapping",
"=",
"mapping",
",",
"indices",
"=",
"indices",
")"
] | 39.235294 | 12.764706 |
def check_against_chunks(self, chunks):
# type: (Iterator[bytes]) -> None
"""Check good hashes against ones built from iterable of chunks of
data.
Raise HashMismatch if none match.
"""
gots = {}
for hash_name in iterkeys(self._allowed):
try:
gots[hash_name] = hashlib.new(hash_name)
except (ValueError, TypeError):
raise InstallationError('Unknown hash name: %s' % hash_name)
for chunk in chunks:
for hash in itervalues(gots):
hash.update(chunk)
for hash_name, got in iteritems(gots):
if got.hexdigest() in self._allowed[hash_name]:
return
self._raise(gots)
|
[
"def",
"check_against_chunks",
"(",
"self",
",",
"chunks",
")",
":",
"# type: (Iterator[bytes]) -> None",
"gots",
"=",
"{",
"}",
"for",
"hash_name",
"in",
"iterkeys",
"(",
"self",
".",
"_allowed",
")",
":",
"try",
":",
"gots",
"[",
"hash_name",
"]",
"=",
"hashlib",
".",
"new",
"(",
"hash_name",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"raise",
"InstallationError",
"(",
"'Unknown hash name: %s'",
"%",
"hash_name",
")",
"for",
"chunk",
"in",
"chunks",
":",
"for",
"hash",
"in",
"itervalues",
"(",
"gots",
")",
":",
"hash",
".",
"update",
"(",
"chunk",
")",
"for",
"hash_name",
",",
"got",
"in",
"iteritems",
"(",
"gots",
")",
":",
"if",
"got",
".",
"hexdigest",
"(",
")",
"in",
"self",
".",
"_allowed",
"[",
"hash_name",
"]",
":",
"return",
"self",
".",
"_raise",
"(",
"gots",
")"
] | 31.782609 | 16.434783 |
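A self-contained sketch of the same pattern with hashlib: every allowed algorithm is fed the same byte chunks, and the data is accepted if any resulting digest appears in the allow-list (the digest below is the well-known sha256 of b"test"):

import hashlib

allowed = {
    "sha256": {"9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"},
}
chunks = [b"te", b"st"]     # the data arrives as an iterable of byte chunks

gots = {name: hashlib.new(name) for name in allowed}
for chunk in chunks:
    for h in gots.values():
        h.update(chunk)

matched = any(h.hexdigest() in allowed[name] for name, h in gots.items())
print(matched)              # True
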
def get_upload_path(finfo, sample_info, config):
""""Dry" update the file: only return the upload path
"""
try:
storage_dir = _get_storage_dir(finfo, config)
except ValueError:
return None
if finfo.get("type") == "directory":
return _get_dir_upload_path(finfo, storage_dir)
else:
return _get_file_upload_path(finfo, storage_dir)
|
[
"def",
"get_upload_path",
"(",
"finfo",
",",
"sample_info",
",",
"config",
")",
":",
"try",
":",
"storage_dir",
"=",
"_get_storage_dir",
"(",
"finfo",
",",
"config",
")",
"except",
"ValueError",
":",
"return",
"None",
"if",
"finfo",
".",
"get",
"(",
"\"type\"",
")",
"==",
"\"directory\"",
":",
"return",
"_get_dir_upload_path",
"(",
"finfo",
",",
"storage_dir",
")",
"else",
":",
"return",
"_get_file_upload_path",
"(",
"finfo",
",",
"storage_dir",
")"
] | 31.5 | 15.833333 |
async def handle_request(self, request: Request) -> Response:
"""
coroutine: This method is called by Transport
implementation to handle the actual request.
It returns a webtype.Response object.
"""
# Get handler
try:
try:
self._set_ctx(request)
handler = self.router.get_handler_for_request(request)
request.app = self
response = await handler(request)
response.app = self
except ResponseError as r:
parser = app_parsers.get(request.content_type, None)
# Content-Type of an error response will be the same as the incoming request
# unless a parser for that content type is not found.
if not parser:
content_type = r.content_type
if not content_type:
content_type = self.default_content_type
else:
content_type = request.content_type
response = Response(
headers=r.headers, correlation_id=r.correlation_id, body=r.body,
status=r.status, content_type=content_type
)
response.app = self
if r.log:
exc_info = sys.exc_info()
self.logger.log_exception(request, exc_info, level='warning')
# invoke serialization (json) to make sure it works
_ = response.body
except CancelledError:
# This error can happen if a client closes the connection
# The response shouldnt really ever be used
return None
except asyncio.TimeoutError:
response = Response(status=HTTPStatus.GATEWAY_TIMEOUT,
body={'message': 'Gateway Timeout'})
response.app = self
except NackMePleaseError:
""" See message where this error is defined """
raise
except Exception:
exc_info = sys.exc_info()
self.logger.log_exception(request, exc_info)
response = Response(status=HTTPStatus.INTERNAL_SERVER_ERROR,
body={'message': 'Server Error'})
response.app = self
if not response.correlation_id:
response.correlation_id = request.correlation_id
if self._cors_handler is not None:
self._cors_handler.add_cors_headers(request, response)
# add default headers
response.headers = {**self.default_headers, **response.headers}
return response
|
[
"async",
"def",
"handle_request",
"(",
"self",
",",
"request",
":",
"Request",
")",
"->",
"Response",
":",
"# Get handler",
"try",
":",
"try",
":",
"self",
".",
"_set_ctx",
"(",
"request",
")",
"handler",
"=",
"self",
".",
"router",
".",
"get_handler_for_request",
"(",
"request",
")",
"request",
".",
"app",
"=",
"self",
"response",
"=",
"await",
"handler",
"(",
"request",
")",
"response",
".",
"app",
"=",
"self",
"except",
"ResponseError",
"as",
"r",
":",
"parser",
"=",
"app_parsers",
".",
"get",
"(",
"request",
".",
"content_type",
",",
"None",
")",
"# Content-Type of an error response will be the same as the incoming request",
"# unless a parser for that content type is not found.",
"if",
"not",
"parser",
":",
"content_type",
"=",
"r",
".",
"content_type",
"if",
"not",
"content_type",
":",
"content_type",
"=",
"self",
".",
"default_content_type",
"else",
":",
"content_type",
"=",
"request",
".",
"content_type",
"response",
"=",
"Response",
"(",
"headers",
"=",
"r",
".",
"headers",
",",
"correlation_id",
"=",
"r",
".",
"correlation_id",
",",
"body",
"=",
"r",
".",
"body",
",",
"status",
"=",
"r",
".",
"status",
",",
"content_type",
"=",
"content_type",
")",
"response",
".",
"app",
"=",
"self",
"if",
"r",
".",
"log",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"self",
".",
"logger",
".",
"log_exception",
"(",
"request",
",",
"exc_info",
",",
"level",
"=",
"'warning'",
")",
"# invoke serialization (json) to make sure it works",
"_",
"=",
"response",
".",
"body",
"except",
"CancelledError",
":",
"# This error can happen if a client closes the connection",
"# The response shouldnt really ever be used",
"return",
"None",
"except",
"asyncio",
".",
"TimeoutError",
":",
"response",
"=",
"Response",
"(",
"status",
"=",
"HTTPStatus",
".",
"GATEWAY_TIMEOUT",
",",
"body",
"=",
"{",
"'message'",
":",
"'Gateway Timeout'",
"}",
")",
"response",
".",
"app",
"=",
"self",
"except",
"NackMePleaseError",
":",
"\"\"\" See message where this error is defined \"\"\"",
"raise",
"except",
"Exception",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"self",
".",
"logger",
".",
"log_exception",
"(",
"request",
",",
"exc_info",
")",
"response",
"=",
"Response",
"(",
"status",
"=",
"HTTPStatus",
".",
"INTERNAL_SERVER_ERROR",
",",
"body",
"=",
"{",
"'message'",
":",
"'Server Error'",
"}",
")",
"response",
".",
"app",
"=",
"self",
"if",
"not",
"response",
".",
"correlation_id",
":",
"response",
".",
"correlation_id",
"=",
"request",
".",
"correlation_id",
"if",
"self",
".",
"_cors_handler",
"is",
"not",
"None",
":",
"self",
".",
"_cors_handler",
".",
"add_cors_headers",
"(",
"request",
",",
"response",
")",
"# add default headers",
"response",
".",
"headers",
"=",
"{",
"*",
"*",
"self",
".",
"default_headers",
",",
"*",
"*",
"response",
".",
"headers",
"}",
"return",
"response"
] | 39.606061 | 18.954545 |
def get(self):
"""API endpoint to get the related blocks for a transaction.
Return:
A ``list`` of ``block_id``s that contain the given transaction. The
list may be filtered when provided a status query parameter:
"valid", "invalid", "undecided".
"""
parser = reqparse.RequestParser()
parser.add_argument('transaction_id', type=str, required=True)
args = parser.parse_args(strict=True)
tx_id = args['transaction_id']
pool = current_app.config['bigchain_pool']
with pool() as bigchain:
blocks = bigchain.get_block_containing_tx(tx_id)
return blocks
|
[
"def",
"get",
"(",
"self",
")",
":",
"parser",
"=",
"reqparse",
".",
"RequestParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'transaction_id'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"strict",
"=",
"True",
")",
"tx_id",
"=",
"args",
"[",
"'transaction_id'",
"]",
"pool",
"=",
"current_app",
".",
"config",
"[",
"'bigchain_pool'",
"]",
"with",
"pool",
"(",
")",
"as",
"bigchain",
":",
"blocks",
"=",
"bigchain",
".",
"get_block_containing_tx",
"(",
"tx_id",
")",
"return",
"blocks"
] | 33 | 21.05 |
def unadvertise_endpoint(self, endpointid):
"""
Unadvertise a previously-advertised endpointid (string).
        :param endpointid: The string returned from ed.get_id() or
           the value of property endpoint.id. Should not be None
        :return: True if removed, False if not removed (hasn't been previously
           advertised by this advertiser)
"""
with self._published_endpoints_lock:
with self._published_endpoints_lock:
advertised = self.get_advertised_endpoint(endpointid)
if not advertised:
return None
unadvertise_result = self._unadvertise(advertised)
if unadvertise_result:
self._remove_advertised(endpointid)
return None
|
[
"def",
"unadvertise_endpoint",
"(",
"self",
",",
"endpointid",
")",
":",
"with",
"self",
".",
"_published_endpoints_lock",
":",
"with",
"self",
".",
"_published_endpoints_lock",
":",
"advertised",
"=",
"self",
".",
"get_advertised_endpoint",
"(",
"endpointid",
")",
"if",
"not",
"advertised",
":",
"return",
"None",
"unadvertise_result",
"=",
"self",
".",
"_unadvertise",
"(",
"advertised",
")",
"if",
"unadvertise_result",
":",
"self",
".",
"_remove_advertised",
"(",
"endpointid",
")",
"return",
"None"
] | 37.333333 | 19.52381 |
def indexdelta(self, stop_id, start_id):
"""returns the distance (int) between to idices.
Two consecutive tokens must have a delta of 1.
"""
return self.tokenid2index(stop_id) - self.tokenid2index(start_id)
|
[
"def",
"indexdelta",
"(",
"self",
",",
"stop_id",
",",
"start_id",
")",
":",
"return",
"self",
".",
"tokenid2index",
"(",
"stop_id",
")",
"-",
"self",
".",
"tokenid2index",
"(",
"start_id",
")"
] | 39 | 14.5 |
def _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line):
"""Reflow the lines so that it looks nice."""
if unicode(parsed_tokens[0]) == 'def':
# A function definition gets indented a bit more.
continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
else:
continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE
break_after_open_bracket = not start_on_prefix_line
lines = ReformattedLines(max_line_length)
lines.add_indent(len(indentation.lstrip('\r\n')))
if not start_on_prefix_line:
# If splitting after the opening bracket will cause the first element
# to be aligned weirdly, don't try it.
first_token = get_item(parsed_tokens, 0)
second_token = get_item(parsed_tokens, 1)
if (
first_token and second_token and
unicode(second_token)[0] == '(' and
len(indentation) + len(first_token) + 1 == len(continued_indent)
):
return None
for item in parsed_tokens:
lines.add_space_if_needed(unicode(item), equal=True)
save_continued_indent = continued_indent
if start_on_prefix_line and isinstance(item, Container):
start_on_prefix_line = False
continued_indent = ' ' * (lines.current_size() + 1)
item.reflow(lines, continued_indent, break_after_open_bracket)
continued_indent = save_continued_indent
return lines.emit()
|
[
"def",
"_reflow_lines",
"(",
"parsed_tokens",
",",
"indentation",
",",
"max_line_length",
",",
"start_on_prefix_line",
")",
":",
"if",
"unicode",
"(",
"parsed_tokens",
"[",
"0",
"]",
")",
"==",
"'def'",
":",
"# A function definition gets indented a bit more.",
"continued_indent",
"=",
"indentation",
"+",
"' '",
"*",
"2",
"*",
"DEFAULT_INDENT_SIZE",
"else",
":",
"continued_indent",
"=",
"indentation",
"+",
"' '",
"*",
"DEFAULT_INDENT_SIZE",
"break_after_open_bracket",
"=",
"not",
"start_on_prefix_line",
"lines",
"=",
"ReformattedLines",
"(",
"max_line_length",
")",
"lines",
".",
"add_indent",
"(",
"len",
"(",
"indentation",
".",
"lstrip",
"(",
"'\\r\\n'",
")",
")",
")",
"if",
"not",
"start_on_prefix_line",
":",
"# If splitting after the opening bracket will cause the first element",
"# to be aligned weirdly, don't try it.",
"first_token",
"=",
"get_item",
"(",
"parsed_tokens",
",",
"0",
")",
"second_token",
"=",
"get_item",
"(",
"parsed_tokens",
",",
"1",
")",
"if",
"(",
"first_token",
"and",
"second_token",
"and",
"unicode",
"(",
"second_token",
")",
"[",
"0",
"]",
"==",
"'('",
"and",
"len",
"(",
"indentation",
")",
"+",
"len",
"(",
"first_token",
")",
"+",
"1",
"==",
"len",
"(",
"continued_indent",
")",
")",
":",
"return",
"None",
"for",
"item",
"in",
"parsed_tokens",
":",
"lines",
".",
"add_space_if_needed",
"(",
"unicode",
"(",
"item",
")",
",",
"equal",
"=",
"True",
")",
"save_continued_indent",
"=",
"continued_indent",
"if",
"start_on_prefix_line",
"and",
"isinstance",
"(",
"item",
",",
"Container",
")",
":",
"start_on_prefix_line",
"=",
"False",
"continued_indent",
"=",
"' '",
"*",
"(",
"lines",
".",
"current_size",
"(",
")",
"+",
"1",
")",
"item",
".",
"reflow",
"(",
"lines",
",",
"continued_indent",
",",
"break_after_open_bracket",
")",
"continued_indent",
"=",
"save_continued_indent",
"return",
"lines",
".",
"emit",
"(",
")"
] | 36.45 | 21.275 |
def syncScrollbars(self):
"""
Synchronizes the various scrollbars within this chart.
"""
chart_hbar = self.uiChartVIEW.horizontalScrollBar()
chart_vbar = self.uiChartVIEW.verticalScrollBar()
x_hbar = self.uiXAxisVIEW.horizontalScrollBar()
x_vbar = self.uiXAxisVIEW.verticalScrollBar()
y_hbar = self.uiYAxisVIEW.horizontalScrollBar()
y_vbar = self.uiYAxisVIEW.verticalScrollBar()
x_hbar.setRange(chart_hbar.minimum(), chart_hbar.maximum())
x_hbar.setValue(chart_hbar.value())
x_vbar.setValue(0)
chart_vbar.setRange(y_vbar.minimum(), y_vbar.maximum())
chart_vbar.setValue(y_vbar.value())
y_hbar.setValue(4)
|
[
"def",
"syncScrollbars",
"(",
"self",
")",
":",
"chart_hbar",
"=",
"self",
".",
"uiChartVIEW",
".",
"horizontalScrollBar",
"(",
")",
"chart_vbar",
"=",
"self",
".",
"uiChartVIEW",
".",
"verticalScrollBar",
"(",
")",
"x_hbar",
"=",
"self",
".",
"uiXAxisVIEW",
".",
"horizontalScrollBar",
"(",
")",
"x_vbar",
"=",
"self",
".",
"uiXAxisVIEW",
".",
"verticalScrollBar",
"(",
")",
"y_hbar",
"=",
"self",
".",
"uiYAxisVIEW",
".",
"horizontalScrollBar",
"(",
")",
"y_vbar",
"=",
"self",
".",
"uiYAxisVIEW",
".",
"verticalScrollBar",
"(",
")",
"x_hbar",
".",
"setRange",
"(",
"chart_hbar",
".",
"minimum",
"(",
")",
",",
"chart_hbar",
".",
"maximum",
"(",
")",
")",
"x_hbar",
".",
"setValue",
"(",
"chart_hbar",
".",
"value",
"(",
")",
")",
"x_vbar",
".",
"setValue",
"(",
"0",
")",
"chart_vbar",
".",
"setRange",
"(",
"y_vbar",
".",
"minimum",
"(",
")",
",",
"y_vbar",
".",
"maximum",
"(",
")",
")",
"chart_vbar",
".",
"setValue",
"(",
"y_vbar",
".",
"value",
"(",
")",
")",
"y_hbar",
".",
"setValue",
"(",
"4",
")"
] | 36.619048 | 17.952381 |
def deploy_files(local_dir, remote_dir, pattern = '',rsync_exclude=['*.pyc','.*'], use_sudo=False):
"""
Generic deploy function for cases where one or more files are being deployed to a host.
Wraps around ``rsync_project`` and stages files locally and/or remotely
for network efficiency.
``local_dir`` is the directory that will be deployed.
``remote_dir`` is the directory the files will be deployed to.
Directories will be created if necessary.
Note: Unlike other ways of deploying files, all files under local_dir will be
    deployed into remote_dir. This is equivalent to cp -R local_dir/* remote_dir.
``pattern`` finds all the pathnames matching a specified glob pattern relative
to the local_dir according to the rules used by the Unix shell.
    ``pattern`` also accepts multiple glob patterns separated by the pipe
    character, e.g. '*.txt|Django*'
``rsync_exclude`` as per ``rsync_project``
Returns a list of directories and files created on the host.
"""
#normalise paths
if local_dir[-1] == os.sep: local_dir = local_dir[:-1]
if remote_dir[-1] == '/': remote_dir = remote_dir[:-1]
created_list = []
staging_dir = local_dir
#resolve pattern into a dir:filename dict
local_files = _get_local_files(local_dir,pattern)
#If we are only copying specific files or rendering templates we need to stage locally
if local_files: staging_dir = _stage_local_files(local_dir, local_files)
remote_staging_dir = '/home/%s/.staging'% env.user
if not exists(remote_staging_dir):
run(' '.join(['mkdir -pv',remote_staging_dir])).split('\n')
created_list = [remote_staging_dir]
#upload into remote staging
rsync_project(local_dir=staging_dir,remote_dir=remote_staging_dir,exclude=rsync_exclude,delete=True)
#create the final destination
created_dir_list = mkdirs(remote_dir, use_sudo)
if not os.listdir(staging_dir): return created_list
func = use_sudo and sudo or run
#cp recursively -R from the staging to the destination and keep a list
remote_base_path = '/'.join([remote_staging_dir,os.path.basename(local_dir),'*'])
copy_file_list = func(' '.join(['cp -Ruv',remote_base_path,remote_dir])).split('\n')
if copy_file_list[0]: created_list += [file.split(' ')[2][1:-1] for file in copy_file_list if file]
#cleanup any tmp staging dir
    if staging_dir != local_dir:
shutil.rmtree(staging_dir,ignore_errors=True)
return created_list
|
[
"def",
"deploy_files",
"(",
"local_dir",
",",
"remote_dir",
",",
"pattern",
"=",
"''",
",",
"rsync_exclude",
"=",
"[",
"'*.pyc'",
",",
"'.*'",
"]",
",",
"use_sudo",
"=",
"False",
")",
":",
"#normalise paths",
"if",
"local_dir",
"[",
"-",
"1",
"]",
"==",
"os",
".",
"sep",
":",
"local_dir",
"=",
"local_dir",
"[",
":",
"-",
"1",
"]",
"if",
"remote_dir",
"[",
"-",
"1",
"]",
"==",
"'/'",
":",
"remote_dir",
"=",
"remote_dir",
"[",
":",
"-",
"1",
"]",
"created_list",
"=",
"[",
"]",
"staging_dir",
"=",
"local_dir",
"#resolve pattern into a dir:filename dict",
"local_files",
"=",
"_get_local_files",
"(",
"local_dir",
",",
"pattern",
")",
"#If we are only copying specific files or rendering templates we need to stage locally",
"if",
"local_files",
":",
"staging_dir",
"=",
"_stage_local_files",
"(",
"local_dir",
",",
"local_files",
")",
"remote_staging_dir",
"=",
"'/home/%s/.staging'",
"%",
"env",
".",
"user",
"if",
"not",
"exists",
"(",
"remote_staging_dir",
")",
":",
"run",
"(",
"' '",
".",
"join",
"(",
"[",
"'mkdir -pv'",
",",
"remote_staging_dir",
"]",
")",
")",
".",
"split",
"(",
"'\\n'",
")",
"created_list",
"=",
"[",
"remote_staging_dir",
"]",
"#upload into remote staging",
"rsync_project",
"(",
"local_dir",
"=",
"staging_dir",
",",
"remote_dir",
"=",
"remote_staging_dir",
",",
"exclude",
"=",
"rsync_exclude",
",",
"delete",
"=",
"True",
")",
"#create the final destination",
"created_dir_list",
"=",
"mkdirs",
"(",
"remote_dir",
",",
"use_sudo",
")",
"if",
"not",
"os",
".",
"listdir",
"(",
"staging_dir",
")",
":",
"return",
"created_list",
"func",
"=",
"use_sudo",
"and",
"sudo",
"or",
"run",
"#cp recursively -R from the staging to the destination and keep a list",
"remote_base_path",
"=",
"'/'",
".",
"join",
"(",
"[",
"remote_staging_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"local_dir",
")",
",",
"'*'",
"]",
")",
"copy_file_list",
"=",
"func",
"(",
"' '",
".",
"join",
"(",
"[",
"'cp -Ruv'",
",",
"remote_base_path",
",",
"remote_dir",
"]",
")",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"copy_file_list",
"[",
"0",
"]",
":",
"created_list",
"+=",
"[",
"file",
".",
"split",
"(",
"' '",
")",
"[",
"2",
"]",
"[",
"1",
":",
"-",
"1",
"]",
"for",
"file",
"in",
"copy_file_list",
"if",
"file",
"]",
"#cleanup any tmp staging dir",
"if",
"staging_dir",
"<>",
"local_dir",
":",
"shutil",
".",
"rmtree",
"(",
"staging_dir",
",",
"ignore_errors",
"=",
"True",
")",
"return",
"created_list"
] | 43.327586 | 26.568966 |
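A short usage sketch for deploy_files, assuming a Fabric environment with env.user already configured; the directories and the pipe-separated pattern are illustrative only:

# Deploy only text files and Django templates from a local build directory.
created = deploy_files(
    local_dir='/tmp/build/site',            # hypothetical local directory
    remote_dir='/srv/www/site',             # hypothetical remote directory
    pattern='*.txt|Django*',                # pipe-separated glob patterns
    rsync_exclude=['*.pyc', '.*', '.git'],
    use_sudo=True,
)
print(created)   # directories and files created on the host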
def get_company_user(self, email):
"""Get company user based on email.
:param email: address of contact
:type email: ``str``, ``unicode``
:rtype: ``dict`` with contact information
"""
users = self.get_company_users()
for user in users:
if user['email'] == email:
return user
msg = 'No user with email: "{email}" associated with this company.'
raise FMBaseError(msg.format(email=email))
|
[
"def",
"get_company_user",
"(",
"self",
",",
"email",
")",
":",
"users",
"=",
"self",
".",
"get_company_users",
"(",
")",
"for",
"user",
"in",
"users",
":",
"if",
"user",
"[",
"'email'",
"]",
"==",
"email",
":",
"return",
"user",
"msg",
"=",
"'No user with email: \"{email}\" associated with this company.'",
"raise",
"FMBaseError",
"(",
"msg",
".",
"format",
"(",
"email",
"=",
"email",
")",
")"
] | 31.6 | 14 |
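A hedged usage example; `client` stands for whatever object exposes get_company_user, and the address is made up:

try:
    user = client.get_company_user('jane.doe@example.com')
except FMBaseError:
    user = None   # no contact with that address for this company
else:
    print(user['email'])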
def PixelsHDU(model):
'''
Construct the HDU containing the pixel-level light curve.
'''
# Get mission cards
cards = model._mission.HDUCards(model.meta, hdu=2)
# Add EVEREST info
cards = []
cards.append(('COMMENT', '************************'))
cards.append(('COMMENT', '* EVEREST INFO *'))
cards.append(('COMMENT', '************************'))
cards.append(('MISSION', model.mission, 'Mission name'))
cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
cards.append(('DATE', strftime('%Y-%m-%d'),
'EVEREST file creation date (YYYY-MM-DD)'))
# Create the HDU
header = pyfits.Header(cards=cards)
# The pixel timeseries
arrays = [pyfits.Column(name='FPIX', format='%dD' %
model.fpix.shape[1], array=model.fpix)]
# The first order PLD vectors for all the neighbors (npixels, ncadences)
X1N = model.X1N
if X1N is not None:
arrays.append(pyfits.Column(name='X1N', format='%dD' %
X1N.shape[1], array=X1N))
cols = pyfits.ColDefs(arrays)
hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='PIXELS')
return hdu
|
[
"def",
"PixelsHDU",
"(",
"model",
")",
":",
"# Get mission cards",
"cards",
"=",
"model",
".",
"_mission",
".",
"HDUCards",
"(",
"model",
".",
"meta",
",",
"hdu",
"=",
"2",
")",
"# Add EVEREST info",
"cards",
"=",
"[",
"]",
"cards",
".",
"append",
"(",
"(",
"'COMMENT'",
",",
"'************************'",
")",
")",
"cards",
".",
"append",
"(",
"(",
"'COMMENT'",
",",
"'* EVEREST INFO *'",
")",
")",
"cards",
".",
"append",
"(",
"(",
"'COMMENT'",
",",
"'************************'",
")",
")",
"cards",
".",
"append",
"(",
"(",
"'MISSION'",
",",
"model",
".",
"mission",
",",
"'Mission name'",
")",
")",
"cards",
".",
"append",
"(",
"(",
"'VERSION'",
",",
"EVEREST_MAJOR_MINOR",
",",
"'EVEREST pipeline version'",
")",
")",
"cards",
".",
"append",
"(",
"(",
"'SUBVER'",
",",
"EVEREST_VERSION",
",",
"'EVEREST pipeline subversion'",
")",
")",
"cards",
".",
"append",
"(",
"(",
"'DATE'",
",",
"strftime",
"(",
"'%Y-%m-%d'",
")",
",",
"'EVEREST file creation date (YYYY-MM-DD)'",
")",
")",
"# Create the HDU",
"header",
"=",
"pyfits",
".",
"Header",
"(",
"cards",
"=",
"cards",
")",
"# The pixel timeseries",
"arrays",
"=",
"[",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"'FPIX'",
",",
"format",
"=",
"'%dD'",
"%",
"model",
".",
"fpix",
".",
"shape",
"[",
"1",
"]",
",",
"array",
"=",
"model",
".",
"fpix",
")",
"]",
"# The first order PLD vectors for all the neighbors (npixels, ncadences)",
"X1N",
"=",
"model",
".",
"X1N",
"if",
"X1N",
"is",
"not",
"None",
":",
"arrays",
".",
"append",
"(",
"pyfits",
".",
"Column",
"(",
"name",
"=",
"'X1N'",
",",
"format",
"=",
"'%dD'",
"%",
"X1N",
".",
"shape",
"[",
"1",
"]",
",",
"array",
"=",
"X1N",
")",
")",
"cols",
"=",
"pyfits",
".",
"ColDefs",
"(",
"arrays",
")",
"hdu",
"=",
"pyfits",
".",
"BinTableHDU",
".",
"from_columns",
"(",
"cols",
",",
"header",
"=",
"header",
",",
"name",
"=",
"'PIXELS'",
")",
"return",
"hdu"
] | 34.432432 | 25.351351 |
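A sketch of how the returned extension might be written out with astropy.io.fits (imported as pyfits, as in the function); the primary HDU and the output filename are assumptions:

import astropy.io.fits as pyfits

primary = pyfits.PrimaryHDU()
pixels = PixelsHDU(model)                    # `model` as in the function above
hdulist = pyfits.HDUList([primary, pixels])
hdulist.writeto('everest_pixels.fits', overwrite=True)   # hypothetical filename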
def send_message(self):
"""Send message over UDP.
        If tracking is disabled, the bytes_sent will always be set to -1
        Returns:
            time_taken (float): seconds spent constructing and sending the message
"""
start = time.time()
message = None
if not self.initialized:
message = self.construct_start_message()
self.initialized = True
else:
message = self.construct_end_message()
self.send_UDP_message(message)
end = time.time()
return end - start
|
[
"def",
"send_message",
"(",
"self",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"message",
"=",
"None",
"if",
"not",
"self",
".",
"initialized",
":",
"message",
"=",
"self",
".",
"construct_start_message",
"(",
")",
"self",
".",
"initialized",
"=",
"True",
"else",
":",
"message",
"=",
"self",
".",
"construct_end_message",
"(",
")",
"self",
".",
"send_UDP_message",
"(",
"message",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"return",
"end",
"-",
"start"
] | 25.55 | 18.05 |
def _create_resource(resource, name=None, tags=None, region=None, key=None,
keyid=None, profile=None, **kwargs):
'''
Create a VPC resource. Returns the resource id if created, or False
if not created.
'''
try:
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
create_resource = getattr(conn, 'create_' + resource)
except AttributeError:
raise AttributeError('{0} function does not exist for boto VPC '
'connection.'.format('create_' + resource))
if name and _get_resource_id(resource, name, region=region, key=key,
keyid=keyid, profile=profile):
return {'created': False, 'error': {'message':
'A {0} named {1} already exists.'.format(
resource, name)}}
r = create_resource(**kwargs)
if r:
if isinstance(r, bool):
return {'created': True}
else:
log.info('A %s with id %s was created', resource, r.id)
_maybe_set_name_tag(name, r)
_maybe_set_tags(tags, r)
if name:
_cache_id(name,
sub_resource=resource,
resource_id=r.id,
region=region,
key=key, keyid=keyid,
profile=profile)
return {'created': True, 'id': r.id}
else:
if name:
e = '{0} {1} was not created.'.format(resource, name)
else:
e = '{0} was not created.'.format(resource)
log.warning(e)
return {'created': False, 'error': {'message': e}}
except BotoServerError as e:
return {'created': False, 'error': __utils__['boto.get_error'](e)}
|
[
"def",
"_create_resource",
"(",
"resource",
",",
"name",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"create_resource",
"=",
"getattr",
"(",
"conn",
",",
"'create_'",
"+",
"resource",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"'{0} function does not exist for boto VPC '",
"'connection.'",
".",
"format",
"(",
"'create_'",
"+",
"resource",
")",
")",
"if",
"name",
"and",
"_get_resource_id",
"(",
"resource",
",",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
":",
"return",
"{",
"'created'",
":",
"False",
",",
"'error'",
":",
"{",
"'message'",
":",
"'A {0} named {1} already exists.'",
".",
"format",
"(",
"resource",
",",
"name",
")",
"}",
"}",
"r",
"=",
"create_resource",
"(",
"*",
"*",
"kwargs",
")",
"if",
"r",
":",
"if",
"isinstance",
"(",
"r",
",",
"bool",
")",
":",
"return",
"{",
"'created'",
":",
"True",
"}",
"else",
":",
"log",
".",
"info",
"(",
"'A %s with id %s was created'",
",",
"resource",
",",
"r",
".",
"id",
")",
"_maybe_set_name_tag",
"(",
"name",
",",
"r",
")",
"_maybe_set_tags",
"(",
"tags",
",",
"r",
")",
"if",
"name",
":",
"_cache_id",
"(",
"name",
",",
"sub_resource",
"=",
"resource",
",",
"resource_id",
"=",
"r",
".",
"id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"return",
"{",
"'created'",
":",
"True",
",",
"'id'",
":",
"r",
".",
"id",
"}",
"else",
":",
"if",
"name",
":",
"e",
"=",
"'{0} {1} was not created.'",
".",
"format",
"(",
"resource",
",",
"name",
")",
"else",
":",
"e",
"=",
"'{0} was not created.'",
".",
"format",
"(",
"resource",
")",
"log",
".",
"warning",
"(",
"e",
")",
"return",
"{",
"'created'",
":",
"False",
",",
"'error'",
":",
"{",
"'message'",
":",
"e",
"}",
"}",
"except",
"BotoServerError",
"as",
"e",
":",
"return",
"{",
"'created'",
":",
"False",
",",
"'error'",
":",
"__utils__",
"[",
"'boto.get_error'",
"]",
"(",
"e",
")",
"}"
] | 39.75 | 21.75 |
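A hedged sketch of how a public wrapper might delegate to the private helper above; the wrapper name and the chosen resource type are illustrative, not taken from the module:

def create_internet_gateway(name=None, tags=None, region=None, key=None,
                            keyid=None, profile=None):
    # _create_resource resolves the boto connection, refuses to create a
    # duplicate named resource, applies Name/tags, and caches the new id.
    return _create_resource('internet_gateway', name=name, tags=tags,
                            region=region, key=key, keyid=keyid,
                            profile=profile)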
def fields(self):
"""
return all the fields and their raw values for this Orm instance. This
property returns a dict with the field names and their current values
if you want to control the values for outputting to an api, use .jsonable()
"""
return {k:getattr(self, k, None) for k in self.schema.fields}
|
[
"def",
"fields",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"getattr",
"(",
"self",
",",
"k",
",",
"None",
")",
"for",
"k",
"in",
"self",
".",
"schema",
".",
"fields",
"}"
] | 43.25 | 26.25 |
def do_build(self, argv):
"""\
build [TARGETS] Build the specified TARGETS and their
dependencies. 'b' is a synonym.
"""
import SCons.Node
import SCons.SConsign
import SCons.Script.Main
options = copy.deepcopy(self.options)
options, targets = self.parser.parse_args(argv[1:], values=options)
SCons.Script.COMMAND_LINE_TARGETS = targets
if targets:
SCons.Script.BUILD_TARGETS = targets
else:
# If the user didn't specify any targets on the command line,
# use the list of default targets.
SCons.Script.BUILD_TARGETS = SCons.Script._build_plus_default
nodes = SCons.Script.Main._build_targets(self.fs,
options,
targets,
self.target_top)
if not nodes:
return
# Call each of the Node's alter_targets() methods, which may
# provide additional targets that ended up as part of the build
# (the canonical example being a VariantDir() when we're building
# from a source directory) and which we therefore need their
# state cleared, too.
x = []
for n in nodes:
x.extend(n.alter_targets()[0])
nodes.extend(x)
# Clean up so that we can perform the next build correctly.
#
# We do this by walking over all the children of the targets,
# and clearing their state.
#
# We currently have to re-scan each node to find their
# children, because built nodes have already been partially
# cleared and don't remember their children. (In scons
# 0.96.1 and earlier, this wasn't the case, and we didn't
# have to re-scan the nodes.)
#
# Because we have to re-scan each node, we can't clear the
# nodes as we walk over them, because we may end up rescanning
# a cleared node as we scan a later node. Therefore, only
# store the list of nodes that need to be cleared as we walk
# the tree, and clear them in a separate pass.
#
# XXX: Someone more familiar with the inner workings of scons
# may be able to point out a more efficient way to do this.
SCons.Script.Main.progress_display("scons: Clearing cached node information ...")
seen_nodes = {}
def get_unseen_children(node, parent, seen_nodes=seen_nodes):
def is_unseen(node, seen_nodes=seen_nodes):
return node not in seen_nodes
return [child for child in node.children(scan=1) if is_unseen(child)]
def add_to_seen_nodes(node, parent, seen_nodes=seen_nodes):
seen_nodes[node] = 1
# If this file is in a VariantDir and has a
# corresponding source file in the source tree, remember the
# node in the source tree, too. This is needed in
# particular to clear cached implicit dependencies on the
# source file, since the scanner will scan it if the
# VariantDir was created with duplicate=0.
try:
rfile_method = node.rfile
except AttributeError:
return
else:
rfile = rfile_method()
if rfile != node:
seen_nodes[rfile] = 1
for node in nodes:
walker = SCons.Node.Walker(node,
kids_func=get_unseen_children,
eval_func=add_to_seen_nodes)
n = walker.get_next()
while n:
n = walker.get_next()
for node in list(seen_nodes.keys()):
# Call node.clear() to clear most of the state
node.clear()
# node.clear() doesn't reset node.state, so call
# node.set_state() to reset it manually
node.set_state(SCons.Node.no_state)
node.implicit = None
# Debug: Uncomment to verify that all Taskmaster reference
# counts have been reset to zero.
#if node.ref_count != 0:
# from SCons.Debug import Trace
# Trace('node %s, ref_count %s !!!\n' % (node, node.ref_count))
SCons.SConsign.Reset()
SCons.Script.Main.progress_display("scons: done clearing node information.")
|
[
"def",
"do_build",
"(",
"self",
",",
"argv",
")",
":",
"import",
"SCons",
".",
"Node",
"import",
"SCons",
".",
"SConsign",
"import",
"SCons",
".",
"Script",
".",
"Main",
"options",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"options",
")",
"options",
",",
"targets",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"argv",
"[",
"1",
":",
"]",
",",
"values",
"=",
"options",
")",
"SCons",
".",
"Script",
".",
"COMMAND_LINE_TARGETS",
"=",
"targets",
"if",
"targets",
":",
"SCons",
".",
"Script",
".",
"BUILD_TARGETS",
"=",
"targets",
"else",
":",
"# If the user didn't specify any targets on the command line,",
"# use the list of default targets.",
"SCons",
".",
"Script",
".",
"BUILD_TARGETS",
"=",
"SCons",
".",
"Script",
".",
"_build_plus_default",
"nodes",
"=",
"SCons",
".",
"Script",
".",
"Main",
".",
"_build_targets",
"(",
"self",
".",
"fs",
",",
"options",
",",
"targets",
",",
"self",
".",
"target_top",
")",
"if",
"not",
"nodes",
":",
"return",
"# Call each of the Node's alter_targets() methods, which may",
"# provide additional targets that ended up as part of the build",
"# (the canonical example being a VariantDir() when we're building",
"# from a source directory) and which we therefore need their",
"# state cleared, too.",
"x",
"=",
"[",
"]",
"for",
"n",
"in",
"nodes",
":",
"x",
".",
"extend",
"(",
"n",
".",
"alter_targets",
"(",
")",
"[",
"0",
"]",
")",
"nodes",
".",
"extend",
"(",
"x",
")",
"# Clean up so that we can perform the next build correctly.",
"#",
"# We do this by walking over all the children of the targets,",
"# and clearing their state.",
"#",
"# We currently have to re-scan each node to find their",
"# children, because built nodes have already been partially",
"# cleared and don't remember their children. (In scons",
"# 0.96.1 and earlier, this wasn't the case, and we didn't",
"# have to re-scan the nodes.)",
"#",
"# Because we have to re-scan each node, we can't clear the",
"# nodes as we walk over them, because we may end up rescanning",
"# a cleared node as we scan a later node. Therefore, only",
"# store the list of nodes that need to be cleared as we walk",
"# the tree, and clear them in a separate pass.",
"#",
"# XXX: Someone more familiar with the inner workings of scons",
"# may be able to point out a more efficient way to do this.",
"SCons",
".",
"Script",
".",
"Main",
".",
"progress_display",
"(",
"\"scons: Clearing cached node information ...\"",
")",
"seen_nodes",
"=",
"{",
"}",
"def",
"get_unseen_children",
"(",
"node",
",",
"parent",
",",
"seen_nodes",
"=",
"seen_nodes",
")",
":",
"def",
"is_unseen",
"(",
"node",
",",
"seen_nodes",
"=",
"seen_nodes",
")",
":",
"return",
"node",
"not",
"in",
"seen_nodes",
"return",
"[",
"child",
"for",
"child",
"in",
"node",
".",
"children",
"(",
"scan",
"=",
"1",
")",
"if",
"is_unseen",
"(",
"child",
")",
"]",
"def",
"add_to_seen_nodes",
"(",
"node",
",",
"parent",
",",
"seen_nodes",
"=",
"seen_nodes",
")",
":",
"seen_nodes",
"[",
"node",
"]",
"=",
"1",
"# If this file is in a VariantDir and has a",
"# corresponding source file in the source tree, remember the",
"# node in the source tree, too. This is needed in",
"# particular to clear cached implicit dependencies on the",
"# source file, since the scanner will scan it if the",
"# VariantDir was created with duplicate=0.",
"try",
":",
"rfile_method",
"=",
"node",
".",
"rfile",
"except",
"AttributeError",
":",
"return",
"else",
":",
"rfile",
"=",
"rfile_method",
"(",
")",
"if",
"rfile",
"!=",
"node",
":",
"seen_nodes",
"[",
"rfile",
"]",
"=",
"1",
"for",
"node",
"in",
"nodes",
":",
"walker",
"=",
"SCons",
".",
"Node",
".",
"Walker",
"(",
"node",
",",
"kids_func",
"=",
"get_unseen_children",
",",
"eval_func",
"=",
"add_to_seen_nodes",
")",
"n",
"=",
"walker",
".",
"get_next",
"(",
")",
"while",
"n",
":",
"n",
"=",
"walker",
".",
"get_next",
"(",
")",
"for",
"node",
"in",
"list",
"(",
"seen_nodes",
".",
"keys",
"(",
")",
")",
":",
"# Call node.clear() to clear most of the state",
"node",
".",
"clear",
"(",
")",
"# node.clear() doesn't reset node.state, so call",
"# node.set_state() to reset it manually",
"node",
".",
"set_state",
"(",
"SCons",
".",
"Node",
".",
"no_state",
")",
"node",
".",
"implicit",
"=",
"None",
"# Debug: Uncomment to verify that all Taskmaster reference",
"# counts have been reset to zero.",
"#if node.ref_count != 0:",
"# from SCons.Debug import Trace",
"# Trace('node %s, ref_count %s !!!\\n' % (node, node.ref_count))",
"SCons",
".",
"SConsign",
".",
"Reset",
"(",
")",
"SCons",
".",
"Script",
".",
"Main",
".",
"progress_display",
"(",
"\"scons: done clearing node information.\"",
")"
] | 39.927928 | 22.333333 |
def commitData(self, widget):
"""
Commits the data from the widget to the model.
:param widget | <QWidget>
"""
self._editColumn = self.currentColumn()
self.itemChanged.connect(self._commitToSelected)
super(XOrbTreeWidget, self).commitData(widget)
self.itemChanged.disconnect(self._commitToSelected)
self._editColumn = None
|
[
"def",
"commitData",
"(",
"self",
",",
"widget",
")",
":",
"self",
".",
"_editColumn",
"=",
"self",
".",
"currentColumn",
"(",
")",
"self",
".",
"itemChanged",
".",
"connect",
"(",
"self",
".",
"_commitToSelected",
")",
"super",
"(",
"XOrbTreeWidget",
",",
"self",
")",
".",
"commitData",
"(",
"widget",
")",
"self",
".",
"itemChanged",
".",
"disconnect",
"(",
"self",
".",
"_commitToSelected",
")",
"self",
".",
"_editColumn",
"=",
"None"
] | 37.090909 | 11.454545 |
def db_manager(self):
"""
" Do series of DB operations.
"""
rc_create = self.create_db() # for first create
try:
self.load_db() # load existing/factory
except Exception as e:
_logger.debug("*** %s" % str(e))
try:
self.recover_db(self.backup_json_db_path)
except Exception:
pass
else:
if rc_create is True:
self.db_status = "factory"
else:
self.db_status = "existing"
return True
try:
self.load_db() # load backup
except Exception as b:
_logger.debug("*** %s" % str(b))
self.recover_db(self.factory_json_db_path)
self.load_db() # load factory
self.db_status = "factory"
else:
self.db_status = "backup"
finally:
return True
|
[
"def",
"db_manager",
"(",
"self",
")",
":",
"rc_create",
"=",
"self",
".",
"create_db",
"(",
")",
"# for first create",
"try",
":",
"self",
".",
"load_db",
"(",
")",
"# load existing/factory",
"except",
"Exception",
"as",
"e",
":",
"_logger",
".",
"debug",
"(",
"\"*** %s\"",
"%",
"str",
"(",
"e",
")",
")",
"try",
":",
"self",
".",
"recover_db",
"(",
"self",
".",
"backup_json_db_path",
")",
"except",
"Exception",
":",
"pass",
"else",
":",
"if",
"rc_create",
"is",
"True",
":",
"self",
".",
"db_status",
"=",
"\"factory\"",
"else",
":",
"self",
".",
"db_status",
"=",
"\"existing\"",
"return",
"True",
"try",
":",
"self",
".",
"load_db",
"(",
")",
"# load backup",
"except",
"Exception",
"as",
"b",
":",
"_logger",
".",
"debug",
"(",
"\"*** %s\"",
"%",
"str",
"(",
"b",
")",
")",
"self",
".",
"recover_db",
"(",
"self",
".",
"factory_json_db_path",
")",
"self",
".",
"load_db",
"(",
")",
"# load factory",
"self",
".",
"db_status",
"=",
"\"factory\"",
"else",
":",
"self",
".",
"db_status",
"=",
"\"backup\"",
"finally",
":",
"return",
"True"
] | 29.612903 | 13.419355 |
def change_columns(self, model, **fields):
"""Change fields."""
for name, field in fields.items():
old_field = model._meta.fields.get(name, field)
old_column_name = old_field and old_field.column_name
model._meta.add_field(name, field)
if isinstance(old_field, pw.ForeignKeyField):
self.ops.append(self.migrator.drop_foreign_key_constraint(
model._meta.table_name, old_column_name))
if old_column_name != field.column_name:
self.ops.append(
self.migrator.rename_column(
model._meta.table_name, old_column_name, field.column_name))
if isinstance(field, pw.ForeignKeyField):
on_delete = field.on_delete if field.on_delete else 'RESTRICT'
on_update = field.on_update if field.on_update else 'RESTRICT'
self.ops.append(self.migrator.add_foreign_key_constraint(
model._meta.table_name, field.column_name,
field.rel_model._meta.table_name, field.rel_field.name,
on_delete, on_update))
continue
self.ops.append(self.migrator.change_column(
model._meta.table_name, field.column_name, field))
if field.unique == old_field.unique:
continue
if field.unique:
index = (field.column_name,), field.unique
self.ops.append(self.migrator.add_index(model._meta.table_name, *index))
model._meta.indexes.append(index)
else:
index = (field.column_name,), old_field.unique
self.ops.append(self.migrator.drop_index(model._meta.table_name, *index))
model._meta.indexes.remove(index)
return model
|
[
"def",
"change_columns",
"(",
"self",
",",
"model",
",",
"*",
"*",
"fields",
")",
":",
"for",
"name",
",",
"field",
"in",
"fields",
".",
"items",
"(",
")",
":",
"old_field",
"=",
"model",
".",
"_meta",
".",
"fields",
".",
"get",
"(",
"name",
",",
"field",
")",
"old_column_name",
"=",
"old_field",
"and",
"old_field",
".",
"column_name",
"model",
".",
"_meta",
".",
"add_field",
"(",
"name",
",",
"field",
")",
"if",
"isinstance",
"(",
"old_field",
",",
"pw",
".",
"ForeignKeyField",
")",
":",
"self",
".",
"ops",
".",
"append",
"(",
"self",
".",
"migrator",
".",
"drop_foreign_key_constraint",
"(",
"model",
".",
"_meta",
".",
"table_name",
",",
"old_column_name",
")",
")",
"if",
"old_column_name",
"!=",
"field",
".",
"column_name",
":",
"self",
".",
"ops",
".",
"append",
"(",
"self",
".",
"migrator",
".",
"rename_column",
"(",
"model",
".",
"_meta",
".",
"table_name",
",",
"old_column_name",
",",
"field",
".",
"column_name",
")",
")",
"if",
"isinstance",
"(",
"field",
",",
"pw",
".",
"ForeignKeyField",
")",
":",
"on_delete",
"=",
"field",
".",
"on_delete",
"if",
"field",
".",
"on_delete",
"else",
"'RESTRICT'",
"on_update",
"=",
"field",
".",
"on_update",
"if",
"field",
".",
"on_update",
"else",
"'RESTRICT'",
"self",
".",
"ops",
".",
"append",
"(",
"self",
".",
"migrator",
".",
"add_foreign_key_constraint",
"(",
"model",
".",
"_meta",
".",
"table_name",
",",
"field",
".",
"column_name",
",",
"field",
".",
"rel_model",
".",
"_meta",
".",
"table_name",
",",
"field",
".",
"rel_field",
".",
"name",
",",
"on_delete",
",",
"on_update",
")",
")",
"continue",
"self",
".",
"ops",
".",
"append",
"(",
"self",
".",
"migrator",
".",
"change_column",
"(",
"model",
".",
"_meta",
".",
"table_name",
",",
"field",
".",
"column_name",
",",
"field",
")",
")",
"if",
"field",
".",
"unique",
"==",
"old_field",
".",
"unique",
":",
"continue",
"if",
"field",
".",
"unique",
":",
"index",
"=",
"(",
"field",
".",
"column_name",
",",
")",
",",
"field",
".",
"unique",
"self",
".",
"ops",
".",
"append",
"(",
"self",
".",
"migrator",
".",
"add_index",
"(",
"model",
".",
"_meta",
".",
"table_name",
",",
"*",
"index",
")",
")",
"model",
".",
"_meta",
".",
"indexes",
".",
"append",
"(",
"index",
")",
"else",
":",
"index",
"=",
"(",
"field",
".",
"column_name",
",",
")",
",",
"old_field",
".",
"unique",
"self",
".",
"ops",
".",
"append",
"(",
"self",
".",
"migrator",
".",
"drop_index",
"(",
"model",
".",
"_meta",
".",
"table_name",
",",
"*",
"index",
")",
")",
"model",
".",
"_meta",
".",
"indexes",
".",
"remove",
"(",
"index",
")",
"return",
"model"
] | 43.547619 | 23.595238 |
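A usage sketch, assuming peewee models `User` and `Account` and that the class containing change_columns is instantiated as `migrations`; the field definitions are illustrative:

import peewee as pw

# Make 'email' unique and repoint 'owner' with an explicit ON DELETE rule;
# the method emits the rename/constraint/index operations shown above.
migrations.change_columns(
    User,
    email=pw.CharField(unique=True),
    owner=pw.ForeignKeyField(Account, backref='users', on_delete='CASCADE'),
)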
def finalize(self):
"""
finalize simulation for consumer
"""
# todo sort self.result by path_num
if self.result:
self.result = sorted(self.result, key=lambda x: x[0])
p, r = map(list, zip(*self.result))
self.result = r
|
[
"def",
"finalize",
"(",
"self",
")",
":",
"# todo sort self.result by path_num",
"if",
"self",
".",
"result",
":",
"self",
".",
"result",
"=",
"sorted",
"(",
"self",
".",
"result",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"p",
",",
"r",
"=",
"map",
"(",
"list",
",",
"zip",
"(",
"*",
"self",
".",
"result",
")",
")",
"self",
".",
"result",
"=",
"r"
] | 31.777778 | 9.555556 |
def register(self, model, **attr):
"""Register a model or a table with this mapper
:param model: a table or a :class:`.BaseModel` class
:return: a Model class or a table
"""
metadata = self.metadata
if not isinstance(model, Table):
model_name = self._create_model(model, **attr)
if not model_name:
return
model, name = model_name
table = model.__table__
self._declarative_register[name] = model
if name in self._bases:
for model in self._bases.pop(name):
self.register(model)
else:
table = model.tometadata(metadata)
model = table
# Register engine
engine = None
label = table.info.get('bind_label')
keys = ('%s.%s' % (label, table.key),
label, None) if label else (None,)
#
# Find the engine for this table
for key in keys:
engine = self.get_engine(key)
if engine:
break
assert engine
self.binds[table] = engine
return model
|
[
"def",
"register",
"(",
"self",
",",
"model",
",",
"*",
"*",
"attr",
")",
":",
"metadata",
"=",
"self",
".",
"metadata",
"if",
"not",
"isinstance",
"(",
"model",
",",
"Table",
")",
":",
"model_name",
"=",
"self",
".",
"_create_model",
"(",
"model",
",",
"*",
"*",
"attr",
")",
"if",
"not",
"model_name",
":",
"return",
"model",
",",
"name",
"=",
"model_name",
"table",
"=",
"model",
".",
"__table__",
"self",
".",
"_declarative_register",
"[",
"name",
"]",
"=",
"model",
"if",
"name",
"in",
"self",
".",
"_bases",
":",
"for",
"model",
"in",
"self",
".",
"_bases",
".",
"pop",
"(",
"name",
")",
":",
"self",
".",
"register",
"(",
"model",
")",
"else",
":",
"table",
"=",
"model",
".",
"tometadata",
"(",
"metadata",
")",
"model",
"=",
"table",
"# Register engine",
"engine",
"=",
"None",
"label",
"=",
"table",
".",
"info",
".",
"get",
"(",
"'bind_label'",
")",
"keys",
"=",
"(",
"'%s.%s'",
"%",
"(",
"label",
",",
"table",
".",
"key",
")",
",",
"label",
",",
"None",
")",
"if",
"label",
"else",
"(",
"None",
",",
")",
"#",
"# Find the engine for this table",
"for",
"key",
"in",
"keys",
":",
"engine",
"=",
"self",
".",
"get_engine",
"(",
"key",
")",
"if",
"engine",
":",
"break",
"assert",
"engine",
"self",
".",
"binds",
"[",
"table",
"]",
"=",
"engine",
"return",
"model"
] | 30.621622 | 13.756757 |
def noninteractive_changeset_update(self, fqn, template, old_parameters,
parameters, stack_policy, tags,
**kwargs):
"""Update a Cloudformation stack using a change set.
This is required for stacks with a defined Transform (i.e. SAM), as the
default update_stack API cannot be used with them.
Args:
fqn (str): The fully qualified name of the Cloudformation stack.
template (:class:`stacker.providers.base.Template`): A Template
object to use when updating the stack.
old_parameters (list): A list of dictionaries that defines the
parameter list on the existing Cloudformation stack.
parameters (list): A list of dictionaries that defines the
parameter list to be applied to the Cloudformation stack.
stack_policy (:class:`stacker.providers.base.Template`): A template
object representing a stack policy.
tags (list): A list of dictionaries that defines the tags
that should be applied to the Cloudformation stack.
"""
logger.debug("Using noninterative changeset provider mode "
"for %s.", fqn)
_changes, change_set_id = create_change_set(
self.cloudformation, fqn, template, parameters, tags,
'UPDATE', service_role=self.service_role, **kwargs
)
self.deal_with_changeset_stack_policy(fqn, stack_policy)
self.cloudformation.execute_change_set(
ChangeSetName=change_set_id,
)
|
[
"def",
"noninteractive_changeset_update",
"(",
"self",
",",
"fqn",
",",
"template",
",",
"old_parameters",
",",
"parameters",
",",
"stack_policy",
",",
"tags",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"\"Using noninterative changeset provider mode \"",
"\"for %s.\"",
",",
"fqn",
")",
"_changes",
",",
"change_set_id",
"=",
"create_change_set",
"(",
"self",
".",
"cloudformation",
",",
"fqn",
",",
"template",
",",
"parameters",
",",
"tags",
",",
"'UPDATE'",
",",
"service_role",
"=",
"self",
".",
"service_role",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"deal_with_changeset_stack_policy",
"(",
"fqn",
",",
"stack_policy",
")",
"self",
".",
"cloudformation",
".",
"execute_change_set",
"(",
"ChangeSetName",
"=",
"change_set_id",
",",
")"
] | 49.121212 | 24.727273 |
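For context, a plain boto3 sketch of the change-set flow the method wraps (create, wait for creation, execute); the stack name, template path and capabilities are placeholders, and the module's own helpers are deliberately not used here:

import boto3

cfn = boto3.client('cloudformation')
resp = cfn.create_change_set(
    StackName='my-sam-stack',                     # placeholder
    TemplateBody=open('template.yaml').read(),    # placeholder template
    ChangeSetName='stacker-update',
    ChangeSetType='UPDATE',
    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_AUTO_EXPAND'],
)
cfn.get_waiter('change_set_create_complete').wait(ChangeSetName=resp['Id'])
cfn.execute_change_set(ChangeSetName=resp['Id'])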
def flavor_access_list(name, projects, **kwargs):
'''
    Grants the given projects access to the flavor. The flavor must be private.
:param name: non-public flavor name
:param projects: list of projects which should have the access to the flavor
.. code-block:: yaml
nova-flavor-share:
nova.flavor_project_access:
- name: myflavor
- project:
- project1
- project2
To remove all project from access list:
.. code-block:: yaml
- project: []
'''
dry_run = __opts__['test']
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
kwargs.update({'filter': {'is_public': False}})
try:
flavor_list = __salt__['nova.flavor_list'](**kwargs)
flavor_id = flavor_list[name]['id']
except KeyError:
raise
project_list = __salt__['keystone.project_list'](**kwargs)
access_list = __salt__['nova.flavor_access_list'](flavor_id, **kwargs)
existing_list = [six.text_type(pname) for pname in project_list
if project_list[pname]['id'] in access_list[flavor_id]]
defined_list = [six.text_type(project) for project in projects]
add_list = set(defined_list) - set(existing_list)
remove_list = set(existing_list) - set(defined_list)
if not add_list and not remove_list:
ret['result'] = True
ret['comment'] = 'Flavor "{0}" access list corresponds to defined one.'.format(name)
else:
if dry_run:
ret['result'] = None
ret['comment'] = 'Flavor "{0}" access list would be corrected.'.format(name)
ret['changes'] = {name: {'new': defined_list, 'old': existing_list}}
else:
added = []
removed = []
if add_list:
for project in add_list:
added.append(__salt__['nova.flavor_access_add'](flavor_id, project_list[project]['id'], **kwargs))
if remove_list:
for project in remove_list:
removed.append(__salt__['nova.flavor_access_remove'](flavor_id,
project_list[project]['id'], **kwargs))
if any(add_list) or any(remove_list):
ret['result'] = True
ret['comment'] = 'Flavor "{0}" access list corrected.'.format(name)
ret['changes'] = {name: {'new': defined_list, 'old': existing_list}}
return ret
|
[
"def",
"flavor_access_list",
"(",
"name",
",",
"projects",
",",
"*",
"*",
"kwargs",
")",
":",
"dry_run",
"=",
"__opts__",
"[",
"'test'",
"]",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"kwargs",
".",
"update",
"(",
"{",
"'filter'",
":",
"{",
"'is_public'",
":",
"False",
"}",
"}",
")",
"try",
":",
"flavor_list",
"=",
"__salt__",
"[",
"'nova.flavor_list'",
"]",
"(",
"*",
"*",
"kwargs",
")",
"flavor_id",
"=",
"flavor_list",
"[",
"name",
"]",
"[",
"'id'",
"]",
"except",
"KeyError",
":",
"raise",
"project_list",
"=",
"__salt__",
"[",
"'keystone.project_list'",
"]",
"(",
"*",
"*",
"kwargs",
")",
"access_list",
"=",
"__salt__",
"[",
"'nova.flavor_access_list'",
"]",
"(",
"flavor_id",
",",
"*",
"*",
"kwargs",
")",
"existing_list",
"=",
"[",
"six",
".",
"text_type",
"(",
"pname",
")",
"for",
"pname",
"in",
"project_list",
"if",
"project_list",
"[",
"pname",
"]",
"[",
"'id'",
"]",
"in",
"access_list",
"[",
"flavor_id",
"]",
"]",
"defined_list",
"=",
"[",
"six",
".",
"text_type",
"(",
"project",
")",
"for",
"project",
"in",
"projects",
"]",
"add_list",
"=",
"set",
"(",
"defined_list",
")",
"-",
"set",
"(",
"existing_list",
")",
"remove_list",
"=",
"set",
"(",
"existing_list",
")",
"-",
"set",
"(",
"defined_list",
")",
"if",
"not",
"add_list",
"and",
"not",
"remove_list",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Flavor \"{0}\" access list corresponds to defined one.'",
".",
"format",
"(",
"name",
")",
"else",
":",
"if",
"dry_run",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Flavor \"{0}\" access list would be corrected.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"name",
":",
"{",
"'new'",
":",
"defined_list",
",",
"'old'",
":",
"existing_list",
"}",
"}",
"else",
":",
"added",
"=",
"[",
"]",
"removed",
"=",
"[",
"]",
"if",
"add_list",
":",
"for",
"project",
"in",
"add_list",
":",
"added",
".",
"append",
"(",
"__salt__",
"[",
"'nova.flavor_access_add'",
"]",
"(",
"flavor_id",
",",
"project_list",
"[",
"project",
"]",
"[",
"'id'",
"]",
",",
"*",
"*",
"kwargs",
")",
")",
"if",
"remove_list",
":",
"for",
"project",
"in",
"remove_list",
":",
"removed",
".",
"append",
"(",
"__salt__",
"[",
"'nova.flavor_access_remove'",
"]",
"(",
"flavor_id",
",",
"project_list",
"[",
"project",
"]",
"[",
"'id'",
"]",
",",
"*",
"*",
"kwargs",
")",
")",
"if",
"any",
"(",
"add_list",
")",
"or",
"any",
"(",
"remove_list",
")",
":",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Flavor \"{0}\" access list corrected.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"name",
":",
"{",
"'new'",
":",
"defined_list",
",",
"'old'",
":",
"existing_list",
"}",
"}",
"return",
"ret"
] | 40.278689 | 24.409836 |
def decode_cmd_out(self, completed_cmd):
"""
        Return a ParsedCompletedCommand whose stdout and stderr are decoded to text.
"""
try:
stdout = completed_cmd.stdout.encode('utf-8').decode()
except AttributeError:
try:
stdout = str(bytes(completed_cmd.stdout), 'big5').strip()
except AttributeError:
stdout = str(bytes(completed_cmd.stdout).decode('utf-8')).strip()
try:
stderr = completed_cmd.stderr.encode('utf-8').decode()
except AttributeError:
try:
stderr = str(bytes(completed_cmd.stderr), 'big5').strip()
except AttributeError:
stderr = str(bytes(completed_cmd.stderr).decode('utf-8')).strip()
return ParsedCompletedCommand(
completed_cmd.returncode,
completed_cmd.args,
stdout,
stderr
)
|
[
"def",
"decode_cmd_out",
"(",
"self",
",",
"completed_cmd",
")",
":",
"try",
":",
"stdout",
"=",
"completed_cmd",
".",
"stdout",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"decode",
"(",
")",
"except",
"AttributeError",
":",
"try",
":",
"stdout",
"=",
"str",
"(",
"bytes",
"(",
"completed_cmd",
".",
"stdout",
")",
",",
"'big5'",
")",
".",
"strip",
"(",
")",
"except",
"AttributeError",
":",
"stdout",
"=",
"str",
"(",
"bytes",
"(",
"completed_cmd",
".",
"stdout",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
".",
"strip",
"(",
")",
"try",
":",
"stderr",
"=",
"completed_cmd",
".",
"stderr",
".",
"encode",
"(",
"'utf-8'",
")",
".",
"decode",
"(",
")",
"except",
"AttributeError",
":",
"try",
":",
"stderr",
"=",
"str",
"(",
"bytes",
"(",
"completed_cmd",
".",
"stderr",
")",
",",
"'big5'",
")",
".",
"strip",
"(",
")",
"except",
"AttributeError",
":",
"stderr",
"=",
"str",
"(",
"bytes",
"(",
"completed_cmd",
".",
"stderr",
")",
".",
"decode",
"(",
"'utf-8'",
")",
")",
".",
"strip",
"(",
")",
"return",
"ParsedCompletedCommand",
"(",
"completed_cmd",
".",
"returncode",
",",
"completed_cmd",
".",
"args",
",",
"stdout",
",",
"stderr",
")"
] | 36.291667 | 17.958333 |
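A hedged usage example; `runner` stands for the object that provides decode_cmd_out, and ParsedCompletedCommand is assumed to be a namedtuple with returncode/args/stdout/stderr fields:

import subprocess

completed = subprocess.run(['git', 'status'], capture_output=True)
parsed = runner.decode_cmd_out(completed)
print(parsed.returncode)
print(parsed.stdout)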
def _all(self, *args, **kwargs):
'''
        Return the complete summary of the system.
'''
data = dict()
data['software'] = self._software(**kwargs)
data['system'] = self._system(**kwargs)
data['services'] = self._services(**kwargs)
try:
data['configuration'] = self._configuration(**kwargs)
except InspectorQueryException as ex:
data['configuration'] = 'N/A'
log.error(ex)
data['payload'] = self._payload(**kwargs) or 'N/A'
return data
|
[
"def",
"_all",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"dict",
"(",
")",
"data",
"[",
"'software'",
"]",
"=",
"self",
".",
"_software",
"(",
"*",
"*",
"kwargs",
")",
"data",
"[",
"'system'",
"]",
"=",
"self",
".",
"_system",
"(",
"*",
"*",
"kwargs",
")",
"data",
"[",
"'services'",
"]",
"=",
"self",
".",
"_services",
"(",
"*",
"*",
"kwargs",
")",
"try",
":",
"data",
"[",
"'configuration'",
"]",
"=",
"self",
".",
"_configuration",
"(",
"*",
"*",
"kwargs",
")",
"except",
"InspectorQueryException",
"as",
"ex",
":",
"data",
"[",
"'configuration'",
"]",
"=",
"'N/A'",
"log",
".",
"error",
"(",
"ex",
")",
"data",
"[",
"'payload'",
"]",
"=",
"self",
".",
"_payload",
"(",
"*",
"*",
"kwargs",
")",
"or",
"'N/A'",
"return",
"data"
] | 34.0625 | 17.6875 |
def get_active_position(self, category, name, nofallback=False):
"""
Get active position for given position name.
params:
category - Category model to look for
name - name of the position
            nofallback - if True then do not fall back to parent
category if active position is not found for category
"""
now = timezone.now()
lookup = (Q(active_from__isnull=True) | Q(active_from__lte=now)) & \
(Q(active_till__isnull=True) | Q(active_till__gt=now))
while True:
try:
return self.get(lookup, category=category, name=name,
disabled=False)
except Position.DoesNotExist:
# if nofallback was specified, do not look into parent categories
if nofallback:
return False
# traverse the category tree to the top otherwise
category = category.tree_parent
# we reached the top and still haven't found the position - return
if category is None:
return False
|
[
"def",
"get_active_position",
"(",
"self",
",",
"category",
",",
"name",
",",
"nofallback",
"=",
"False",
")",
":",
"now",
"=",
"timezone",
".",
"now",
"(",
")",
"lookup",
"=",
"(",
"Q",
"(",
"active_from__isnull",
"=",
"True",
")",
"|",
"Q",
"(",
"active_from__lte",
"=",
"now",
")",
")",
"&",
"(",
"Q",
"(",
"active_till__isnull",
"=",
"True",
")",
"|",
"Q",
"(",
"active_till__gt",
"=",
"now",
")",
")",
"while",
"True",
":",
"try",
":",
"return",
"self",
".",
"get",
"(",
"lookup",
",",
"category",
"=",
"category",
",",
"name",
"=",
"name",
",",
"disabled",
"=",
"False",
")",
"except",
"Position",
".",
"DoesNotExist",
":",
"# if nofallback was specified, do not look into parent categories",
"if",
"nofallback",
":",
"return",
"False",
"# traverse the category tree to the top otherwise",
"category",
"=",
"category",
".",
"tree_parent",
"# we reached the top and still haven't found the position - return",
"if",
"category",
"is",
"None",
":",
"return",
"False"
] | 39.37931 | 20.551724 |
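A hedged usage sketch; the manager above is assumed to be attached as Position.objects, and the position name is made up:

position = Position.objects.get_active_position(category, 'homepage-banner')
if position:                      # the method returns False when nothing is active
    render(position)              # hypothetical rendering step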
def create_observation_streams(self, num_streams, h_size, num_layers):
"""
Creates encoding stream for observations.
:param num_streams: Number of streams to create.
:param h_size: Size of hidden linear layers in stream.
:param num_layers: Number of hidden linear layers in stream.
:return: List of encoded streams.
"""
brain = self.brain
activation_fn = self.swish
self.visual_in = []
for i in range(brain.number_visual_observations):
visual_input = self.create_visual_input(brain.camera_resolutions[i],
name="visual_observation_" + str(i))
self.visual_in.append(visual_input)
vector_observation_input = self.create_vector_input()
final_hiddens = []
for i in range(num_streams):
visual_encoders = []
hidden_state, hidden_visual = None, None
if self.vis_obs_size > 0:
for j in range(brain.number_visual_observations):
encoded_visual = self.create_visual_observation_encoder(self.visual_in[j],
h_size,
activation_fn,
num_layers,
"main_graph_{}_encoder{}"
.format(i, j), False)
visual_encoders.append(encoded_visual)
hidden_visual = tf.concat(visual_encoders, axis=1)
if brain.vector_observation_space_size > 0:
hidden_state = self.create_vector_observation_encoder(vector_observation_input,
h_size, activation_fn,
num_layers,
"main_graph_{}".format(i),
False)
if hidden_state is not None and hidden_visual is not None:
final_hidden = tf.concat([hidden_visual, hidden_state], axis=1)
elif hidden_state is None and hidden_visual is not None:
final_hidden = hidden_visual
elif hidden_state is not None and hidden_visual is None:
final_hidden = hidden_state
else:
raise Exception("No valid network configuration possible. "
"There are no states or observations in this brain")
final_hiddens.append(final_hidden)
return final_hiddens
|
[
"def",
"create_observation_streams",
"(",
"self",
",",
"num_streams",
",",
"h_size",
",",
"num_layers",
")",
":",
"brain",
"=",
"self",
".",
"brain",
"activation_fn",
"=",
"self",
".",
"swish",
"self",
".",
"visual_in",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"brain",
".",
"number_visual_observations",
")",
":",
"visual_input",
"=",
"self",
".",
"create_visual_input",
"(",
"brain",
".",
"camera_resolutions",
"[",
"i",
"]",
",",
"name",
"=",
"\"visual_observation_\"",
"+",
"str",
"(",
"i",
")",
")",
"self",
".",
"visual_in",
".",
"append",
"(",
"visual_input",
")",
"vector_observation_input",
"=",
"self",
".",
"create_vector_input",
"(",
")",
"final_hiddens",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"num_streams",
")",
":",
"visual_encoders",
"=",
"[",
"]",
"hidden_state",
",",
"hidden_visual",
"=",
"None",
",",
"None",
"if",
"self",
".",
"vis_obs_size",
">",
"0",
":",
"for",
"j",
"in",
"range",
"(",
"brain",
".",
"number_visual_observations",
")",
":",
"encoded_visual",
"=",
"self",
".",
"create_visual_observation_encoder",
"(",
"self",
".",
"visual_in",
"[",
"j",
"]",
",",
"h_size",
",",
"activation_fn",
",",
"num_layers",
",",
"\"main_graph_{}_encoder{}\"",
".",
"format",
"(",
"i",
",",
"j",
")",
",",
"False",
")",
"visual_encoders",
".",
"append",
"(",
"encoded_visual",
")",
"hidden_visual",
"=",
"tf",
".",
"concat",
"(",
"visual_encoders",
",",
"axis",
"=",
"1",
")",
"if",
"brain",
".",
"vector_observation_space_size",
">",
"0",
":",
"hidden_state",
"=",
"self",
".",
"create_vector_observation_encoder",
"(",
"vector_observation_input",
",",
"h_size",
",",
"activation_fn",
",",
"num_layers",
",",
"\"main_graph_{}\"",
".",
"format",
"(",
"i",
")",
",",
"False",
")",
"if",
"hidden_state",
"is",
"not",
"None",
"and",
"hidden_visual",
"is",
"not",
"None",
":",
"final_hidden",
"=",
"tf",
".",
"concat",
"(",
"[",
"hidden_visual",
",",
"hidden_state",
"]",
",",
"axis",
"=",
"1",
")",
"elif",
"hidden_state",
"is",
"None",
"and",
"hidden_visual",
"is",
"not",
"None",
":",
"final_hidden",
"=",
"hidden_visual",
"elif",
"hidden_state",
"is",
"not",
"None",
"and",
"hidden_visual",
"is",
"None",
":",
"final_hidden",
"=",
"hidden_state",
"else",
":",
"raise",
"Exception",
"(",
"\"No valid network configuration possible. \"",
"\"There are no states or observations in this brain\"",
")",
"final_hiddens",
".",
"append",
"(",
"final_hidden",
")",
"return",
"final_hiddens"
] | 57.734694 | 26.142857 |
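A usage sketch from inside a model subclass: the two streams typically feed separate heads (for example policy and value); the sizes are illustrative:

# Inside a model's build method (hypothetical call site):
hidden_policy, hidden_value = self.create_observation_streams(
    num_streams=2, h_size=128, num_layers=2)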
def download_and_transfer_sample(job, sample, inputs):
"""
Downloads a sample from CGHub via GeneTorrent, then uses S3AM to transfer it to S3
    job: Toil job instance
    sample: tuple whose first element is an analysis ID for a sample in CGHub
    inputs: dict Dictionary of input arguments
"""
analysis_id = sample[0]
work_dir = job.fileStore.getLocalTempDir()
folder_path = os.path.join(work_dir, os.path.basename(analysis_id))
# Acquire genetorrent key and download sample
shutil.copy(inputs['genetorrent_key'], os.path.join(work_dir, 'cghub.key'))
parameters = ['-vv', '-c', 'cghub.key', '-d', analysis_id]
docker_call(job=job, tool='quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b',
work_dir=work_dir, parameters=parameters)
try:
sample = glob.glob(os.path.join(folder_path, '*tar*'))[0]
    except IndexError as e:
        print 'No tarfile found inside of folder: {}'.format(e)
raise
# Upload sample to S3AM
key_path = inputs['ssec']
if sample.endswith('gz'):
sample_name = analysis_id + '.tar.gz'
shutil.move(sample, os.path.join(work_dir, sample_name))
else:
sample_name = analysis_id + '.tar'
shutil.move(sample, os.path.join(work_dir, sample_name))
# Parse s3_dir to get bucket and s3 path
s3_dir = inputs['s3_dir']
bucket_name = s3_dir.lstrip('/').split('/')[0]
base_url = 'https://s3-us-west-2.amazonaws.com/'
url = os.path.join(base_url, bucket_name, sample_name)
# Generate keyfile for upload
with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
f_out.write(generate_unique_key(key_path, url))
# Upload to S3 via S3AM
s3am_command = ['s3am',
'upload',
'--sse-key-file', os.path.join(work_dir, 'temp.key'),
'file://{}'.format(os.path.join(work_dir, sample_name)),
's3://' + bucket_name + '/']
subprocess.check_call(s3am_command)
|
[
"def",
"download_and_transfer_sample",
"(",
"job",
",",
"sample",
",",
"inputs",
")",
":",
"analysis_id",
"=",
"sample",
"[",
"0",
"]",
"work_dir",
"=",
"job",
".",
"fileStore",
".",
"getLocalTempDir",
"(",
")",
"folder_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"analysis_id",
")",
")",
"# Acquire genetorrent key and download sample",
"shutil",
".",
"copy",
"(",
"inputs",
"[",
"'genetorrent_key'",
"]",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'cghub.key'",
")",
")",
"parameters",
"=",
"[",
"'-vv'",
",",
"'-c'",
",",
"'cghub.key'",
",",
"'-d'",
",",
"analysis_id",
"]",
"docker_call",
"(",
"job",
"=",
"job",
",",
"tool",
"=",
"'quay.io/ucsc_cgl/genetorrent:3.8.7--9911761265b6f08bc3ef09f53af05f56848d805b'",
",",
"work_dir",
"=",
"work_dir",
",",
"parameters",
"=",
"parameters",
")",
"try",
":",
"sample",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder_path",
",",
"'*tar*'",
")",
")",
"[",
"0",
"]",
"except",
"KeyError",
"as",
"e",
":",
"print",
"'No tarfile found inside of folder: '",
".",
"format",
"(",
"e",
")",
"raise",
"# Upload sample to S3AM",
"key_path",
"=",
"inputs",
"[",
"'ssec'",
"]",
"if",
"sample",
".",
"endswith",
"(",
"'gz'",
")",
":",
"sample_name",
"=",
"analysis_id",
"+",
"'.tar.gz'",
"shutil",
".",
"move",
"(",
"sample",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"sample_name",
")",
")",
"else",
":",
"sample_name",
"=",
"analysis_id",
"+",
"'.tar'",
"shutil",
".",
"move",
"(",
"sample",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"sample_name",
")",
")",
"# Parse s3_dir to get bucket and s3 path",
"s3_dir",
"=",
"inputs",
"[",
"'s3_dir'",
"]",
"bucket_name",
"=",
"s3_dir",
".",
"lstrip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"base_url",
"=",
"'https://s3-us-west-2.amazonaws.com/'",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_url",
",",
"bucket_name",
",",
"sample_name",
")",
"# Generate keyfile for upload",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'temp.key'",
")",
",",
"'wb'",
")",
"as",
"f_out",
":",
"f_out",
".",
"write",
"(",
"generate_unique_key",
"(",
"key_path",
",",
"url",
")",
")",
"# Upload to S3 via S3AM",
"s3am_command",
"=",
"[",
"'s3am'",
",",
"'upload'",
",",
"'--sse-key-file'",
",",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'temp.key'",
")",
",",
"'file://{}'",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"sample_name",
")",
")",
",",
"'s3://'",
"+",
"bucket_name",
"+",
"'/'",
"]",
"subprocess",
".",
"check_call",
"(",
"s3am_command",
")"
] | 44.568182 | 19.204545 |
def compactness_frompts(geoseries):
"""
Inverse of 4 * pi * Area / perimeter^2
"""
measure = (
4 * 3.1415 * (
(geoseries.unary_union).convex_hull.area)
) / (
(geoseries.unary_union).convex_hull.length
)
return measure
|
[
"def",
"compactness_frompts",
"(",
"geoseries",
")",
":",
"measure",
"=",
"(",
"4",
"*",
"3.1415",
"*",
"(",
"(",
"geoseries",
".",
"unary_union",
")",
".",
"convex_hull",
".",
"area",
")",
")",
"/",
"(",
"(",
"geoseries",
".",
"unary_union",
")",
".",
"convex_hull",
".",
"length",
")",
"return",
"measure"
] | 25 | 14.272727 |
def remove_tar_files(file_list):
"""Public function that removes temporary tar archive files in a local directory"""
for f in file_list:
if file_exists(f) and f.endswith('.tar'):
os.remove(f)
|
[
"def",
"remove_tar_files",
"(",
"file_list",
")",
":",
"for",
"f",
"in",
"file_list",
":",
"if",
"file_exists",
"(",
"f",
")",
"and",
"f",
".",
"endswith",
"(",
"'.tar'",
")",
":",
"os",
".",
"remove",
"(",
"f",
")"
] | 43 | 10 |
def _populate_tournament_payoff_array0(payoff_array, k, indices, indptr):
"""
Populate `payoff_array` with the payoff values for player 0 in the
tournament game given a random tournament graph in CSR format.
Parameters
----------
payoff_array : ndarray(float, ndim=2)
ndarray of shape (n, m), where m = n choose k, prefilled with
zeros. Modified in place.
k : scalar(int)
Size of the subsets of nodes.
indices : ndarray(int, ndim=1)
CSR format index array of the adjacency matrix of the tournament
graph.
indptr : ndarray(int, ndim=1)
CSR format index pointer array of the adjacency matrix of the
tournament graph.
"""
n = payoff_array.shape[0]
X = np.empty(k, dtype=np.int_)
a = np.empty(k, dtype=np.int_)
for i in range(n):
d = indptr[i+1] - indptr[i]
if d >= k:
for j in range(k):
a[j] = j
while a[-1] < d:
for j in range(k):
X[j] = indices[indptr[i]+a[j]]
payoff_array[i, k_array_rank_jit(X)] = 1
a = next_k_array(a)
|
[
"def",
"_populate_tournament_payoff_array0",
"(",
"payoff_array",
",",
"k",
",",
"indices",
",",
"indptr",
")",
":",
"n",
"=",
"payoff_array",
".",
"shape",
"[",
"0",
"]",
"X",
"=",
"np",
".",
"empty",
"(",
"k",
",",
"dtype",
"=",
"np",
".",
"int_",
")",
"a",
"=",
"np",
".",
"empty",
"(",
"k",
",",
"dtype",
"=",
"np",
".",
"int_",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"d",
"=",
"indptr",
"[",
"i",
"+",
"1",
"]",
"-",
"indptr",
"[",
"i",
"]",
"if",
"d",
">=",
"k",
":",
"for",
"j",
"in",
"range",
"(",
"k",
")",
":",
"a",
"[",
"j",
"]",
"=",
"j",
"while",
"a",
"[",
"-",
"1",
"]",
"<",
"d",
":",
"for",
"j",
"in",
"range",
"(",
"k",
")",
":",
"X",
"[",
"j",
"]",
"=",
"indices",
"[",
"indptr",
"[",
"i",
"]",
"+",
"a",
"[",
"j",
"]",
"]",
"payoff_array",
"[",
"i",
",",
"k_array_rank_jit",
"(",
"X",
")",
"]",
"=",
"1",
"a",
"=",
"next_k_array",
"(",
"a",
")"
] | 34.121212 | 16.363636 |
def groupby_tags(item_list, tags_list):
r"""
case where an item can belong to multiple groups
Args:
item_list (list):
tags_list (list):
Returns:
dict: groupid_to_items
CommandLine:
python -m utool.util_dict --test-groupby_tags
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> tagged_item_list = {
>>> 'spam': ['meat', 'protein', 'food'],
>>> 'eggs': ['protein', 'food'],
>>> 'cheese': ['dairy', 'protein', 'food'],
>>> 'jam': ['fruit', 'food'],
>>> 'banana': ['weapon', 'fruit', 'food'],
>>> }
>>> item_list = list(tagged_item_list.keys())
>>> tags_list = list(tagged_item_list.values())
>>> groupid_to_items = groupby_tags(item_list, tags_list)
>>> groupid_to_items = ut.map_vals(sorted, groupid_to_items)
>>> result = ('groupid_to_items = %s' % (ut.repr4(groupid_to_items),))
>>> print(result)
groupid_to_items = {
'dairy': ['cheese'],
'food': ['banana', 'cheese', 'eggs', 'jam', 'spam'],
'fruit': ['banana', 'jam'],
'meat': ['spam'],
'protein': ['cheese', 'eggs', 'spam'],
'weapon': ['banana'],
}
"""
groupid_to_items = defaultdict(list)
for tags, item in zip(tags_list, item_list):
for tag in tags:
groupid_to_items[tag].append(item)
return groupid_to_items
|
[
"def",
"groupby_tags",
"(",
"item_list",
",",
"tags_list",
")",
":",
"groupid_to_items",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"tags",
",",
"item",
"in",
"zip",
"(",
"tags_list",
",",
"item_list",
")",
":",
"for",
"tag",
"in",
"tags",
":",
"groupid_to_items",
"[",
"tag",
"]",
".",
"append",
"(",
"item",
")",
"return",
"groupid_to_items"
] | 32.608696 | 16.76087 |
def recursive_division(self, cells, min_size, width, height, x=0, y=0, depth=0):
"""
Recursive division:
1. Split room randomly
1a. Dodge towards larger half if in doorway
2. Place doorway randomly
3. Repeat for each half
"""
assert isinstance(cells, list)
assert isinstance(min_size, int) or isinstance(min_size, float)
assert isinstance(width, int) or isinstance(width, float)
assert isinstance(height, int) or isinstance(height, float)
assert isinstance(x, int) or isinstance(x, float)
assert isinstance(y, int) or isinstance(y, float)
assert isinstance(depth, int)
if width <= min_size or height <= min_size:
return
# Choose axis to divide
if width < height:
axis = VERTICAL
elif height < width:
axis = HORIZONTAL
else:
axis = randint(0,1)
cut_size = height
gap_size = width
if axis == HORIZONTAL:
cut_size = width
gap_size = height
if cut_size-min_size < min_size:
#print('min cut')
return
if gap_size-min_size < min_size:
#print('min gap')
return
# Random division and doorway
cut = randint(min_size, cut_size-min_size)
gap = randint(min_size, gap_size-min_size)
if not (cut > 0 and gap > 0):
#print('Reached zero sized cell')
return
# Check if next tile is a doorway
def dodge_doors(cut):
assert isinstance(cut, int) or isinstance(cut, float)
empty = False
if axis == HORIZONTAL:
idx = x+gap_size
#print(idx,y+cut)
door = cells[idx][y+cut]
empty = empty or not door or not door.tile
#door.tile = cells[49][1].tile
idx = x
#print(idx,y+cut)
door = cells[idx][y+cut]
empty = empty or not door or not door.tile
#door.tile = cells[49][0].tile
else:
idx = y+gap_size
#print(x+cut, idx)
door = cells[x+cut][idx]
empty = empty or not door or not door.tile
#door.tile = cells[49][0].tile
idx = y
#print(x+cut,idx)
door = cells[x+cut][idx]
empty = empty or not door or not door.tile
#door.tile = cells[49][1].tile
# Try again on longest side
if empty:
#print('Door', idx, cut)
if gap + (min_size / 2) > (gap_size / 2) - (min_size / 2):
cut -= 1
else:
cut += 1
if cut < min_size or cut > cut_size-min_size:
#print('Reached minimum size')
return None
else:
return dodge_doors(cut)
return cut
# Skip doors check first time around
if depth > 0:
cut = dodge_doors(cut)
if cut is None:
#print('No viable cut found')
return None
depth += 1
# Create new wall tiles
for i in xrange(0, gap_size):
if abs(gap - i) > 0:
# Copy wall tile from (0,0)
if axis == HORIZONTAL:
cells[x+i][y+cut].tile = cells[0][0].tile
else:
cells[x+cut][y+i].tile = cells[0][0].tile
# Recurse into each half
#print(x, y, [cut, gap], [cut_size, gap_size], 'H' if (axis == HORIZONTAL) else 'V')
# N
nx, ny = x, y
w, h = [cut, height] if (axis == HORIZONTAL) else [width, cut]
self.recursive_division(cells, min_size, w, h, nx, ny, depth)
# S
nx, ny = [x+cut, y] if (axis != HORIZONTAL) else [x, y+cut]
w, h = [cut_size-cut, height] if (axis == HORIZONTAL) else [width, cut_size-cut]
self.recursive_division(cells, min_size, w, h, nx, ny, depth)
|
[
"def",
"recursive_division",
"(",
"self",
",",
"cells",
",",
"min_size",
",",
"width",
",",
"height",
",",
"x",
"=",
"0",
",",
"y",
"=",
"0",
",",
"depth",
"=",
"0",
")",
":",
"assert",
"isinstance",
"(",
"cells",
",",
"list",
")",
"assert",
"isinstance",
"(",
"min_size",
",",
"int",
")",
"or",
"isinstance",
"(",
"min_size",
",",
"float",
")",
"assert",
"isinstance",
"(",
"width",
",",
"int",
")",
"or",
"isinstance",
"(",
"width",
",",
"float",
")",
"assert",
"isinstance",
"(",
"height",
",",
"int",
")",
"or",
"isinstance",
"(",
"height",
",",
"float",
")",
"assert",
"isinstance",
"(",
"x",
",",
"int",
")",
"or",
"isinstance",
"(",
"x",
",",
"float",
")",
"assert",
"isinstance",
"(",
"y",
",",
"int",
")",
"or",
"isinstance",
"(",
"y",
",",
"float",
")",
"assert",
"isinstance",
"(",
"depth",
",",
"int",
")",
"if",
"width",
"<=",
"min_size",
"or",
"height",
"<=",
"min_size",
":",
"return",
"# Choose axis to divide",
"if",
"width",
"<",
"height",
":",
"axis",
"=",
"VERTICAL",
"elif",
"height",
"<",
"width",
":",
"axis",
"=",
"HORIZONTAL",
"else",
":",
"axis",
"=",
"randint",
"(",
"0",
",",
"1",
")",
"cut_size",
"=",
"height",
"gap_size",
"=",
"width",
"if",
"axis",
"==",
"HORIZONTAL",
":",
"cut_size",
"=",
"width",
"gap_size",
"=",
"height",
"if",
"cut_size",
"-",
"min_size",
"<",
"min_size",
":",
"#print('min cut')",
"return",
"if",
"gap_size",
"-",
"min_size",
"<",
"min_size",
":",
"#print('min gap')",
"return",
"# Random division and doorway",
"cut",
"=",
"randint",
"(",
"min_size",
",",
"cut_size",
"-",
"min_size",
")",
"gap",
"=",
"randint",
"(",
"min_size",
",",
"gap_size",
"-",
"min_size",
")",
"if",
"not",
"(",
"cut",
">",
"0",
"and",
"gap",
">",
"0",
")",
":",
"#print('Reached zero sized cell')",
"return",
"# Check if next tile is a doorway",
"def",
"dodge_doors",
"(",
"cut",
")",
":",
"assert",
"isinstance",
"(",
"cut",
",",
"int",
")",
"or",
"isinstance",
"(",
"cut",
",",
"float",
")",
"empty",
"=",
"False",
"if",
"axis",
"==",
"HORIZONTAL",
":",
"idx",
"=",
"x",
"+",
"gap_size",
"#print(idx,y+cut)",
"door",
"=",
"cells",
"[",
"idx",
"]",
"[",
"y",
"+",
"cut",
"]",
"empty",
"=",
"empty",
"or",
"not",
"door",
"or",
"not",
"door",
".",
"tile",
"#door.tile = cells[49][1].tile",
"idx",
"=",
"x",
"#print(idx,y+cut)",
"door",
"=",
"cells",
"[",
"idx",
"]",
"[",
"y",
"+",
"cut",
"]",
"empty",
"=",
"empty",
"or",
"not",
"door",
"or",
"not",
"door",
".",
"tile",
"#door.tile = cells[49][0].tile",
"else",
":",
"idx",
"=",
"y",
"+",
"gap_size",
"#print(x+cut, idx)",
"door",
"=",
"cells",
"[",
"x",
"+",
"cut",
"]",
"[",
"idx",
"]",
"empty",
"=",
"empty",
"or",
"not",
"door",
"or",
"not",
"door",
".",
"tile",
"#door.tile = cells[49][0].tile",
"idx",
"=",
"y",
"#print(x+cut,idx)",
"door",
"=",
"cells",
"[",
"x",
"+",
"cut",
"]",
"[",
"idx",
"]",
"empty",
"=",
"empty",
"or",
"not",
"door",
"or",
"not",
"door",
".",
"tile",
"#door.tile = cells[49][1].tile",
"# Try again on longest side",
"if",
"empty",
":",
"#print('Door', idx, cut)",
"if",
"gap",
"+",
"(",
"min_size",
"/",
"2",
")",
">",
"(",
"gap_size",
"/",
"2",
")",
"-",
"(",
"min_size",
"/",
"2",
")",
":",
"cut",
"-=",
"1",
"else",
":",
"cut",
"+=",
"1",
"if",
"cut",
"<",
"min_size",
"or",
"cut",
">",
"cut_size",
"-",
"min_size",
":",
"#print('Reached minimum size')",
"return",
"None",
"else",
":",
"return",
"dodge_doors",
"(",
"cut",
")",
"return",
"cut",
"# Skip doors check first time around",
"if",
"depth",
">",
"0",
":",
"cut",
"=",
"dodge_doors",
"(",
"cut",
")",
"if",
"cut",
"is",
"None",
":",
"#print('No viable cut found')",
"return",
"None",
"depth",
"+=",
"1",
"# Create new wall tiles",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"gap_size",
")",
":",
"if",
"abs",
"(",
"gap",
"-",
"i",
")",
">",
"0",
":",
"# Copy wall tile from (0,0)",
"if",
"axis",
"==",
"HORIZONTAL",
":",
"cells",
"[",
"x",
"+",
"i",
"]",
"[",
"y",
"+",
"cut",
"]",
".",
"tile",
"=",
"cells",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"tile",
"else",
":",
"cells",
"[",
"x",
"+",
"cut",
"]",
"[",
"y",
"+",
"i",
"]",
".",
"tile",
"=",
"cells",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"tile",
"# Recurse into each half",
"#print(x, y, [cut, gap], [cut_size, gap_size], 'H' if (axis == HORIZONTAL) else 'V')",
"# N",
"nx",
",",
"ny",
"=",
"x",
",",
"y",
"w",
",",
"h",
"=",
"[",
"cut",
",",
"height",
"]",
"if",
"(",
"axis",
"==",
"HORIZONTAL",
")",
"else",
"[",
"width",
",",
"cut",
"]",
"self",
".",
"recursive_division",
"(",
"cells",
",",
"min_size",
",",
"w",
",",
"h",
",",
"nx",
",",
"ny",
",",
"depth",
")",
"# S",
"nx",
",",
"ny",
"=",
"[",
"x",
"+",
"cut",
",",
"y",
"]",
"if",
"(",
"axis",
"!=",
"HORIZONTAL",
")",
"else",
"[",
"x",
",",
"y",
"+",
"cut",
"]",
"w",
",",
"h",
"=",
"[",
"cut_size",
"-",
"cut",
",",
"height",
"]",
"if",
"(",
"axis",
"==",
"HORIZONTAL",
")",
"else",
"[",
"width",
",",
"cut_size",
"-",
"cut",
"]",
"self",
".",
"recursive_division",
"(",
"cells",
",",
"min_size",
",",
"w",
",",
"h",
",",
"nx",
",",
"ny",
",",
"depth",
")"
] | 33.327869 | 17.196721 |
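The method above is written for Python 2 (xrange), relies on module-level VERTICAL/HORIZONTAL constants and a grid of Cell objects with tiles, and dodges existing doorways when placing a wall. The snippet below is a much-reduced Python 3 stand-in on a plain boolean grid: it keeps the split-wall-with-one-gap recursion but drops the dodge_doors step, so a later wall can occasionally block an earlier passage; the names and defaults here are assumptions, not the original API.

from random import randrange

WALL, OPEN = True, False

def divide(grid, top, left, height, width, min_size=2):
    """Recursively split the region with walls, leaving one open door per wall."""
    if height <= min_size or width <= min_size:
        return
    # Split across the longer dimension; break ties randomly.
    horizontal = height > width or (height == width and randrange(2) == 0)
    if horizontal:
        wall = top + randrange(1, height - 1)   # row carrying the new wall
        door = left + randrange(width)          # column left open in that row
        for col in range(left, left + width):
            if col != door:
                grid[wall][col] = WALL
        divide(grid, top, left, wall - top, width, min_size)
        divide(grid, wall + 1, left, top + height - wall - 1, width, min_size)
    else:
        wall = left + randrange(1, width - 1)   # column carrying the new wall
        door = top + randrange(height)          # row left open in that column
        for row in range(top, top + height):
            if row != door:
                grid[row][wall] = WALL
        divide(grid, top, left, height, wall - left, min_size)
        divide(grid, top, wall + 1, height, left + width - wall - 1, min_size)

if __name__ == '__main__':
    rows, cols = 11, 21
    grid = [[OPEN] * cols for _ in range(rows)]
    divide(grid, 0, 0, rows, cols)
    print('\n'.join(''.join('#' if c else '.' for c in r) for r in grid))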