text (string, 89–104k chars) | code_tokens (list) | avg_line_len (float64, 7.91–980) | score (float64, 0–630) |
---|---|---|---|
def _exp_schedule(iteration, k=20, lam=0.005, limit=100):
'''
Possible scheduler for simulated_annealing, based on the aima example.
'''
return k * math.exp(-lam * iteration)
|
[
"def",
"_exp_schedule",
"(",
"iteration",
",",
"k",
"=",
"20",
",",
"lam",
"=",
"0.005",
",",
"limit",
"=",
"100",
")",
":",
"return",
"k",
"*",
"math",
".",
"exp",
"(",
"-",
"lam",
"*",
"iteration",
")"
] | 37.2 | 23.6 |
def as_dict(self):
"""json friendly dict representation of Kpoints"""
d = {"comment": self.comment, "nkpoints": self.num_kpts,
"generation_style": self.style.name, "kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights, "coord_type": self.coord_type,
"labels": self.labels, "tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
|
[
"def",
"as_dict",
"(",
"self",
")",
":",
"d",
"=",
"{",
"\"comment\"",
":",
"self",
".",
"comment",
",",
"\"nkpoints\"",
":",
"self",
".",
"num_kpts",
",",
"\"generation_style\"",
":",
"self",
".",
"style",
".",
"name",
",",
"\"kpoints\"",
":",
"self",
".",
"kpts",
",",
"\"usershift\"",
":",
"self",
".",
"kpts_shift",
",",
"\"kpts_weights\"",
":",
"self",
".",
"kpts_weights",
",",
"\"coord_type\"",
":",
"self",
".",
"coord_type",
",",
"\"labels\"",
":",
"self",
".",
"labels",
",",
"\"tet_number\"",
":",
"self",
".",
"tet_number",
",",
"\"tet_weight\"",
":",
"self",
".",
"tet_weight",
",",
"\"tet_connections\"",
":",
"self",
".",
"tet_connections",
"}",
"optional_paras",
"=",
"[",
"\"genvec1\"",
",",
"\"genvec2\"",
",",
"\"genvec3\"",
",",
"\"shift\"",
"]",
"for",
"para",
"in",
"optional_paras",
":",
"if",
"para",
"in",
"self",
".",
"__dict__",
":",
"d",
"[",
"para",
"]",
"=",
"self",
".",
"__dict__",
"[",
"para",
"]",
"d",
"[",
"\"@module\"",
"]",
"=",
"self",
".",
"__class__",
".",
"__module__",
"d",
"[",
"\"@class\"",
"]",
"=",
"self",
".",
"__class__",
".",
"__name__",
"return",
"d"
] | 46.235294 | 16.235294 |
def output_args(f):
"""decorator for output-formatting args
applied to %pxresult and %%px
"""
args = [
magic_arguments.argument('-r', action="store_const", dest='groupby',
const='order',
help="collate outputs in order (same as group-outputs=order)"
),
magic_arguments.argument('-e', action="store_const", dest='groupby',
const='engine',
help="group outputs by engine (same as group-outputs=engine)"
),
magic_arguments.argument('--group-outputs', dest='groupby', type=str,
choices=['engine', 'order', 'type'], default='type',
help="""Group the outputs in a particular way.
Choices are:
type: group outputs of all engines by type (stdout, stderr, displaypub, etc.).
engine: display all output for each engine together.
order: like type, but individual displaypub output from each engine is collated.
For example, if multiple plots are generated by each engine, the first
figure of each engine will be displayed, then the second of each, etc.
"""
),
magic_arguments.argument('-o', '--out', dest='save_name', type=str,
help="""store the AsyncResult object for this computation
in the global namespace under this name.
"""
),
]
for a in args:
f = a(f)
return f
|
[
"def",
"output_args",
"(",
"f",
")",
":",
"args",
"=",
"[",
"magic_arguments",
".",
"argument",
"(",
"'-r'",
",",
"action",
"=",
"\"store_const\"",
",",
"dest",
"=",
"'groupby'",
",",
"const",
"=",
"'order'",
",",
"help",
"=",
"\"collate outputs in order (same as group-outputs=order)\"",
")",
",",
"magic_arguments",
".",
"argument",
"(",
"'-e'",
",",
"action",
"=",
"\"store_const\"",
",",
"dest",
"=",
"'groupby'",
",",
"const",
"=",
"'engine'",
",",
"help",
"=",
"\"group outputs by engine (same as group-outputs=engine)\"",
")",
",",
"magic_arguments",
".",
"argument",
"(",
"'--group-outputs'",
",",
"dest",
"=",
"'groupby'",
",",
"type",
"=",
"str",
",",
"choices",
"=",
"[",
"'engine'",
",",
"'order'",
",",
"'type'",
"]",
",",
"default",
"=",
"'type'",
",",
"help",
"=",
"\"\"\"Group the outputs in a particular way.\n \n Choices are:\n \n type: group outputs of all engines by type (stdout, stderr, displaypub, etc.).\n \n engine: display all output for each engine together.\n\n order: like type, but individual displaypub output from each engine is collated.\n For example, if multiple plots are generated by each engine, the first\n figure of each engine will be displayed, then the second of each, etc.\n \"\"\"",
")",
",",
"magic_arguments",
".",
"argument",
"(",
"'-o'",
",",
"'--out'",
",",
"dest",
"=",
"'save_name'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"\"\"store the AsyncResult object for this computation\n in the global namespace under this name.\n \"\"\"",
")",
",",
"]",
"for",
"a",
"in",
"args",
":",
"f",
"=",
"a",
"(",
"f",
")",
"return",
"f"
] | 38.631579 | 25.184211 |
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.extensions['inspire-crawler'] = self
app.cli.add_command(crawler_cmd)
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"self",
".",
"init_config",
"(",
"app",
")",
"app",
".",
"extensions",
"[",
"'inspire-crawler'",
"]",
"=",
"self",
"app",
".",
"cli",
".",
"add_command",
"(",
"crawler_cmd",
")"
] | 37.6 | 7 |
def read_blob(self,blob_dim,n_blob=0):
"""Read blob from a selection.
"""
n_blobs = self.calc_n_blobs(blob_dim)
if n_blob > n_blobs or n_blob < 0:
raise ValueError('Please provide correct n_blob value. Given %i, but max values is %i'%(n_blob,n_blobs))
# This prevents issues when the last blob is smaller than the others in time.
if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:
updated_blob_dim = (int(self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob), 1, int(blob_dim[self.freq_axis]))
else:
updated_blob_dim = [int(i) for i in blob_dim]
blob_start = self._find_blob_start()
blob = np.zeros(updated_blob_dim, dtype=self._d_type)
# EE: For now; also assuming one polarization and one beam.
# Assuming the blob will loop over the whole frequency range.
if self.f_start == self.f_begin and self.f_stop == self.f_end:
blob_flat_size = np.prod(blob_dim)
updated_blob_flat_size = np.prod(updated_blob_dim)
# Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_flat_size)))
dd = np.fromfile(f, count=updated_blob_flat_size, dtype=self._d_type)
if dd.shape[0] == updated_blob_flat_size:
blob = dd.reshape(updated_blob_dim)
else:
logger.info('DD shape != blob shape.')
blob = dd.reshape((int(dd.shape[0]/blob_dim[self.freq_axis]),blob_dim[self.beam_axis],blob_dim[self.freq_axis]))
else:
for blobt in range(updated_blob_dim[self.time_axis]):
#Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_dim[self.time_axis]*self.n_channels_in_file + blobt*self.n_channels_in_file)))
dd = np.fromfile(f, count=blob_dim[self.freq_axis], dtype=self._d_type)
blob[blobt] = dd
# if self.header[b'foff'] < 0:
# blob = blob[:,:,::-1]
return blob
|
[
"def",
"read_blob",
"(",
"self",
",",
"blob_dim",
",",
"n_blob",
"=",
"0",
")",
":",
"n_blobs",
"=",
"self",
".",
"calc_n_blobs",
"(",
"blob_dim",
")",
"if",
"n_blob",
">",
"n_blobs",
"or",
"n_blob",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'Please provide correct n_blob value. Given %i, but max values is %i'",
"%",
"(",
"n_blob",
",",
"n_blobs",
")",
")",
"# This prevents issues when the last blob is smaller than the others in time.",
"if",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"(",
"n_blob",
"+",
"1",
")",
">",
"self",
".",
"selection_shape",
"[",
"self",
".",
"time_axis",
"]",
":",
"updated_blob_dim",
"=",
"(",
"int",
"(",
"self",
".",
"selection_shape",
"[",
"self",
".",
"time_axis",
"]",
"-",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"n_blob",
")",
",",
"1",
",",
"int",
"(",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
")",
")",
"else",
":",
"updated_blob_dim",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"blob_dim",
"]",
"blob_start",
"=",
"self",
".",
"_find_blob_start",
"(",
")",
"blob",
"=",
"np",
".",
"zeros",
"(",
"updated_blob_dim",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"# EE: For now; also assuming one polarization and one beam.",
"# Assuming the blob will loop over the whole frequency range.",
"if",
"self",
".",
"f_start",
"==",
"self",
".",
"f_begin",
"and",
"self",
".",
"f_stop",
"==",
"self",
".",
"f_end",
":",
"blob_flat_size",
"=",
"np",
".",
"prod",
"(",
"blob_dim",
")",
"updated_blob_flat_size",
"=",
"np",
".",
"prod",
"(",
"updated_blob_dim",
")",
"# Load binary data",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"idx_data",
"+",
"self",
".",
"_n_bytes",
"*",
"(",
"blob_start",
"+",
"n_blob",
"*",
"blob_flat_size",
")",
")",
")",
"dd",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"count",
"=",
"updated_blob_flat_size",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"if",
"dd",
".",
"shape",
"[",
"0",
"]",
"==",
"updated_blob_flat_size",
":",
"blob",
"=",
"dd",
".",
"reshape",
"(",
"updated_blob_dim",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'DD shape != blob shape.'",
")",
"blob",
"=",
"dd",
".",
"reshape",
"(",
"(",
"int",
"(",
"dd",
".",
"shape",
"[",
"0",
"]",
"/",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
")",
",",
"blob_dim",
"[",
"self",
".",
"beam_axis",
"]",
",",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
")",
")",
"else",
":",
"for",
"blobt",
"in",
"range",
"(",
"updated_blob_dim",
"[",
"self",
".",
"time_axis",
"]",
")",
":",
"#Load binary data",
"with",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"int",
"(",
"self",
".",
"idx_data",
"+",
"self",
".",
"_n_bytes",
"*",
"(",
"blob_start",
"+",
"n_blob",
"*",
"blob_dim",
"[",
"self",
".",
"time_axis",
"]",
"*",
"self",
".",
"n_channels_in_file",
"+",
"blobt",
"*",
"self",
".",
"n_channels_in_file",
")",
")",
")",
"dd",
"=",
"np",
".",
"fromfile",
"(",
"f",
",",
"count",
"=",
"blob_dim",
"[",
"self",
".",
"freq_axis",
"]",
",",
"dtype",
"=",
"self",
".",
"_d_type",
")",
"blob",
"[",
"blobt",
"]",
"=",
"dd",
"# if self.header[b'foff'] < 0:",
"# blob = blob[:,:,::-1]",
"return",
"blob"
] | 44.02 | 30.72 |
def get_cache_key(self, offset=0, limit=0, order=None, post_slug=''):
""" The return of Get
"""
return hashlib.sha1(
'.'.join([
str(self._get_data_source_url()),
str(offset),
str(limit),
str(order),
str(post_slug),
])
).hexdigest()
|
[
"def",
"get_cache_key",
"(",
"self",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"0",
",",
"order",
"=",
"None",
",",
"post_slug",
"=",
"''",
")",
":",
"return",
"hashlib",
".",
"sha1",
"(",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"self",
".",
"_get_data_source_url",
"(",
")",
")",
",",
"str",
"(",
"offset",
")",
",",
"str",
"(",
"limit",
")",
",",
"str",
"(",
"order",
")",
",",
"str",
"(",
"post_slug",
")",
",",
"]",
")",
")",
".",
"hexdigest",
"(",
")"
] | 30 | 13 |
def _fill(self, data, total_length, padding_symbol):
""" Overridden :meth:`.WSimplePadding._fill` method. This methods adds padding symbol at the beginning
and at the end of the specified data.
:param data: data to append to
:param total_length: target length
:param padding_symbol: symbol to pad
:return: bytes
"""
delta = total_length - len(data)
return ((padding_symbol * random_int(delta)) + data).ljust(total_length, padding_symbol)
|
[
"def",
"_fill",
"(",
"self",
",",
"data",
",",
"total_length",
",",
"padding_symbol",
")",
":",
"delta",
"=",
"total_length",
"-",
"len",
"(",
"data",
")",
"return",
"(",
"(",
"padding_symbol",
"*",
"random_int",
"(",
"delta",
")",
")",
"+",
"data",
")",
".",
"ljust",
"(",
"total_length",
",",
"padding_symbol",
")"
] | 37.166667 | 15.583333 |
def getConfigRoot(cls, create = False):
"""
Return the mapped configuration root node
"""
try:
return manager.gettree(getattr(cls, 'configkey'), create)
except AttributeError:
return None
|
[
"def",
"getConfigRoot",
"(",
"cls",
",",
"create",
"=",
"False",
")",
":",
"try",
":",
"return",
"manager",
".",
"gettree",
"(",
"getattr",
"(",
"cls",
",",
"'configkey'",
")",
",",
"create",
")",
"except",
"AttributeError",
":",
"return",
"None"
] | 30.5 | 11.75 |
def tab_under_menu(self):
"""
Returns the tab that sits under the context menu.
:return: QWidget
"""
if self._menu_pos:
return self.tabBar().tabAt(self._menu_pos)
else:
return self.currentIndex()
|
[
"def",
"tab_under_menu",
"(",
"self",
")",
":",
"if",
"self",
".",
"_menu_pos",
":",
"return",
"self",
".",
"tabBar",
"(",
")",
".",
"tabAt",
"(",
"self",
".",
"_menu_pos",
")",
"else",
":",
"return",
"self",
".",
"currentIndex",
"(",
")"
] | 28.777778 | 11.666667 |
def display_upstream_structure(structure_dict):
"""Displays pipeline structure in the jupyter notebook.
Args:
structure_dict (dict): dict returned by
:func:`~steppy.base.Step.upstream_structure`.
"""
graph = _create_graph(structure_dict)
plt = Image(graph.create_png())
display(plt)
|
[
"def",
"display_upstream_structure",
"(",
"structure_dict",
")",
":",
"graph",
"=",
"_create_graph",
"(",
"structure_dict",
")",
"plt",
"=",
"Image",
"(",
"graph",
".",
"create_png",
"(",
")",
")",
"display",
"(",
"plt",
")"
] | 31.8 | 13.2 |
def loop(self):
"""
Thread's main loop. Don't meant to be called by user directly.
Call inherited start() method instead.
Events are read only once time every min(read_freq, timeout)
seconds at best and only if the size of events to read is >= threshold.
"""
# When the loop must be terminated .stop() is called, 'stop'
# is written to pipe fd so poll() returns and .check_events()
# returns False which make evaluate the While's stop condition
# ._stop_event.isSet() wich put an end to the thread's execution.
while not self._stop_event.isSet():
self.process_events()
ref_time = time.time()
if self.check_events():
self._sleep(ref_time)
self.read_events()
|
[
"def",
"loop",
"(",
"self",
")",
":",
"# When the loop must be terminated .stop() is called, 'stop'",
"# is written to pipe fd so poll() returns and .check_events()",
"# returns False which make evaluate the While's stop condition",
"# ._stop_event.isSet() wich put an end to the thread's execution.",
"while",
"not",
"self",
".",
"_stop_event",
".",
"isSet",
"(",
")",
":",
"self",
".",
"process_events",
"(",
")",
"ref_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"check_events",
"(",
")",
":",
"self",
".",
"_sleep",
"(",
"ref_time",
")",
"self",
".",
"read_events",
"(",
")"
] | 44.222222 | 17.666667 |
def load(self, filename):
"""Load file information from a filename."""
self.metadata_blocks = []
self.tags = None
self.cuesheet = None
self.seektable = None
self.filename = filename
fileobj = StrictFileObject(open(filename, "rb"))
try:
self.__check_header(fileobj)
while self.__read_metadata_block(fileobj):
pass
finally:
fileobj.close()
try:
self.metadata_blocks[0].length
except (AttributeError, IndexError):
raise FLACNoHeaderError("Stream info block not found")
|
[
"def",
"load",
"(",
"self",
",",
"filename",
")",
":",
"self",
".",
"metadata_blocks",
"=",
"[",
"]",
"self",
".",
"tags",
"=",
"None",
"self",
".",
"cuesheet",
"=",
"None",
"self",
".",
"seektable",
"=",
"None",
"self",
".",
"filename",
"=",
"filename",
"fileobj",
"=",
"StrictFileObject",
"(",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
")",
"try",
":",
"self",
".",
"__check_header",
"(",
"fileobj",
")",
"while",
"self",
".",
"__read_metadata_block",
"(",
"fileobj",
")",
":",
"pass",
"finally",
":",
"fileobj",
".",
"close",
"(",
")",
"try",
":",
"self",
".",
"metadata_blocks",
"[",
"0",
"]",
".",
"length",
"except",
"(",
"AttributeError",
",",
"IndexError",
")",
":",
"raise",
"FLACNoHeaderError",
"(",
"\"Stream info block not found\"",
")"
] | 30.6 | 16.2 |
def extract_physical_plan(self, topology):
"""
Returns the representation of physical plan that will
be returned from Tracker.
"""
physicalPlan = {
"instances": {},
"instance_groups": {},
"stmgrs": {},
"spouts": {},
"bolts": {},
"config": {},
"components": {}
}
if not topology.physical_plan:
return physicalPlan
spouts = topology.spouts()
bolts = topology.bolts()
stmgrs = None
instances = None
# Physical Plan
stmgrs = list(topology.physical_plan.stmgrs)
instances = list(topology.physical_plan.instances)
# Configs
if topology.physical_plan.topology.topology_config:
physicalPlan["config"] = convert_pb_kvs(topology.physical_plan.topology.topology_config.kvs)
for spout in spouts:
spout_name = spout.comp.name
physicalPlan["spouts"][spout_name] = []
if spout_name not in physicalPlan["components"]:
physicalPlan["components"][spout_name] = {
"config": convert_pb_kvs(spout.comp.config.kvs)
}
for bolt in bolts:
bolt_name = bolt.comp.name
physicalPlan["bolts"][bolt_name] = []
if bolt_name not in physicalPlan["components"]:
physicalPlan["components"][bolt_name] = {
"config": convert_pb_kvs(bolt.comp.config.kvs)
}
for stmgr in stmgrs:
host = stmgr.host_name
cwd = stmgr.cwd
shell_port = stmgr.shell_port if stmgr.HasField("shell_port") else None
physicalPlan["stmgrs"][stmgr.id] = {
"id": stmgr.id,
"host": host,
"port": stmgr.data_port,
"shell_port": shell_port,
"cwd": cwd,
"pid": stmgr.pid,
"joburl": utils.make_shell_job_url(host, shell_port, cwd),
"logfiles": utils.make_shell_logfiles_url(host, shell_port, cwd),
"instance_ids": []
}
instance_groups = collections.OrderedDict()
for instance in instances:
instance_id = instance.instance_id
stmgrId = instance.stmgr_id
name = instance.info.component_name
stmgrInfo = physicalPlan["stmgrs"][stmgrId]
host = stmgrInfo["host"]
cwd = stmgrInfo["cwd"]
shell_port = stmgrInfo["shell_port"]
# instance_id format container_<index>_component_1
# group name is container_<index>
group_name = instance_id.rsplit("_", 2)[0]
igroup = instance_groups.get(group_name, list())
igroup.append(instance_id)
instance_groups[group_name] = igroup
physicalPlan["instances"][instance_id] = {
"id": instance_id,
"name": name,
"stmgrId": stmgrId,
"logfile": utils.make_shell_logfiles_url(host, shell_port, cwd, instance.instance_id),
}
physicalPlan["stmgrs"][stmgrId]["instance_ids"].append(instance_id)
if name in physicalPlan["spouts"]:
physicalPlan["spouts"][name].append(instance_id)
else:
physicalPlan["bolts"][name].append(instance_id)
physicalPlan["instance_groups"] = instance_groups
return physicalPlan
|
[
"def",
"extract_physical_plan",
"(",
"self",
",",
"topology",
")",
":",
"physicalPlan",
"=",
"{",
"\"instances\"",
":",
"{",
"}",
",",
"\"instance_groups\"",
":",
"{",
"}",
",",
"\"stmgrs\"",
":",
"{",
"}",
",",
"\"spouts\"",
":",
"{",
"}",
",",
"\"bolts\"",
":",
"{",
"}",
",",
"\"config\"",
":",
"{",
"}",
",",
"\"components\"",
":",
"{",
"}",
"}",
"if",
"not",
"topology",
".",
"physical_plan",
":",
"return",
"physicalPlan",
"spouts",
"=",
"topology",
".",
"spouts",
"(",
")",
"bolts",
"=",
"topology",
".",
"bolts",
"(",
")",
"stmgrs",
"=",
"None",
"instances",
"=",
"None",
"# Physical Plan",
"stmgrs",
"=",
"list",
"(",
"topology",
".",
"physical_plan",
".",
"stmgrs",
")",
"instances",
"=",
"list",
"(",
"topology",
".",
"physical_plan",
".",
"instances",
")",
"# Configs",
"if",
"topology",
".",
"physical_plan",
".",
"topology",
".",
"topology_config",
":",
"physicalPlan",
"[",
"\"config\"",
"]",
"=",
"convert_pb_kvs",
"(",
"topology",
".",
"physical_plan",
".",
"topology",
".",
"topology_config",
".",
"kvs",
")",
"for",
"spout",
"in",
"spouts",
":",
"spout_name",
"=",
"spout",
".",
"comp",
".",
"name",
"physicalPlan",
"[",
"\"spouts\"",
"]",
"[",
"spout_name",
"]",
"=",
"[",
"]",
"if",
"spout_name",
"not",
"in",
"physicalPlan",
"[",
"\"components\"",
"]",
":",
"physicalPlan",
"[",
"\"components\"",
"]",
"[",
"spout_name",
"]",
"=",
"{",
"\"config\"",
":",
"convert_pb_kvs",
"(",
"spout",
".",
"comp",
".",
"config",
".",
"kvs",
")",
"}",
"for",
"bolt",
"in",
"bolts",
":",
"bolt_name",
"=",
"bolt",
".",
"comp",
".",
"name",
"physicalPlan",
"[",
"\"bolts\"",
"]",
"[",
"bolt_name",
"]",
"=",
"[",
"]",
"if",
"bolt_name",
"not",
"in",
"physicalPlan",
"[",
"\"components\"",
"]",
":",
"physicalPlan",
"[",
"\"components\"",
"]",
"[",
"bolt_name",
"]",
"=",
"{",
"\"config\"",
":",
"convert_pb_kvs",
"(",
"bolt",
".",
"comp",
".",
"config",
".",
"kvs",
")",
"}",
"for",
"stmgr",
"in",
"stmgrs",
":",
"host",
"=",
"stmgr",
".",
"host_name",
"cwd",
"=",
"stmgr",
".",
"cwd",
"shell_port",
"=",
"stmgr",
".",
"shell_port",
"if",
"stmgr",
".",
"HasField",
"(",
"\"shell_port\"",
")",
"else",
"None",
"physicalPlan",
"[",
"\"stmgrs\"",
"]",
"[",
"stmgr",
".",
"id",
"]",
"=",
"{",
"\"id\"",
":",
"stmgr",
".",
"id",
",",
"\"host\"",
":",
"host",
",",
"\"port\"",
":",
"stmgr",
".",
"data_port",
",",
"\"shell_port\"",
":",
"shell_port",
",",
"\"cwd\"",
":",
"cwd",
",",
"\"pid\"",
":",
"stmgr",
".",
"pid",
",",
"\"joburl\"",
":",
"utils",
".",
"make_shell_job_url",
"(",
"host",
",",
"shell_port",
",",
"cwd",
")",
",",
"\"logfiles\"",
":",
"utils",
".",
"make_shell_logfiles_url",
"(",
"host",
",",
"shell_port",
",",
"cwd",
")",
",",
"\"instance_ids\"",
":",
"[",
"]",
"}",
"instance_groups",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"instance",
"in",
"instances",
":",
"instance_id",
"=",
"instance",
".",
"instance_id",
"stmgrId",
"=",
"instance",
".",
"stmgr_id",
"name",
"=",
"instance",
".",
"info",
".",
"component_name",
"stmgrInfo",
"=",
"physicalPlan",
"[",
"\"stmgrs\"",
"]",
"[",
"stmgrId",
"]",
"host",
"=",
"stmgrInfo",
"[",
"\"host\"",
"]",
"cwd",
"=",
"stmgrInfo",
"[",
"\"cwd\"",
"]",
"shell_port",
"=",
"stmgrInfo",
"[",
"\"shell_port\"",
"]",
"# instance_id format container_<index>_component_1",
"# group name is container_<index>",
"group_name",
"=",
"instance_id",
".",
"rsplit",
"(",
"\"_\"",
",",
"2",
")",
"[",
"0",
"]",
"igroup",
"=",
"instance_groups",
".",
"get",
"(",
"group_name",
",",
"list",
"(",
")",
")",
"igroup",
".",
"append",
"(",
"instance_id",
")",
"instance_groups",
"[",
"group_name",
"]",
"=",
"igroup",
"physicalPlan",
"[",
"\"instances\"",
"]",
"[",
"instance_id",
"]",
"=",
"{",
"\"id\"",
":",
"instance_id",
",",
"\"name\"",
":",
"name",
",",
"\"stmgrId\"",
":",
"stmgrId",
",",
"\"logfile\"",
":",
"utils",
".",
"make_shell_logfiles_url",
"(",
"host",
",",
"shell_port",
",",
"cwd",
",",
"instance",
".",
"instance_id",
")",
",",
"}",
"physicalPlan",
"[",
"\"stmgrs\"",
"]",
"[",
"stmgrId",
"]",
"[",
"\"instance_ids\"",
"]",
".",
"append",
"(",
"instance_id",
")",
"if",
"name",
"in",
"physicalPlan",
"[",
"\"spouts\"",
"]",
":",
"physicalPlan",
"[",
"\"spouts\"",
"]",
"[",
"name",
"]",
".",
"append",
"(",
"instance_id",
")",
"else",
":",
"physicalPlan",
"[",
"\"bolts\"",
"]",
"[",
"name",
"]",
".",
"append",
"(",
"instance_id",
")",
"physicalPlan",
"[",
"\"instance_groups\"",
"]",
"=",
"instance_groups",
"return",
"physicalPlan"
] | 31.557895 | 18.421053 |
def get_repos(self):
"""
Gets the repos for the organization and builds the URL/headers for
getting timestamps of stargazers.
"""
print 'Getting repos.'
#Uses the developer API. Note this could change.
headers = {'Accept': 'application/vnd.github.v3.star+json', 'Authorization': 'token ' + self.token}
temp_count = 0
for repo in self.org_retrieved.iter_repos():
temp_count += 1
url = ('https://api.github.com/repos/' + self.organization_name + '/' + repo.name)
self.repos[repo.name] = self.get_stargazers(url=url, headers=headers)
self.calc_stargazers(start_count=650)
print 'total count: \t' + str(self.total_count)
print str(temp_count) + ' repos'
|
[
"def",
"get_repos",
"(",
"self",
")",
":",
"print",
"'Getting repos.'",
"#Uses the developer API. Note this could change.",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.github.v3.star+json'",
",",
"'Authorization'",
":",
"'token '",
"+",
"self",
".",
"token",
"}",
"temp_count",
"=",
"0",
"for",
"repo",
"in",
"self",
".",
"org_retrieved",
".",
"iter_repos",
"(",
")",
":",
"temp_count",
"+=",
"1",
"url",
"=",
"(",
"'https://api.github.com/repos/'",
"+",
"self",
".",
"organization_name",
"+",
"'/'",
"+",
"repo",
".",
"name",
")",
"self",
".",
"repos",
"[",
"repo",
".",
"name",
"]",
"=",
"self",
".",
"get_stargazers",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
")",
"self",
".",
"calc_stargazers",
"(",
"start_count",
"=",
"650",
")",
"print",
"'total count: \\t'",
"+",
"str",
"(",
"self",
".",
"total_count",
")",
"print",
"str",
"(",
"temp_count",
")",
"+",
"' repos'"
] | 45.058824 | 20.352941 |
def run(self):
'''
Run the logic for saltkey
'''
self._update_opts()
cmd = self.opts['fun']
veri = None
ret = None
try:
if cmd in ('accept', 'reject', 'delete'):
ret = self._run_cmd('name_match')
if not isinstance(ret, dict):
salt.output.display_output(ret, 'key', opts=self.opts)
return ret
ret = self._filter_ret(cmd, ret)
if not ret:
self._print_no_match(cmd, self.opts['match'])
return
print('The following keys are going to be {0}ed:'.format(cmd.rstrip('e')))
salt.output.display_output(ret, 'key', opts=self.opts)
if not self.opts.get('yes', False):
try:
if cmd.startswith('delete'):
veri = input('Proceed? [N/y] ')
if not veri:
veri = 'n'
else:
veri = input('Proceed? [n/Y] ')
if not veri:
veri = 'y'
except KeyboardInterrupt:
raise SystemExit("\nExiting on CTRL-c")
# accept/reject/delete the same keys we're printed to the user
self.opts['match_dict'] = ret
self.opts.pop('match', None)
list_ret = ret
if veri is None or veri.lower().startswith('y'):
ret = self._run_cmd(cmd)
if cmd in ('accept', 'reject', 'delete'):
if cmd == 'delete':
ret = list_ret
for minions in ret.values():
for minion in minions:
print('Key for minion {0} {1}ed.'.format(minion,
cmd.rstrip('e')))
elif isinstance(ret, dict):
salt.output.display_output(ret, 'key', opts=self.opts)
else:
salt.output.display_output({'return': ret}, 'key', opts=self.opts)
except salt.exceptions.SaltException as exc:
ret = '{0}'.format(exc)
if not self.opts.get('quiet', False):
salt.output.display_output(ret, 'nested', self.opts)
return ret
|
[
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"_update_opts",
"(",
")",
"cmd",
"=",
"self",
".",
"opts",
"[",
"'fun'",
"]",
"veri",
"=",
"None",
"ret",
"=",
"None",
"try",
":",
"if",
"cmd",
"in",
"(",
"'accept'",
",",
"'reject'",
",",
"'delete'",
")",
":",
"ret",
"=",
"self",
".",
"_run_cmd",
"(",
"'name_match'",
")",
"if",
"not",
"isinstance",
"(",
"ret",
",",
"dict",
")",
":",
"salt",
".",
"output",
".",
"display_output",
"(",
"ret",
",",
"'key'",
",",
"opts",
"=",
"self",
".",
"opts",
")",
"return",
"ret",
"ret",
"=",
"self",
".",
"_filter_ret",
"(",
"cmd",
",",
"ret",
")",
"if",
"not",
"ret",
":",
"self",
".",
"_print_no_match",
"(",
"cmd",
",",
"self",
".",
"opts",
"[",
"'match'",
"]",
")",
"return",
"print",
"(",
"'The following keys are going to be {0}ed:'",
".",
"format",
"(",
"cmd",
".",
"rstrip",
"(",
"'e'",
")",
")",
")",
"salt",
".",
"output",
".",
"display_output",
"(",
"ret",
",",
"'key'",
",",
"opts",
"=",
"self",
".",
"opts",
")",
"if",
"not",
"self",
".",
"opts",
".",
"get",
"(",
"'yes'",
",",
"False",
")",
":",
"try",
":",
"if",
"cmd",
".",
"startswith",
"(",
"'delete'",
")",
":",
"veri",
"=",
"input",
"(",
"'Proceed? [N/y] '",
")",
"if",
"not",
"veri",
":",
"veri",
"=",
"'n'",
"else",
":",
"veri",
"=",
"input",
"(",
"'Proceed? [n/Y] '",
")",
"if",
"not",
"veri",
":",
"veri",
"=",
"'y'",
"except",
"KeyboardInterrupt",
":",
"raise",
"SystemExit",
"(",
"\"\\nExiting on CTRL-c\"",
")",
"# accept/reject/delete the same keys we're printed to the user",
"self",
".",
"opts",
"[",
"'match_dict'",
"]",
"=",
"ret",
"self",
".",
"opts",
".",
"pop",
"(",
"'match'",
",",
"None",
")",
"list_ret",
"=",
"ret",
"if",
"veri",
"is",
"None",
"or",
"veri",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'y'",
")",
":",
"ret",
"=",
"self",
".",
"_run_cmd",
"(",
"cmd",
")",
"if",
"cmd",
"in",
"(",
"'accept'",
",",
"'reject'",
",",
"'delete'",
")",
":",
"if",
"cmd",
"==",
"'delete'",
":",
"ret",
"=",
"list_ret",
"for",
"minions",
"in",
"ret",
".",
"values",
"(",
")",
":",
"for",
"minion",
"in",
"minions",
":",
"print",
"(",
"'Key for minion {0} {1}ed.'",
".",
"format",
"(",
"minion",
",",
"cmd",
".",
"rstrip",
"(",
"'e'",
")",
")",
")",
"elif",
"isinstance",
"(",
"ret",
",",
"dict",
")",
":",
"salt",
".",
"output",
".",
"display_output",
"(",
"ret",
",",
"'key'",
",",
"opts",
"=",
"self",
".",
"opts",
")",
"else",
":",
"salt",
".",
"output",
".",
"display_output",
"(",
"{",
"'return'",
":",
"ret",
"}",
",",
"'key'",
",",
"opts",
"=",
"self",
".",
"opts",
")",
"except",
"salt",
".",
"exceptions",
".",
"SaltException",
"as",
"exc",
":",
"ret",
"=",
"'{0}'",
".",
"format",
"(",
"exc",
")",
"if",
"not",
"self",
".",
"opts",
".",
"get",
"(",
"'quiet'",
",",
"False",
")",
":",
"salt",
".",
"output",
".",
"display_output",
"(",
"ret",
",",
"'nested'",
",",
"self",
".",
"opts",
")",
"return",
"ret"
] | 42.649123 | 17.666667 |
def get_teams(self):
"""Get teams."""
if self._cache['teams']:
return self._cache['teams']
teams = []
for j, player in enumerate(self._header.initial.players):
added = False
for i in range(0, len(self._header.initial.players)):
if player.attributes.my_diplomacy[i] == 'ally':
inner_team = False
outer_team = False
new_team = True
for t, tl in enumerate(teams):
if j in tl or i in tl:
new_team = False
if j in tl and i not in tl:
inner_team = t
break
if j not in tl and i in tl:
outer_team = t
break
if new_team:
teams.append([i, j])
if inner_team is not False:
teams[inner_team].append(i)
if outer_team is not False:
teams[outer_team].append(j)
added = True
if not added and j != 0:
teams.append([j])
self._cache['teams'] = teams
return teams
|
[
"def",
"get_teams",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cache",
"[",
"'teams'",
"]",
":",
"return",
"self",
".",
"_cache",
"[",
"'teams'",
"]",
"teams",
"=",
"[",
"]",
"for",
"j",
",",
"player",
"in",
"enumerate",
"(",
"self",
".",
"_header",
".",
"initial",
".",
"players",
")",
":",
"added",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_header",
".",
"initial",
".",
"players",
")",
")",
":",
"if",
"player",
".",
"attributes",
".",
"my_diplomacy",
"[",
"i",
"]",
"==",
"'ally'",
":",
"inner_team",
"=",
"False",
"outer_team",
"=",
"False",
"new_team",
"=",
"True",
"for",
"t",
",",
"tl",
"in",
"enumerate",
"(",
"teams",
")",
":",
"if",
"j",
"in",
"tl",
"or",
"i",
"in",
"tl",
":",
"new_team",
"=",
"False",
"if",
"j",
"in",
"tl",
"and",
"i",
"not",
"in",
"tl",
":",
"inner_team",
"=",
"t",
"break",
"if",
"j",
"not",
"in",
"tl",
"and",
"i",
"in",
"tl",
":",
"outer_team",
"=",
"t",
"break",
"if",
"new_team",
":",
"teams",
".",
"append",
"(",
"[",
"i",
",",
"j",
"]",
")",
"if",
"inner_team",
"is",
"not",
"False",
":",
"teams",
"[",
"inner_team",
"]",
".",
"append",
"(",
"i",
")",
"if",
"outer_team",
"is",
"not",
"False",
":",
"teams",
"[",
"outer_team",
"]",
".",
"append",
"(",
"j",
")",
"added",
"=",
"True",
"if",
"not",
"added",
"and",
"j",
"!=",
"0",
":",
"teams",
".",
"append",
"(",
"[",
"j",
"]",
")",
"self",
".",
"_cache",
"[",
"'teams'",
"]",
"=",
"teams",
"return",
"teams"
] | 40.09375 | 9.34375 |
def print_roi(self, loglevel=logging.INFO):
"""Print information about the spectral and spatial properties
of the ROI (sources, diffuse components)."""
self.logger.log(loglevel, '\n' + str(self.roi))
|
[
"def",
"print_roi",
"(",
"self",
",",
"loglevel",
"=",
"logging",
".",
"INFO",
")",
":",
"self",
".",
"logger",
".",
"log",
"(",
"loglevel",
",",
"'\\n'",
"+",
"str",
"(",
"self",
".",
"roi",
")",
")"
] | 55 | 4.5 |
def write_amendment(self, amendment_id, file_content, branch, author):
"""Given an amendment_id, temporary filename of content, branch and auth_info
Deprecated but needed until we merge api local-dep to master...
"""
gh_user = branch.split('_amendment_')[0]
msg = "Update Amendment '%s' via OpenTree API" % amendment_id
return self.write_document(gh_user,
amendment_id,
file_content,
branch, author,
commit_msg=msg)
|
[
"def",
"write_amendment",
"(",
"self",
",",
"amendment_id",
",",
"file_content",
",",
"branch",
",",
"author",
")",
":",
"gh_user",
"=",
"branch",
".",
"split",
"(",
"'_amendment_'",
")",
"[",
"0",
"]",
"msg",
"=",
"\"Update Amendment '%s' via OpenTree API\"",
"%",
"amendment_id",
"return",
"self",
".",
"write_document",
"(",
"gh_user",
",",
"amendment_id",
",",
"file_content",
",",
"branch",
",",
"author",
",",
"commit_msg",
"=",
"msg",
")"
] | 45.615385 | 16.692308 |
def loadAddressbyPrefix(self, prefix, type, network_id, callback=None, errback=None):
"""
Load an existing address by prefix, type and network into a high level Address object
:param str prefix: CIDR prefix of an existing Address
:param str type: Type of address assignement (planned, assignment or host)
:param int network_id: network_id associated with the address
"""
import ns1.ipam
network = ns1.ipam.Network(self.config, id=network_id).load()
address = ns1.ipam.Address(self.config, prefix=prefix, type=type, network=network)
return address.load(callback=callback, errback=errback)
|
[
"def",
"loadAddressbyPrefix",
"(",
"self",
",",
"prefix",
",",
"type",
",",
"network_id",
",",
"callback",
"=",
"None",
",",
"errback",
"=",
"None",
")",
":",
"import",
"ns1",
".",
"ipam",
"network",
"=",
"ns1",
".",
"ipam",
".",
"Network",
"(",
"self",
".",
"config",
",",
"id",
"=",
"network_id",
")",
".",
"load",
"(",
")",
"address",
"=",
"ns1",
".",
"ipam",
".",
"Address",
"(",
"self",
".",
"config",
",",
"prefix",
"=",
"prefix",
",",
"type",
"=",
"type",
",",
"network",
"=",
"network",
")",
"return",
"address",
".",
"load",
"(",
"callback",
"=",
"callback",
",",
"errback",
"=",
"errback",
")"
] | 54.75 | 29.083333 |
def ValidateAccessAndSubjects(requested_access, subjects):
"""Does basic requested access validation.
Args:
requested_access: String consisting or 'r', 'w' and 'q' characters.
subjects: A list of subjects that are about to be accessed with a given
requested_access. Used for logging purposes only.
Returns:
True if requested_access is valid.
Raises:
access_control.UnauthorizedAccess: if requested_access is not valid.
ValueError: if subjects list is empty.
"""
if not requested_access:
raise access_control.UnauthorizedAccess(
"Must specify requested access type for %s" % subjects)
for s in requested_access:
if s not in "rwq":
raise ValueError(
"Invalid access requested for %s: %s" % (subjects, requested_access))
if "q" in requested_access and "r" not in requested_access:
raise access_control.UnauthorizedAccess(
"Invalid access request: query permissions require read permissions "
"for %s" % subjects,
requested_access=requested_access)
return True
|
[
"def",
"ValidateAccessAndSubjects",
"(",
"requested_access",
",",
"subjects",
")",
":",
"if",
"not",
"requested_access",
":",
"raise",
"access_control",
".",
"UnauthorizedAccess",
"(",
"\"Must specify requested access type for %s\"",
"%",
"subjects",
")",
"for",
"s",
"in",
"requested_access",
":",
"if",
"s",
"not",
"in",
"\"rwq\"",
":",
"raise",
"ValueError",
"(",
"\"Invalid access requested for %s: %s\"",
"%",
"(",
"subjects",
",",
"requested_access",
")",
")",
"if",
"\"q\"",
"in",
"requested_access",
"and",
"\"r\"",
"not",
"in",
"requested_access",
":",
"raise",
"access_control",
".",
"UnauthorizedAccess",
"(",
"\"Invalid access request: query permissions require read permissions \"",
"\"for %s\"",
"%",
"subjects",
",",
"requested_access",
"=",
"requested_access",
")",
"return",
"True"
] | 32.375 | 23.09375 |
def amdf(lag, size):
"""
Average Magnitude Difference Function non-linear filter for a given
size and a fixed lag.
Parameters
----------
lag :
Time lag, in samples. See ``freq2lag`` if needs conversion from
frequency values.
size :
Moving average size.
Returns
-------
A callable that accepts two parameters: a signal ``sig`` and the starting
memory element ``zero`` that behaves like the ``LinearFilter.__call__``
arguments. The output from that callable is a Stream instance, and has
no decimation applied.
See Also
--------
freq2lag :
Frequency (in rad/sample) to lag (in samples) converter.
"""
filt = (1 - z ** -lag).linearize()
@tostream
def amdf_filter(sig, zero=0.):
return maverage(size)(abs(filt(sig, zero=zero)), zero=zero)
return amdf_filter
|
[
"def",
"amdf",
"(",
"lag",
",",
"size",
")",
":",
"filt",
"=",
"(",
"1",
"-",
"z",
"**",
"-",
"lag",
")",
".",
"linearize",
"(",
")",
"@",
"tostream",
"def",
"amdf_filter",
"(",
"sig",
",",
"zero",
"=",
"0.",
")",
":",
"return",
"maverage",
"(",
"size",
")",
"(",
"abs",
"(",
"filt",
"(",
"sig",
",",
"zero",
"=",
"zero",
")",
")",
",",
"zero",
"=",
"zero",
")",
"return",
"amdf_filter"
] | 23.909091 | 26.030303 |
def _binary_arithemtic(self, left, binary, right):
"""
Parameters
----------
operand: Column object, integer or float
Value on which to apply operator to this column
binary: char
binary arithmetic operator (-, +, *, /, ^, %)
Returns
-------
self
Notes
-----
Returning self will allow the next object to use this column ops and
concatenate something else
"""
if isinstance(right, (int, float)):
right = right
elif isinstance(right, Column):
right = right.execution_name
else:
raise AttributeError(
"{} can only be used ".format(binary)
+ "with integer, float or column")
if isinstance(left, (int, float)):
left = left
elif isinstance(left, Column):
left = left.execution_name
else:
raise AttributeError(
"{} can only be used ".format(binary)
+ "with integer, float or column")
copy = self.copy()
copy.query.removeSELECT("{}".format(copy.execution_name))
if binary == '^': # POWER needs a different treatment
copy.execution_name = "pow({},{})".format(left, right)
else:
copy.execution_name = "{}{}{}".format(left, binary, right)
copy.query.addSELECT(copy.execution_name)
return copy
|
[
"def",
"_binary_arithemtic",
"(",
"self",
",",
"left",
",",
"binary",
",",
"right",
")",
":",
"if",
"isinstance",
"(",
"right",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"right",
"=",
"right",
"elif",
"isinstance",
"(",
"right",
",",
"Column",
")",
":",
"right",
"=",
"right",
".",
"execution_name",
"else",
":",
"raise",
"AttributeError",
"(",
"\"{} can only be used \"",
".",
"format",
"(",
"binary",
")",
"+",
"\"with integer, float or column\"",
")",
"if",
"isinstance",
"(",
"left",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"left",
"=",
"left",
"elif",
"isinstance",
"(",
"left",
",",
"Column",
")",
":",
"left",
"=",
"left",
".",
"execution_name",
"else",
":",
"raise",
"AttributeError",
"(",
"\"{} can only be used \"",
".",
"format",
"(",
"binary",
")",
"+",
"\"with integer, float or column\"",
")",
"copy",
"=",
"self",
".",
"copy",
"(",
")",
"copy",
".",
"query",
".",
"removeSELECT",
"(",
"\"{}\"",
".",
"format",
"(",
"copy",
".",
"execution_name",
")",
")",
"if",
"binary",
"==",
"'^'",
":",
"# POWER needs a different treatment",
"copy",
".",
"execution_name",
"=",
"\"pow({},{})\"",
".",
"format",
"(",
"left",
",",
"right",
")",
"else",
":",
"copy",
".",
"execution_name",
"=",
"\"{}{}{}\"",
".",
"format",
"(",
"left",
",",
"binary",
",",
"right",
")",
"copy",
".",
"query",
".",
"addSELECT",
"(",
"copy",
".",
"execution_name",
")",
"return",
"copy"
] | 31.688889 | 18.266667 |
def login(self, url=None, api_key=None, login=None, pwd=None,
api_version=None, timeout=None, verify=True, alt_filepath=None,
domain=None, **kwargs):
"""
Login to SMC API and retrieve a valid session.
Sessions use a pool connection manager to provide dynamic scalability
during times of increased load. Each session is managed by a global
session manager making it possible to have more than one session per
interpreter.
An example login and logout session::
from smc import session
session.login(url='http://1.1.1.1:8082', api_key='SomeSMCG3ener@t3dPwd')
.....do stuff.....
session.logout()
:param str url: ip of SMC management server
:param str api_key: API key created for api client in SMC
:param str login: Administrator user in SMC that has privilege to SMC API.
:param str pwd: Password for user login.
:param api_version (optional): specify api version
:param int timeout: (optional): specify a timeout for initial connect; (default 10)
:param str|boolean verify: verify SSL connections using cert (default: verify=True)
You can pass verify the path to a CA_BUNDLE file or directory with certificates
of trusted CAs
:param str alt_filepath: If using .smcrc, alternate path+filename
:param str domain: domain to log in to. If domains are not configured, this
field will be ignored and api client logged in to 'Shared Domain'.
:param bool retry_on_busy: pass as kwarg with boolean if you want to add retries
if the SMC returns HTTP 503 error during operation. You can also optionally customize
this behavior and call :meth:`.set_retry_on_busy`
:raises ConfigLoadError: loading cfg from ~.smcrc fails
For SSL connections, you can disable validation of the SMC SSL certificate by setting
verify=False, however this is not a recommended practice.
If you want to use the SSL certificate generated and used by the SMC API server
for validation, set verify='path_to_my_dot_pem'. It is also recommended that your
certificate has subjectAltName defined per RFC 2818
If SSL warnings are thrown in debug output, see:
https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
Logout should be called to remove the session immediately from the
SMC server.
.. note:: As of SMC 6.4 it is possible to give a standard Administrative user
access to the SMC API. It is still possible to use an API Client by
providing the api_key in the login call.
"""
params = {}
if not url or (not api_key and not (login and pwd)):
try: # First try load from file
params = load_from_file(alt_filepath) if alt_filepath\
is not None else load_from_file()
logger.debug('Read config data from file: %s', params)
except ConfigLoadError:
# Last ditch effort, try to load from environment
params = load_from_environ()
logger.debug('Read config data from environ: %s', params)
params = params or dict(
url=url,
api_key=api_key,
login=login,
pwd=pwd,
api_version=api_version,
verify=verify,
timeout=timeout,
domain=domain,
kwargs=kwargs or {})
# Check to see this session is already logged in. If so, return.
# The session object represents a single connection. Log out to
# re-use the same session object or get_session() from the
# SessionManager to track multiple sessions.
if self.manager and (self.session and self in self.manager):
logger.info('An attempt to log in occurred when a session already '
'exists, bypassing login for session: %s' % self)
return
self._params = {k: v for k, v in params.items() if v is not None}
verify_ssl = self._params.get('verify', True)
# Determine and set the API version we will use.
self._params.update(
api_version=get_api_version(
self.url, self.api_version, self.timeout, verify_ssl))
extra_args = self._params.get('kwargs', {})
# Retries configured
retry_on_busy = extra_args.pop('retry_on_busy', False)
request = self._build_auth_request(verify_ssl, **extra_args)
# This will raise if session login fails...
self._session = self._get_session(request)
self.session.verify = verify_ssl
if retry_on_busy:
self.set_retry_on_busy()
# Load entry points
load_entry_points(self)
# Put session in manager
self.manager._register(self)
logger.debug('Login succeeded for admin: %s in domain: %s, session: %s',
self.name, self.domain, self.session_id)
|
[
"def",
"login",
"(",
"self",
",",
"url",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"login",
"=",
"None",
",",
"pwd",
"=",
"None",
",",
"api_version",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"alt_filepath",
"=",
"None",
",",
"domain",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"}",
"if",
"not",
"url",
"or",
"(",
"not",
"api_key",
"and",
"not",
"(",
"login",
"and",
"pwd",
")",
")",
":",
"try",
":",
"# First try load from file",
"params",
"=",
"load_from_file",
"(",
"alt_filepath",
")",
"if",
"alt_filepath",
"is",
"not",
"None",
"else",
"load_from_file",
"(",
")",
"logger",
".",
"debug",
"(",
"'Read config data from file: %s'",
",",
"params",
")",
"except",
"ConfigLoadError",
":",
"# Last ditch effort, try to load from environment",
"params",
"=",
"load_from_environ",
"(",
")",
"logger",
".",
"debug",
"(",
"'Read config data from environ: %s'",
",",
"params",
")",
"params",
"=",
"params",
"or",
"dict",
"(",
"url",
"=",
"url",
",",
"api_key",
"=",
"api_key",
",",
"login",
"=",
"login",
",",
"pwd",
"=",
"pwd",
",",
"api_version",
"=",
"api_version",
",",
"verify",
"=",
"verify",
",",
"timeout",
"=",
"timeout",
",",
"domain",
"=",
"domain",
",",
"kwargs",
"=",
"kwargs",
"or",
"{",
"}",
")",
"# Check to see this session is already logged in. If so, return.",
"# The session object represents a single connection. Log out to",
"# re-use the same session object or get_session() from the",
"# SessionManager to track multiple sessions.",
"if",
"self",
".",
"manager",
"and",
"(",
"self",
".",
"session",
"and",
"self",
"in",
"self",
".",
"manager",
")",
":",
"logger",
".",
"info",
"(",
"'An attempt to log in occurred when a session already '",
"'exists, bypassing login for session: %s'",
"%",
"self",
")",
"return",
"self",
".",
"_params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"verify_ssl",
"=",
"self",
".",
"_params",
".",
"get",
"(",
"'verify'",
",",
"True",
")",
"# Determine and set the API version we will use.",
"self",
".",
"_params",
".",
"update",
"(",
"api_version",
"=",
"get_api_version",
"(",
"self",
".",
"url",
",",
"self",
".",
"api_version",
",",
"self",
".",
"timeout",
",",
"verify_ssl",
")",
")",
"extra_args",
"=",
"self",
".",
"_params",
".",
"get",
"(",
"'kwargs'",
",",
"{",
"}",
")",
"# Retries configured",
"retry_on_busy",
"=",
"extra_args",
".",
"pop",
"(",
"'retry_on_busy'",
",",
"False",
")",
"request",
"=",
"self",
".",
"_build_auth_request",
"(",
"verify_ssl",
",",
"*",
"*",
"extra_args",
")",
"# This will raise if session login fails...",
"self",
".",
"_session",
"=",
"self",
".",
"_get_session",
"(",
"request",
")",
"self",
".",
"session",
".",
"verify",
"=",
"verify_ssl",
"if",
"retry_on_busy",
":",
"self",
".",
"set_retry_on_busy",
"(",
")",
"# Load entry points",
"load_entry_points",
"(",
"self",
")",
"# Put session in manager",
"self",
".",
"manager",
".",
"_register",
"(",
"self",
")",
"logger",
".",
"debug",
"(",
"'Login succeeded for admin: %s in domain: %s, session: %s'",
",",
"self",
".",
"name",
",",
"self",
".",
"domain",
",",
"self",
".",
"session_id",
")"
] | 45.150442 | 24.495575 |
def _get_iris_args(attrs):
""" Converts the xarray attrs into args that can be passed into Iris
"""
# iris.unit is deprecated in Iris v1.9
import cf_units
args = {'attributes': _filter_attrs(attrs, iris_forbidden_keys)}
args.update(_pick_attrs(attrs, ('standard_name', 'long_name',)))
unit_args = _pick_attrs(attrs, ('calendar',))
if 'units' in attrs:
args['units'] = cf_units.Unit(attrs['units'], **unit_args)
return args
|
[
"def",
"_get_iris_args",
"(",
"attrs",
")",
":",
"# iris.unit is deprecated in Iris v1.9",
"import",
"cf_units",
"args",
"=",
"{",
"'attributes'",
":",
"_filter_attrs",
"(",
"attrs",
",",
"iris_forbidden_keys",
")",
"}",
"args",
".",
"update",
"(",
"_pick_attrs",
"(",
"attrs",
",",
"(",
"'standard_name'",
",",
"'long_name'",
",",
")",
")",
")",
"unit_args",
"=",
"_pick_attrs",
"(",
"attrs",
",",
"(",
"'calendar'",
",",
")",
")",
"if",
"'units'",
"in",
"attrs",
":",
"args",
"[",
"'units'",
"]",
"=",
"cf_units",
".",
"Unit",
"(",
"attrs",
"[",
"'units'",
"]",
",",
"*",
"*",
"unit_args",
")",
"return",
"args"
] | 41.454545 | 15.363636 |
def toosm(self):
"""Generate a OSM node element subtree.
Returns:
etree.Element: OSM node element
"""
node = create_elem('node', {'id': str(self.ident),
'lat': str(self.latitude),
'lon': str(self.longitude)})
node.set('visible', 'true' if self.visible else 'false')
if self.user:
node.set('user', self.user)
if self.timestamp:
node.set('timestamp', self.timestamp.isoformat())
if self.tags:
for key, value in sorted(self.tags.items()):
node.append(create_elem('tag', {'k': key, 'v': value}))
return node
|
[
"def",
"toosm",
"(",
"self",
")",
":",
"node",
"=",
"create_elem",
"(",
"'node'",
",",
"{",
"'id'",
":",
"str",
"(",
"self",
".",
"ident",
")",
",",
"'lat'",
":",
"str",
"(",
"self",
".",
"latitude",
")",
",",
"'lon'",
":",
"str",
"(",
"self",
".",
"longitude",
")",
"}",
")",
"node",
".",
"set",
"(",
"'visible'",
",",
"'true'",
"if",
"self",
".",
"visible",
"else",
"'false'",
")",
"if",
"self",
".",
"user",
":",
"node",
".",
"set",
"(",
"'user'",
",",
"self",
".",
"user",
")",
"if",
"self",
".",
"timestamp",
":",
"node",
".",
"set",
"(",
"'timestamp'",
",",
"self",
".",
"timestamp",
".",
"isoformat",
"(",
")",
")",
"if",
"self",
".",
"tags",
":",
"for",
"key",
",",
"value",
"in",
"sorted",
"(",
"self",
".",
"tags",
".",
"items",
"(",
")",
")",
":",
"node",
".",
"append",
"(",
"create_elem",
"(",
"'tag'",
",",
"{",
"'k'",
":",
"key",
",",
"'v'",
":",
"value",
"}",
")",
")",
"return",
"node"
] | 36.578947 | 19 |
def record_path(self):
'''
If recording is not enabled, return `None` as record path.
'''
if self.record_button.get_property('active') and (self.record_path_selector
.selected_path):
return self.record_path_selector.selected_path
else:
return None
|
[
"def",
"record_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"record_button",
".",
"get_property",
"(",
"'active'",
")",
"and",
"(",
"self",
".",
"record_path_selector",
".",
"selected_path",
")",
":",
"return",
"self",
".",
"record_path_selector",
".",
"selected_path",
"else",
":",
"return",
"None"
] | 40.111111 | 26.777778 |
def zero_pad_data_extend(self, job_data_seg, curr_seg):
"""When using zero padding, *all* data is analysable, but the setup
functions must include the padding data where it is available so that
we are not zero-padding in the middle of science segments. This
function takes a job_data_seg, that is chosen for a particular node
and extends it with segment-start-pad and segment-end-pad if that
data is available.
"""
if self.zero_padding is False:
return job_data_seg
else:
start_pad = int(self.get_opt( 'segment-start-pad'))
end_pad = int(self.get_opt('segment-end-pad'))
new_data_start = max(curr_seg[0], job_data_seg[0] - start_pad)
new_data_end = min(curr_seg[1], job_data_seg[1] + end_pad)
new_data_seg = segments.segment([new_data_start, new_data_end])
return new_data_seg
|
[
"def",
"zero_pad_data_extend",
"(",
"self",
",",
"job_data_seg",
",",
"curr_seg",
")",
":",
"if",
"self",
".",
"zero_padding",
"is",
"False",
":",
"return",
"job_data_seg",
"else",
":",
"start_pad",
"=",
"int",
"(",
"self",
".",
"get_opt",
"(",
"'segment-start-pad'",
")",
")",
"end_pad",
"=",
"int",
"(",
"self",
".",
"get_opt",
"(",
"'segment-end-pad'",
")",
")",
"new_data_start",
"=",
"max",
"(",
"curr_seg",
"[",
"0",
"]",
",",
"job_data_seg",
"[",
"0",
"]",
"-",
"start_pad",
")",
"new_data_end",
"=",
"min",
"(",
"curr_seg",
"[",
"1",
"]",
",",
"job_data_seg",
"[",
"1",
"]",
"+",
"end_pad",
")",
"new_data_seg",
"=",
"segments",
".",
"segment",
"(",
"[",
"new_data_start",
",",
"new_data_end",
"]",
")",
"return",
"new_data_seg"
] | 53.882353 | 20.705882 |
def has_submenu_items(self, current_page, allow_repeating_parents,
original_menu_tag, menu_instance=None, request=None):
"""
When rendering pages in a menu template a `has_children_in_menu`
attribute is added to each page, letting template developers know
whether or not the item has a submenu that must be rendered.
By default, we return a boolean indicating whether the page has
suitable child pages to include in such a menu. But, if you are
overriding the `modify_submenu_items` method to programatically add
items that aren't child pages, you'll likely need to alter this method
too, so the template knows there are sub items to be rendered.
"""
return menu_instance.page_has_children(self)
|
[
"def",
"has_submenu_items",
"(",
"self",
",",
"current_page",
",",
"allow_repeating_parents",
",",
"original_menu_tag",
",",
"menu_instance",
"=",
"None",
",",
"request",
"=",
"None",
")",
":",
"return",
"menu_instance",
".",
"page_has_children",
"(",
"self",
")"
] | 56.928571 | 26.785714 |
def isSequence(arg):
"""Check if input is iterable."""
if hasattr(arg, "strip"):
return False
if hasattr(arg, "__getslice__"):
return True
if hasattr(arg, "__iter__"):
return True
return False
|
[
"def",
"isSequence",
"(",
"arg",
")",
":",
"if",
"hasattr",
"(",
"arg",
",",
"\"strip\"",
")",
":",
"return",
"False",
"if",
"hasattr",
"(",
"arg",
",",
"\"__getslice__\"",
")",
":",
"return",
"True",
"if",
"hasattr",
"(",
"arg",
",",
"\"__iter__\"",
")",
":",
"return",
"True",
"return",
"False"
] | 25.333333 | 14.333333 |
def filter_examples(self, field_names):
"""Remove unknown words from dataset examples with respect to given field.
Arguments:
field_names (list(str)): Within example only the parts with field names in
field_names will have their unknown words deleted.
"""
for i, example in enumerate(self.examples):
for field_name in field_names:
vocab = set(self.fields[field_name].vocab.stoi)
text = getattr(example, field_name)
example_part = [word for word in text if word in vocab]
setattr(example, field_name, example_part)
self.examples[i] = example
|
[
"def",
"filter_examples",
"(",
"self",
",",
"field_names",
")",
":",
"for",
"i",
",",
"example",
"in",
"enumerate",
"(",
"self",
".",
"examples",
")",
":",
"for",
"field_name",
"in",
"field_names",
":",
"vocab",
"=",
"set",
"(",
"self",
".",
"fields",
"[",
"field_name",
"]",
".",
"vocab",
".",
"stoi",
")",
"text",
"=",
"getattr",
"(",
"example",
",",
"field_name",
")",
"example_part",
"=",
"[",
"word",
"for",
"word",
"in",
"text",
"if",
"word",
"in",
"vocab",
"]",
"setattr",
"(",
"example",
",",
"field_name",
",",
"example_part",
")",
"self",
".",
"examples",
"[",
"i",
"]",
"=",
"example"
] | 48.285714 | 16.642857 |
def create_user_task(sender=None, body=None, **kwargs): # pylint: disable=unused-argument
"""
Create a :py:class:`UserTaskStatus` record for each :py:class:`UserTaskMixin`.
Also creates a :py:class:`UserTaskStatus` for each chain, chord, or group containing
the new :py:class:`UserTaskMixin`.
"""
try:
task_class = import_string(sender)
except ImportError:
return
if issubclass(task_class.__class__, UserTaskMixin):
arguments_dict = task_class.arguments_as_dict(*body['args'], **body['kwargs'])
user_id = _get_user_id(arguments_dict)
task_id = body['id']
if body.get('callbacks', []):
return _create_chain_entry(user_id, task_id, task_class, body['args'], body['kwargs'], body['callbacks'])
if body.get('chord', None):
return _create_chord_entry(task_id, task_class, body, user_id)
parent = _get_or_create_group_parent(body, user_id)
name = task_class.generate_name(arguments_dict)
total_steps = task_class.calculate_total_steps(arguments_dict)
UserTaskStatus.objects.get_or_create(
task_id=task_id, defaults={'user_id': user_id, 'parent': parent, 'name': name, 'task_class': sender,
'total_steps': total_steps})
if parent:
parent.increment_total_steps(total_steps)
|
[
"def",
"create_user_task",
"(",
"sender",
"=",
"None",
",",
"body",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"try",
":",
"task_class",
"=",
"import_string",
"(",
"sender",
")",
"except",
"ImportError",
":",
"return",
"if",
"issubclass",
"(",
"task_class",
".",
"__class__",
",",
"UserTaskMixin",
")",
":",
"arguments_dict",
"=",
"task_class",
".",
"arguments_as_dict",
"(",
"*",
"body",
"[",
"'args'",
"]",
",",
"*",
"*",
"body",
"[",
"'kwargs'",
"]",
")",
"user_id",
"=",
"_get_user_id",
"(",
"arguments_dict",
")",
"task_id",
"=",
"body",
"[",
"'id'",
"]",
"if",
"body",
".",
"get",
"(",
"'callbacks'",
",",
"[",
"]",
")",
":",
"return",
"_create_chain_entry",
"(",
"user_id",
",",
"task_id",
",",
"task_class",
",",
"body",
"[",
"'args'",
"]",
",",
"body",
"[",
"'kwargs'",
"]",
",",
"body",
"[",
"'callbacks'",
"]",
")",
"if",
"body",
".",
"get",
"(",
"'chord'",
",",
"None",
")",
":",
"return",
"_create_chord_entry",
"(",
"task_id",
",",
"task_class",
",",
"body",
",",
"user_id",
")",
"parent",
"=",
"_get_or_create_group_parent",
"(",
"body",
",",
"user_id",
")",
"name",
"=",
"task_class",
".",
"generate_name",
"(",
"arguments_dict",
")",
"total_steps",
"=",
"task_class",
".",
"calculate_total_steps",
"(",
"arguments_dict",
")",
"UserTaskStatus",
".",
"objects",
".",
"get_or_create",
"(",
"task_id",
"=",
"task_id",
",",
"defaults",
"=",
"{",
"'user_id'",
":",
"user_id",
",",
"'parent'",
":",
"parent",
",",
"'name'",
":",
"name",
",",
"'task_class'",
":",
"sender",
",",
"'total_steps'",
":",
"total_steps",
"}",
")",
"if",
"parent",
":",
"parent",
".",
"increment_total_steps",
"(",
"total_steps",
")"
] | 50.222222 | 24.444444 |
def auth(self, auth_method, key):
"""
Sets authentication info for current tag
"""
self.method = auth_method
self.key = key
if self.debug:
print("Changing used auth key to " + str(key) + " using method " + ("A" if auth_method == self.rfid.auth_a else "B"))
|
[
"def",
"auth",
"(",
"self",
",",
"auth_method",
",",
"key",
")",
":",
"self",
".",
"method",
"=",
"auth_method",
"self",
".",
"key",
"=",
"key",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"\"Changing used auth key to \"",
"+",
"str",
"(",
"key",
")",
"+",
"\" using method \"",
"+",
"(",
"\"A\"",
"if",
"auth_method",
"==",
"self",
".",
"rfid",
".",
"auth_a",
"else",
"\"B\"",
")",
")"
] | 34.333333 | 20.777778 |
def username_from_request(request):
""" unloads username from default POST request """
if config.USERNAME_FORM_FIELD in request.POST:
return request.POST[config.USERNAME_FORM_FIELD][:255]
return None
|
[
"def",
"username_from_request",
"(",
"request",
")",
":",
"if",
"config",
".",
"USERNAME_FORM_FIELD",
"in",
"request",
".",
"POST",
":",
"return",
"request",
".",
"POST",
"[",
"config",
".",
"USERNAME_FORM_FIELD",
"]",
"[",
":",
"255",
"]",
"return",
"None"
] | 43 | 12.2 |
def main():
"""
NAME
sort_specimens.py
DESCRIPTION
Reads in a pmag_specimen formatted file and separates it into different components (A,B...etc.)
SYNTAX
sort_specimens.py [-h] [command line options]
INPUT
takes pmag_specimens.txt formatted input file
OPTIONS
-h: prints help message and quits
-f FILE: specify input file, default is 'pmag_specimens.txt'
OUTPUT
makes pmag_specimen formatted files with input filename plus _X_Y
where X is the component name and Y is s,g,t for coordinate system
"""
dir_path='.'
inspec="pmag_specimens.txt"
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=sys.argv[ind+1]
basename=inspec.split('.')[:-1]
inspec=dir_path+"/"+inspec
ofile_base=dir_path+"/"+basename[0]
#
# read in data
#
prior_spec_data,file_type=pmag.magic_read(inspec)
if file_type != 'pmag_specimens':
print(file_type, " this is not a valid pmag_specimens file")
sys.exit()
# get list of specimens in file, components, coordinate systems available
specs,comps,coords=[],[],[]
for spec in prior_spec_data:
if spec['er_specimen_name'] not in specs:specs.append(spec['er_specimen_name'])
if 'specimen_comp_name' not in list(spec.keys()):spec['specimen_comp_name']='A'
        if 'specimen_tilt_correction' not in list(spec.keys()):spec['specimen_tilt_correction']='-1' # assume specimen coordinates
if spec['specimen_comp_name'] not in comps:comps.append(spec['specimen_comp_name'])
if spec['specimen_tilt_correction'] not in coords:coords.append(spec['specimen_tilt_correction'])
# work on separating out components, coordinate systems by specimen
for coord in coords:
print(coord)
for comp in comps:
print(comp)
speclist=[]
for spec in prior_spec_data:
if spec['specimen_tilt_correction']==coord and spec['specimen_comp_name']==comp:speclist.append(spec)
ofile=ofile_base+'_'+coord+'_'+comp+'.txt'
pmag.magic_write(ofile,speclist,'pmag_specimens')
print('coordinate system: ',coord,' component name: ',comp,' saved in ',ofile)
|
[
"def",
"main",
"(",
")",
":",
"dir_path",
"=",
"'.'",
"inspec",
"=",
"\"pmag_specimens.txt\"",
"if",
"'-WD'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-WD'",
")",
"dir_path",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"'-f'",
"in",
"sys",
".",
"argv",
":",
"ind",
"=",
"sys",
".",
"argv",
".",
"index",
"(",
"'-f'",
")",
"inspec",
"=",
"sys",
".",
"argv",
"[",
"ind",
"+",
"1",
"]",
"basename",
"=",
"inspec",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
"inspec",
"=",
"dir_path",
"+",
"\"/\"",
"+",
"inspec",
"ofile_base",
"=",
"dir_path",
"+",
"\"/\"",
"+",
"basename",
"[",
"0",
"]",
"#",
"# read in data",
"#",
"prior_spec_data",
",",
"file_type",
"=",
"pmag",
".",
"magic_read",
"(",
"inspec",
")",
"if",
"file_type",
"!=",
"'pmag_specimens'",
":",
"print",
"(",
"file_type",
",",
"\" this is not a valid pmag_specimens file\"",
")",
"sys",
".",
"exit",
"(",
")",
"# get list of specimens in file, components, coordinate systems available",
"specs",
",",
"comps",
",",
"coords",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"spec",
"in",
"prior_spec_data",
":",
"if",
"spec",
"[",
"'er_specimen_name'",
"]",
"not",
"in",
"specs",
":",
"specs",
".",
"append",
"(",
"spec",
"[",
"'er_specimen_name'",
"]",
")",
"if",
"'specimen_comp_name'",
"not",
"in",
"list",
"(",
"spec",
".",
"keys",
"(",
")",
")",
":",
"spec",
"[",
"'specimen_comp_name'",
"]",
"=",
"'A'",
"if",
"'specimen_tilt_correction'",
"not",
"in",
"list",
"(",
"spec",
".",
"keys",
"(",
")",
")",
":",
"spec",
"[",
"'tilt_correction'",
"]",
"=",
"'-1'",
"# assume specimen coordinates",
"if",
"spec",
"[",
"'specimen_comp_name'",
"]",
"not",
"in",
"comps",
":",
"comps",
".",
"append",
"(",
"spec",
"[",
"'specimen_comp_name'",
"]",
")",
"if",
"spec",
"[",
"'specimen_tilt_correction'",
"]",
"not",
"in",
"coords",
":",
"coords",
".",
"append",
"(",
"spec",
"[",
"'specimen_tilt_correction'",
"]",
")",
"# work on separating out components, coordinate systems by specimen",
"for",
"coord",
"in",
"coords",
":",
"print",
"(",
"coord",
")",
"for",
"comp",
"in",
"comps",
":",
"print",
"(",
"comp",
")",
"speclist",
"=",
"[",
"]",
"for",
"spec",
"in",
"prior_spec_data",
":",
"if",
"spec",
"[",
"'specimen_tilt_correction'",
"]",
"==",
"coord",
"and",
"spec",
"[",
"'specimen_comp_name'",
"]",
"==",
"comp",
":",
"speclist",
".",
"append",
"(",
"spec",
")",
"ofile",
"=",
"ofile_base",
"+",
"'_'",
"+",
"coord",
"+",
"'_'",
"+",
"comp",
"+",
"'.txt'",
"pmag",
".",
"magic_write",
"(",
"ofile",
",",
"speclist",
",",
"'pmag_specimens'",
")",
"print",
"(",
"'coordinate system: '",
",",
"coord",
",",
"' component name: '",
",",
"comp",
",",
"' saved in '",
",",
"ofile",
")"
] | 37.758065 | 25.080645 |
def get_snapshots(self, si, logger, vm_uuid):
"""
        Gets the snapshots of a virtual machine
:param vim.ServiceInstance si: py_vmomi service instance
:param logger: Logger
:param vm_uuid: uuid of the virtual machine
"""
vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid)
logger.info("Get snapshots")
snapshots = SnapshotRetriever.get_vm_snapshots(vm)
return snapshots.keys()
|
[
"def",
"get_snapshots",
"(",
"self",
",",
"si",
",",
"logger",
",",
"vm_uuid",
")",
":",
"vm",
"=",
"self",
".",
"pyvmomi_service",
".",
"find_by_uuid",
"(",
"si",
",",
"vm_uuid",
")",
"logger",
".",
"info",
"(",
"\"Get snapshots\"",
")",
"snapshots",
"=",
"SnapshotRetriever",
".",
"get_vm_snapshots",
"(",
"vm",
")",
"return",
"snapshots",
".",
"keys",
"(",
")"
] | 36.916667 | 12.416667 |
def classify(self, peer_dir_meta):
"""Classify this entry as 'new', 'unmodified', or 'modified'."""
assert self.classification is None
peer_entry_meta = None
if peer_dir_meta:
# Metadata is generally available, so we can detect 'new' or 'modified'
peer_entry_meta = peer_dir_meta.get(self.name, False)
if self.is_dir():
# Directories are considered 'unmodified' (would require deep traversal
# to check otherwise)
if peer_entry_meta:
self.classification = "unmodified"
else:
self.classification = "new"
elif peer_entry_meta:
# File entries can be classified as modified/unmodified
self.ps_size = peer_entry_meta.get("s")
self.ps_mtime = peer_entry_meta.get("m")
self.ps_utime = peer_entry_meta.get("u")
if (
self.size == self.ps_size
and FileEntry._eps_compare(self.mtime, self.ps_mtime) == 0
):
self.classification = "unmodified"
else:
self.classification = "modified"
else:
# A new file entry
self.classification = "new"
else:
# No metadata available:
if self.is_dir():
# Directories are considered 'unmodified' (would require deep traversal
# to check otherwise)
self.classification = "unmodified"
else:
# That's all we know, but EntryPair.classify() may adjust this
self.classification = "existing"
if PRINT_CLASSIFICATIONS:
write("classify {}".format(self))
assert self.classification in ENTRY_CLASSIFICATIONS
return self.classification
|
[
"def",
"classify",
"(",
"self",
",",
"peer_dir_meta",
")",
":",
"assert",
"self",
".",
"classification",
"is",
"None",
"peer_entry_meta",
"=",
"None",
"if",
"peer_dir_meta",
":",
"# Metadata is generally available, so we can detect 'new' or 'modified'",
"peer_entry_meta",
"=",
"peer_dir_meta",
".",
"get",
"(",
"self",
".",
"name",
",",
"False",
")",
"if",
"self",
".",
"is_dir",
"(",
")",
":",
"# Directories are considered 'unmodified' (would require deep traversal",
"# to check otherwise)",
"if",
"peer_entry_meta",
":",
"self",
".",
"classification",
"=",
"\"unmodified\"",
"else",
":",
"self",
".",
"classification",
"=",
"\"new\"",
"elif",
"peer_entry_meta",
":",
"# File entries can be classified as modified/unmodified",
"self",
".",
"ps_size",
"=",
"peer_entry_meta",
".",
"get",
"(",
"\"s\"",
")",
"self",
".",
"ps_mtime",
"=",
"peer_entry_meta",
".",
"get",
"(",
"\"m\"",
")",
"self",
".",
"ps_utime",
"=",
"peer_entry_meta",
".",
"get",
"(",
"\"u\"",
")",
"if",
"(",
"self",
".",
"size",
"==",
"self",
".",
"ps_size",
"and",
"FileEntry",
".",
"_eps_compare",
"(",
"self",
".",
"mtime",
",",
"self",
".",
"ps_mtime",
")",
"==",
"0",
")",
":",
"self",
".",
"classification",
"=",
"\"unmodified\"",
"else",
":",
"self",
".",
"classification",
"=",
"\"modified\"",
"else",
":",
"# A new file entry",
"self",
".",
"classification",
"=",
"\"new\"",
"else",
":",
"# No metadata available:",
"if",
"self",
".",
"is_dir",
"(",
")",
":",
"# Directories are considered 'unmodified' (would require deep traversal",
"# to check otherwise)",
"self",
".",
"classification",
"=",
"\"unmodified\"",
"else",
":",
"# That's all we know, but EntryPair.classify() may adjust this",
"self",
".",
"classification",
"=",
"\"existing\"",
"if",
"PRINT_CLASSIFICATIONS",
":",
"write",
"(",
"\"classify {}\"",
".",
"format",
"(",
"self",
")",
")",
"assert",
"self",
".",
"classification",
"in",
"ENTRY_CLASSIFICATIONS",
"return",
"self",
".",
"classification"
] | 42.727273 | 16.863636 |
async def reset_wallet(self) -> str:
"""
Close and delete HolderProver wallet, then create and open a replacement on prior link secret.
Note that this operation effectively destroys private keys for credential definitions. Its
intended use is primarily for testing and demonstration.
Raise AbsentLinkSecret if link secret not set.
:return: wallet name
"""
LOGGER.debug('HolderProver.reset_wallet >>>')
self._assert_link_secret('reset_wallet')
seed = self.wallet._seed
wallet_name = self.wallet.name
wallet_cfg = self.wallet.cfg
wallet_xtype = self.wallet.xtype
wallet_access_creds = self.wallet.access_creds
await self.wallet.close()
await self.wallet.remove()
self.wallet = await Wallet(
seed,
wallet_name,
wallet_xtype,
wallet_cfg,
wallet_access_creds).create()
await self.wallet.open()
await self.create_link_secret(self._link_secret) # carry over link secret to new wallet
rv = self.wallet.name
LOGGER.debug('HolderProver.reset_wallet <<< %s', rv)
return rv
|
[
"async",
"def",
"reset_wallet",
"(",
"self",
")",
"->",
"str",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.reset_wallet >>>'",
")",
"self",
".",
"_assert_link_secret",
"(",
"'reset_wallet'",
")",
"seed",
"=",
"self",
".",
"wallet",
".",
"_seed",
"wallet_name",
"=",
"self",
".",
"wallet",
".",
"name",
"wallet_cfg",
"=",
"self",
".",
"wallet",
".",
"cfg",
"wallet_xtype",
"=",
"self",
".",
"wallet",
".",
"xtype",
"wallet_access_creds",
"=",
"self",
".",
"wallet",
".",
"access_creds",
"await",
"self",
".",
"wallet",
".",
"close",
"(",
")",
"await",
"self",
".",
"wallet",
".",
"remove",
"(",
")",
"self",
".",
"wallet",
"=",
"await",
"Wallet",
"(",
"seed",
",",
"wallet_name",
",",
"wallet_xtype",
",",
"wallet_cfg",
",",
"wallet_access_creds",
")",
".",
"create",
"(",
")",
"await",
"self",
".",
"wallet",
".",
"open",
"(",
")",
"await",
"self",
".",
"create_link_secret",
"(",
"self",
".",
"_link_secret",
")",
"# carry over link secret to new wallet",
"rv",
"=",
"self",
".",
"wallet",
".",
"name",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.reset_wallet <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | 32.527778 | 20.861111 |
def _get_merge_keys(self):
"""
Note: has side effects (copy/delete key columns)
Parameters
----------
left
right
on
Returns
-------
left_keys, right_keys
"""
left_keys = []
right_keys = []
join_names = []
right_drop = []
left_drop = []
left, right = self.left, self.right
is_lkey = lambda x: is_array_like(x) and len(x) == len(left)
is_rkey = lambda x: is_array_like(x) and len(x) == len(right)
# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
# user could, for example, request 'left_index' and 'left_by'. In a
# regular pd.merge(), users cannot specify both 'left_index' and
# 'left_on'. (Instead, users have a MultiIndex). That means the
# self.left_on in this function is always empty in a pd.merge(), but
# a pd.merge_asof(left_index=True, left_by=...) will result in a
# self.left_on array with a None in the middle of it. This requires
# a work-around as designated in the code below.
# See _validate_specification() for where this happens.
# ugh, spaghetti re #733
if _any(self.left_on) and _any(self.right_on):
for lk, rk in zip(self.left_on, self.right_on):
if is_lkey(lk):
left_keys.append(lk)
if is_rkey(rk):
right_keys.append(rk)
join_names.append(None) # what to do?
else:
if rk is not None:
right_keys.append(
right._get_label_or_level_values(rk))
join_names.append(rk)
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
join_names.append(right.index.name)
else:
if not is_rkey(rk):
if rk is not None:
right_keys.append(
right._get_label_or_level_values(rk))
else:
# work-around for merge_asof(right_index=True)
right_keys.append(right.index)
if lk is not None and lk == rk:
# avoid key upcast in corner case (length-0)
if len(left) > 0:
right_drop.append(rk)
else:
left_drop.append(lk)
else:
right_keys.append(rk)
if lk is not None:
left_keys.append(left._get_label_or_level_values(lk))
join_names.append(lk)
else:
# work-around for merge_asof(left_index=True)
left_keys.append(left.index)
join_names.append(left.index.name)
elif _any(self.left_on):
for k in self.left_on:
if is_lkey(k):
left_keys.append(k)
join_names.append(None)
else:
left_keys.append(left._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.right.index, MultiIndex):
right_keys = [lev._values.take(lev_codes) for lev, lev_codes
in zip(self.right.index.levels,
self.right.index.codes)]
else:
right_keys = [self.right.index._values]
elif _any(self.right_on):
for k in self.right_on:
if is_rkey(k):
right_keys.append(k)
join_names.append(None)
else:
right_keys.append(right._get_label_or_level_values(k))
join_names.append(k)
if isinstance(self.left.index, MultiIndex):
left_keys = [lev._values.take(lev_codes) for lev, lev_codes
in zip(self.left.index.levels,
self.left.index.codes)]
else:
left_keys = [self.left.index.values]
if left_drop:
self.left = self.left._drop_labels_or_levels(left_drop)
if right_drop:
self.right = self.right._drop_labels_or_levels(right_drop)
return left_keys, right_keys, join_names
|
[
"def",
"_get_merge_keys",
"(",
"self",
")",
":",
"left_keys",
"=",
"[",
"]",
"right_keys",
"=",
"[",
"]",
"join_names",
"=",
"[",
"]",
"right_drop",
"=",
"[",
"]",
"left_drop",
"=",
"[",
"]",
"left",
",",
"right",
"=",
"self",
".",
"left",
",",
"self",
".",
"right",
"is_lkey",
"=",
"lambda",
"x",
":",
"is_array_like",
"(",
"x",
")",
"and",
"len",
"(",
"x",
")",
"==",
"len",
"(",
"left",
")",
"is_rkey",
"=",
"lambda",
"x",
":",
"is_array_like",
"(",
"x",
")",
"and",
"len",
"(",
"x",
")",
"==",
"len",
"(",
"right",
")",
"# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A",
"# user could, for example, request 'left_index' and 'left_by'. In a",
"# regular pd.merge(), users cannot specify both 'left_index' and",
"# 'left_on'. (Instead, users have a MultiIndex). That means the",
"# self.left_on in this function is always empty in a pd.merge(), but",
"# a pd.merge_asof(left_index=True, left_by=...) will result in a",
"# self.left_on array with a None in the middle of it. This requires",
"# a work-around as designated in the code below.",
"# See _validate_specification() for where this happens.",
"# ugh, spaghetti re #733",
"if",
"_any",
"(",
"self",
".",
"left_on",
")",
"and",
"_any",
"(",
"self",
".",
"right_on",
")",
":",
"for",
"lk",
",",
"rk",
"in",
"zip",
"(",
"self",
".",
"left_on",
",",
"self",
".",
"right_on",
")",
":",
"if",
"is_lkey",
"(",
"lk",
")",
":",
"left_keys",
".",
"append",
"(",
"lk",
")",
"if",
"is_rkey",
"(",
"rk",
")",
":",
"right_keys",
".",
"append",
"(",
"rk",
")",
"join_names",
".",
"append",
"(",
"None",
")",
"# what to do?",
"else",
":",
"if",
"rk",
"is",
"not",
"None",
":",
"right_keys",
".",
"append",
"(",
"right",
".",
"_get_label_or_level_values",
"(",
"rk",
")",
")",
"join_names",
".",
"append",
"(",
"rk",
")",
"else",
":",
"# work-around for merge_asof(right_index=True)",
"right_keys",
".",
"append",
"(",
"right",
".",
"index",
")",
"join_names",
".",
"append",
"(",
"right",
".",
"index",
".",
"name",
")",
"else",
":",
"if",
"not",
"is_rkey",
"(",
"rk",
")",
":",
"if",
"rk",
"is",
"not",
"None",
":",
"right_keys",
".",
"append",
"(",
"right",
".",
"_get_label_or_level_values",
"(",
"rk",
")",
")",
"else",
":",
"# work-around for merge_asof(right_index=True)",
"right_keys",
".",
"append",
"(",
"right",
".",
"index",
")",
"if",
"lk",
"is",
"not",
"None",
"and",
"lk",
"==",
"rk",
":",
"# avoid key upcast in corner case (length-0)",
"if",
"len",
"(",
"left",
")",
">",
"0",
":",
"right_drop",
".",
"append",
"(",
"rk",
")",
"else",
":",
"left_drop",
".",
"append",
"(",
"lk",
")",
"else",
":",
"right_keys",
".",
"append",
"(",
"rk",
")",
"if",
"lk",
"is",
"not",
"None",
":",
"left_keys",
".",
"append",
"(",
"left",
".",
"_get_label_or_level_values",
"(",
"lk",
")",
")",
"join_names",
".",
"append",
"(",
"lk",
")",
"else",
":",
"# work-around for merge_asof(left_index=True)",
"left_keys",
".",
"append",
"(",
"left",
".",
"index",
")",
"join_names",
".",
"append",
"(",
"left",
".",
"index",
".",
"name",
")",
"elif",
"_any",
"(",
"self",
".",
"left_on",
")",
":",
"for",
"k",
"in",
"self",
".",
"left_on",
":",
"if",
"is_lkey",
"(",
"k",
")",
":",
"left_keys",
".",
"append",
"(",
"k",
")",
"join_names",
".",
"append",
"(",
"None",
")",
"else",
":",
"left_keys",
".",
"append",
"(",
"left",
".",
"_get_label_or_level_values",
"(",
"k",
")",
")",
"join_names",
".",
"append",
"(",
"k",
")",
"if",
"isinstance",
"(",
"self",
".",
"right",
".",
"index",
",",
"MultiIndex",
")",
":",
"right_keys",
"=",
"[",
"lev",
".",
"_values",
".",
"take",
"(",
"lev_codes",
")",
"for",
"lev",
",",
"lev_codes",
"in",
"zip",
"(",
"self",
".",
"right",
".",
"index",
".",
"levels",
",",
"self",
".",
"right",
".",
"index",
".",
"codes",
")",
"]",
"else",
":",
"right_keys",
"=",
"[",
"self",
".",
"right",
".",
"index",
".",
"_values",
"]",
"elif",
"_any",
"(",
"self",
".",
"right_on",
")",
":",
"for",
"k",
"in",
"self",
".",
"right_on",
":",
"if",
"is_rkey",
"(",
"k",
")",
":",
"right_keys",
".",
"append",
"(",
"k",
")",
"join_names",
".",
"append",
"(",
"None",
")",
"else",
":",
"right_keys",
".",
"append",
"(",
"right",
".",
"_get_label_or_level_values",
"(",
"k",
")",
")",
"join_names",
".",
"append",
"(",
"k",
")",
"if",
"isinstance",
"(",
"self",
".",
"left",
".",
"index",
",",
"MultiIndex",
")",
":",
"left_keys",
"=",
"[",
"lev",
".",
"_values",
".",
"take",
"(",
"lev_codes",
")",
"for",
"lev",
",",
"lev_codes",
"in",
"zip",
"(",
"self",
".",
"left",
".",
"index",
".",
"levels",
",",
"self",
".",
"left",
".",
"index",
".",
"codes",
")",
"]",
"else",
":",
"left_keys",
"=",
"[",
"self",
".",
"left",
".",
"index",
".",
"values",
"]",
"if",
"left_drop",
":",
"self",
".",
"left",
"=",
"self",
".",
"left",
".",
"_drop_labels_or_levels",
"(",
"left_drop",
")",
"if",
"right_drop",
":",
"self",
".",
"right",
"=",
"self",
".",
"right",
".",
"_drop_labels_or_levels",
"(",
"right_drop",
")",
"return",
"left_keys",
",",
"right_keys",
",",
"join_names"
] | 41.405405 | 18.810811 |
def logout(self):
"""
        Log out of the session
:return: self
"""
self.req(API_ACCOUNT_LOGOUT % self.ck())
self.cookies = {}
self.user_alias = None
self.persist()
|
[
"def",
"logout",
"(",
"self",
")",
":",
"self",
".",
"req",
"(",
"API_ACCOUNT_LOGOUT",
"%",
"self",
".",
"ck",
"(",
")",
")",
"self",
".",
"cookies",
"=",
"{",
"}",
"self",
".",
"user_alias",
"=",
"None",
"self",
".",
"persist",
"(",
")"
] | 20.5 | 15.3 |
def rewrite_file_imports(item, vendored_libs, vendor_dir):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text(encoding='utf-8')
renames = LIBRARY_RENAMES
for k in LIBRARY_RENAMES.keys():
if k not in vendored_libs:
vendored_libs.append(k)
for lib in vendored_libs:
to_lib = lib
if lib in renames:
to_lib = renames[lib]
text = re.sub(
r'([\n\s]*)import %s([\n\s\.]+)' % lib,
r'\1import %s\2' % to_lib,
text,
)
text = re.sub(
r'([\n\s]*)from %s([\s\.])+' % lib,
r'\1from %s\2' % to_lib,
text,
)
text = re.sub(
r"(\n\s*)__import__\('%s([\s'\.])+" % lib,
r"\1__import__('%s\2" % to_lib,
text,
)
item.write_text(text, encoding='utf-8')
|
[
"def",
"rewrite_file_imports",
"(",
"item",
",",
"vendored_libs",
",",
"vendor_dir",
")",
":",
"text",
"=",
"item",
".",
"read_text",
"(",
"encoding",
"=",
"'utf-8'",
")",
"renames",
"=",
"LIBRARY_RENAMES",
"for",
"k",
"in",
"LIBRARY_RENAMES",
".",
"keys",
"(",
")",
":",
"if",
"k",
"not",
"in",
"vendored_libs",
":",
"vendored_libs",
".",
"append",
"(",
"k",
")",
"for",
"lib",
"in",
"vendored_libs",
":",
"to_lib",
"=",
"lib",
"if",
"lib",
"in",
"renames",
":",
"to_lib",
"=",
"renames",
"[",
"lib",
"]",
"text",
"=",
"re",
".",
"sub",
"(",
"r'([\\n\\s]*)import %s([\\n\\s\\.]+)'",
"%",
"lib",
",",
"r'\\1import %s\\2'",
"%",
"to_lib",
",",
"text",
",",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r'([\\n\\s]*)from %s([\\s\\.])+'",
"%",
"lib",
",",
"r'\\1from %s\\2'",
"%",
"to_lib",
",",
"text",
",",
")",
"text",
"=",
"re",
".",
"sub",
"(",
"r\"(\\n\\s*)__import__\\('%s([\\s'\\.])+\"",
"%",
"lib",
",",
"r\"\\1__import__('%s\\2\"",
"%",
"to_lib",
",",
"text",
",",
")",
"item",
".",
"write_text",
"(",
"text",
",",
"encoding",
"=",
"'utf-8'",
")"
] | 32.185185 | 13.296296 |
def _get_filename(self, key, filename):
"""Write key to file. Either this method or
:meth:`~simplekv.KeyValueStore._get_file` will be called by
:meth:`~simplekv.KeyValueStore.get_file`. This method only accepts
filenames and will open the file with a mode of ``wb``, then call
:meth:`~simplekv.KeyValueStore._get_file`.
:param key: Key to be retrieved
:param filename: Filename to write to
"""
with open(filename, 'wb') as dest:
return self._get_file(key, dest)
|
[
"def",
"_get_filename",
"(",
"self",
",",
"key",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"dest",
":",
"return",
"self",
".",
"_get_file",
"(",
"key",
",",
"dest",
")"
] | 44.583333 | 13.083333 |
def ingest(self, co, classname=None, code_objects={}, show_asm=None):
"""
Pick out tokens from an uncompyle6 code object, and transform them,
returning a list of uncompyle6 'Token's.
The transformations are made to assist the deparsing grammar.
        Specifically:
- various types of LOAD_CONST's are categorized in terms of what they load
- COME_FROM instructions are added to assist parsing control structures
- MAKE_FUNCTION and FUNCTION_CALLS append the number of positional arguments
Also, when we encounter certain tokens, we add them to a set which will cause custom
grammar rules. Specifically, variable arg tokens like MAKE_FUNCTION or BUILD_LIST
cause specific rules for the specific number of arguments they take.
"""
if not show_asm:
show_asm = self.show_asm
bytecode = self.build_instructions(co)
# show_asm = 'after'
if show_asm in ('both', 'before'):
for instr in bytecode.get_instructions(co):
print(instr.disassemble())
# Container for tokens
tokens = []
customize = {}
if self.is_pypy:
customize['PyPy'] = 1
codelen = len(self.code)
free, names, varnames = self.unmangle_code_names(co, classname)
self.names = names
# Scan for assertions. Later we will
# turn 'LOAD_GLOBAL' to 'LOAD_ASSERT'.
# 'LOAD_ASSERT' is used in assert statements.
self.load_asserts = set()
for i in self.op_range(0, codelen):
# We need to detect the difference between:
# raise AssertionError
# and
# assert ...
if (self.code[i] == self.opc.JUMP_IF_TRUE and
i + 4 < codelen and
self.code[i+3] == self.opc.POP_TOP and
self.code[i+4] == self.opc.LOAD_GLOBAL):
if names[self.get_argument(i+4)] == 'AssertionError':
self.load_asserts.add(i+4)
jump_targets = self.find_jump_targets(show_asm)
# contains (code, [addrRefToCode])
last_stmt = self.next_stmt[0]
i = self.next_stmt[last_stmt]
replace = {}
while i < codelen - 1:
if self.lines[last_stmt].next > i:
# Distinguish "print ..." from "print ...,"
if self.code[last_stmt] == self.opc.PRINT_ITEM:
if self.code[i] == self.opc.PRINT_ITEM:
replace[i] = 'PRINT_ITEM_CONT'
elif self.code[i] == self.opc.PRINT_NEWLINE:
replace[i] = 'PRINT_NEWLINE_CONT'
last_stmt = i
i = self.next_stmt[i]
extended_arg = 0
for offset in self.op_range(0, codelen):
op = self.code[offset]
op_name = self.opname[op]
oparg = None; pattr = None
if offset in jump_targets:
jump_idx = 0
# We want to process COME_FROMs to the same offset to be in *descending*
# offset order so we have the larger range or biggest instruction interval
# last. (I think they are sorted in increasing order, but for safety
# we sort them). That way, specific COME_FROM tags will match up
# properly. For example, a "loop" with an "if" nested in it should have the
# "loop" tag last so the grammar rule matches that properly.
last_jump_offset = -1
for jump_offset in sorted(jump_targets[offset], reverse=True):
if jump_offset != last_jump_offset:
tokens.append(Token(
'COME_FROM', jump_offset, repr(jump_offset),
offset="%s_%d" % (offset, jump_idx),
has_arg = True))
jump_idx += 1
last_jump_offset = jump_offset
elif offset in self.thens:
tokens.append(Token(
'THEN', None, self.thens[offset],
offset="%s_0" % offset,
has_arg = True))
has_arg = (op >= self.opc.HAVE_ARGUMENT)
if has_arg:
oparg = self.get_argument(offset) + extended_arg
extended_arg = 0
if op == self.opc.EXTENDED_ARG:
extended_arg = oparg * L65536
continue
if op in self.opc.CONST_OPS:
const = co.co_consts[oparg]
# We can't use inspect.iscode() because we may be
# using a different version of Python than the
# one that this was byte-compiled on. So the code
# types may mismatch.
if hasattr(const, 'co_name'):
oparg = const
if const.co_name == '<lambda>':
assert op_name == 'LOAD_CONST'
op_name = 'LOAD_LAMBDA'
elif const.co_name == self.genexpr_name:
op_name = 'LOAD_GENEXPR'
elif const.co_name == '<dictcomp>':
op_name = 'LOAD_DICTCOMP'
elif const.co_name == '<setcomp>':
op_name = 'LOAD_SETCOMP'
# verify uses 'pattr' for comparison, since 'attr'
# now holds Code(const) and thus can not be used
# for comparison (todo: think about changing this)
# pattr = 'code_object @ 0x%x %s->%s' % \
# (id(const), const.co_filename, const.co_name)
pattr = '<code_object ' + const.co_name + '>'
else:
if oparg < len(co.co_consts):
argval, _ = _get_const_info(oparg, co.co_consts)
# Why don't we use _ above for "pattr" rather than "const"?
# This *is* a little hoaky, but we have to coordinate with
# other parts like n_LOAD_CONST in pysource.py for example.
pattr = const
pass
elif op in self.opc.NAME_OPS:
pattr = names[oparg]
elif op in self.opc.JREL_OPS:
pattr = repr(offset + 3 + oparg)
if op == self.opc.JUMP_FORWARD:
target = self.get_target(offset)
# FIXME: this is a hack to catch stuff like:
# if x: continue
# the "continue" is not on a new line.
if len(tokens) and tokens[-1].kind == 'JUMP_BACK':
tokens[-1].kind = intern('CONTINUE')
elif op in self.opc.JABS_OPS:
pattr = repr(oparg)
elif op in self.opc.LOCAL_OPS:
pattr = varnames[oparg]
elif op in self.opc.COMPARE_OPS:
pattr = self.opc.cmp_op[oparg]
elif op in self.opc.FREE_OPS:
pattr = free[oparg]
if op in self.varargs_ops:
# CE - Hack for >= 2.5
# Now all values loaded via LOAD_CLOSURE are packed into
# a tuple before calling MAKE_CLOSURE.
if (self.version >= 2.5 and op == self.opc.BUILD_TUPLE and
self.code[self.prev[offset]] == self.opc.LOAD_CLOSURE):
continue
else:
op_name = '%s_%d' % (op_name, oparg)
customize[op_name] = oparg
elif self.version > 2.0 and op == self.opc.CONTINUE_LOOP:
customize[op_name] = 0
elif op_name in """
CONTINUE_LOOP EXEC_STMT LOAD_LISTCOMP LOAD_SETCOMP
""".split():
customize[op_name] = 0
elif op == self.opc.JUMP_ABSOLUTE:
# Further classify JUMP_ABSOLUTE into backward jumps
# which are used in loops, and "CONTINUE" jumps which
# may appear in a "continue" statement. The loop-type
# and continue-type jumps will help us classify loop
# boundaries The continue-type jumps help us get
# "continue" statements with would otherwise be turned
# into a "pass" statement because JUMPs are sometimes
# ignored in rules as just boundary overhead. In
# comprehensions we might sometimes classify JUMP_BACK
# as CONTINUE, but that's okay since we add a grammar
# rule for that.
target = self.get_target(offset)
if target <= offset:
op_name = 'JUMP_BACK'
if (offset in self.stmts
and self.code[offset+3] not in (self.opc.END_FINALLY,
self.opc.POP_BLOCK)):
if ((offset in self.linestarts and
tokens[-1].kind == 'JUMP_BACK')
or offset not in self.not_continue):
op_name = 'CONTINUE'
else:
# FIXME: this is a hack to catch stuff like:
# if x: continue
# the "continue" is not on a new line.
if tokens[-1].kind == 'JUMP_BACK':
# We need 'intern' since we have
                            # already processed the previous
# token.
tokens[-1].kind = intern('CONTINUE')
elif op == self.opc.LOAD_GLOBAL:
if offset in self.load_asserts:
op_name = 'LOAD_ASSERT'
elif op == self.opc.RETURN_VALUE:
if offset in self.return_end_ifs:
op_name = 'RETURN_END_IF'
linestart = self.linestarts.get(offset, None)
if offset not in replace:
tokens.append(Token(
op_name, oparg, pattr, offset, linestart, op,
has_arg, self.opc))
else:
tokens.append(Token(
replace[offset], oparg, pattr, offset, linestart, op,
has_arg, self.opc))
pass
pass
if show_asm in ('both', 'after'):
for t in tokens:
print(t.format(line_prefix='L.'))
print()
return tokens, customize
|
[
"def",
"ingest",
"(",
"self",
",",
"co",
",",
"classname",
"=",
"None",
",",
"code_objects",
"=",
"{",
"}",
",",
"show_asm",
"=",
"None",
")",
":",
"if",
"not",
"show_asm",
":",
"show_asm",
"=",
"self",
".",
"show_asm",
"bytecode",
"=",
"self",
".",
"build_instructions",
"(",
"co",
")",
"# show_asm = 'after'",
"if",
"show_asm",
"in",
"(",
"'both'",
",",
"'before'",
")",
":",
"for",
"instr",
"in",
"bytecode",
".",
"get_instructions",
"(",
"co",
")",
":",
"print",
"(",
"instr",
".",
"disassemble",
"(",
")",
")",
"# Container for tokens",
"tokens",
"=",
"[",
"]",
"customize",
"=",
"{",
"}",
"if",
"self",
".",
"is_pypy",
":",
"customize",
"[",
"'PyPy'",
"]",
"=",
"1",
"codelen",
"=",
"len",
"(",
"self",
".",
"code",
")",
"free",
",",
"names",
",",
"varnames",
"=",
"self",
".",
"unmangle_code_names",
"(",
"co",
",",
"classname",
")",
"self",
".",
"names",
"=",
"names",
"# Scan for assertions. Later we will",
"# turn 'LOAD_GLOBAL' to 'LOAD_ASSERT'.",
"# 'LOAD_ASSERT' is used in assert statements.",
"self",
".",
"load_asserts",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"self",
".",
"op_range",
"(",
"0",
",",
"codelen",
")",
":",
"# We need to detect the difference between:",
"# raise AssertionError",
"# and",
"# assert ...",
"if",
"(",
"self",
".",
"code",
"[",
"i",
"]",
"==",
"self",
".",
"opc",
".",
"JUMP_IF_TRUE",
"and",
"i",
"+",
"4",
"<",
"codelen",
"and",
"self",
".",
"code",
"[",
"i",
"+",
"3",
"]",
"==",
"self",
".",
"opc",
".",
"POP_TOP",
"and",
"self",
".",
"code",
"[",
"i",
"+",
"4",
"]",
"==",
"self",
".",
"opc",
".",
"LOAD_GLOBAL",
")",
":",
"if",
"names",
"[",
"self",
".",
"get_argument",
"(",
"i",
"+",
"4",
")",
"]",
"==",
"'AssertionError'",
":",
"self",
".",
"load_asserts",
".",
"add",
"(",
"i",
"+",
"4",
")",
"jump_targets",
"=",
"self",
".",
"find_jump_targets",
"(",
"show_asm",
")",
"# contains (code, [addrRefToCode])",
"last_stmt",
"=",
"self",
".",
"next_stmt",
"[",
"0",
"]",
"i",
"=",
"self",
".",
"next_stmt",
"[",
"last_stmt",
"]",
"replace",
"=",
"{",
"}",
"while",
"i",
"<",
"codelen",
"-",
"1",
":",
"if",
"self",
".",
"lines",
"[",
"last_stmt",
"]",
".",
"next",
">",
"i",
":",
"# Distinguish \"print ...\" from \"print ...,\"",
"if",
"self",
".",
"code",
"[",
"last_stmt",
"]",
"==",
"self",
".",
"opc",
".",
"PRINT_ITEM",
":",
"if",
"self",
".",
"code",
"[",
"i",
"]",
"==",
"self",
".",
"opc",
".",
"PRINT_ITEM",
":",
"replace",
"[",
"i",
"]",
"=",
"'PRINT_ITEM_CONT'",
"elif",
"self",
".",
"code",
"[",
"i",
"]",
"==",
"self",
".",
"opc",
".",
"PRINT_NEWLINE",
":",
"replace",
"[",
"i",
"]",
"=",
"'PRINT_NEWLINE_CONT'",
"last_stmt",
"=",
"i",
"i",
"=",
"self",
".",
"next_stmt",
"[",
"i",
"]",
"extended_arg",
"=",
"0",
"for",
"offset",
"in",
"self",
".",
"op_range",
"(",
"0",
",",
"codelen",
")",
":",
"op",
"=",
"self",
".",
"code",
"[",
"offset",
"]",
"op_name",
"=",
"self",
".",
"opname",
"[",
"op",
"]",
"oparg",
"=",
"None",
"pattr",
"=",
"None",
"if",
"offset",
"in",
"jump_targets",
":",
"jump_idx",
"=",
"0",
"# We want to process COME_FROMs to the same offset to be in *descending*",
"# offset order so we have the larger range or biggest instruction interval",
"# last. (I think they are sorted in increasing order, but for safety",
"# we sort them). That way, specific COME_FROM tags will match up",
"# properly. For example, a \"loop\" with an \"if\" nested in it should have the",
"# \"loop\" tag last so the grammar rule matches that properly.",
"last_jump_offset",
"=",
"-",
"1",
"for",
"jump_offset",
"in",
"sorted",
"(",
"jump_targets",
"[",
"offset",
"]",
",",
"reverse",
"=",
"True",
")",
":",
"if",
"jump_offset",
"!=",
"last_jump_offset",
":",
"tokens",
".",
"append",
"(",
"Token",
"(",
"'COME_FROM'",
",",
"jump_offset",
",",
"repr",
"(",
"jump_offset",
")",
",",
"offset",
"=",
"\"%s_%d\"",
"%",
"(",
"offset",
",",
"jump_idx",
")",
",",
"has_arg",
"=",
"True",
")",
")",
"jump_idx",
"+=",
"1",
"last_jump_offset",
"=",
"jump_offset",
"elif",
"offset",
"in",
"self",
".",
"thens",
":",
"tokens",
".",
"append",
"(",
"Token",
"(",
"'THEN'",
",",
"None",
",",
"self",
".",
"thens",
"[",
"offset",
"]",
",",
"offset",
"=",
"\"%s_0\"",
"%",
"offset",
",",
"has_arg",
"=",
"True",
")",
")",
"has_arg",
"=",
"(",
"op",
">=",
"self",
".",
"opc",
".",
"HAVE_ARGUMENT",
")",
"if",
"has_arg",
":",
"oparg",
"=",
"self",
".",
"get_argument",
"(",
"offset",
")",
"+",
"extended_arg",
"extended_arg",
"=",
"0",
"if",
"op",
"==",
"self",
".",
"opc",
".",
"EXTENDED_ARG",
":",
"extended_arg",
"=",
"oparg",
"*",
"L65536",
"continue",
"if",
"op",
"in",
"self",
".",
"opc",
".",
"CONST_OPS",
":",
"const",
"=",
"co",
".",
"co_consts",
"[",
"oparg",
"]",
"# We can't use inspect.iscode() because we may be",
"# using a different version of Python than the",
"# one that this was byte-compiled on. So the code",
"# types may mismatch.",
"if",
"hasattr",
"(",
"const",
",",
"'co_name'",
")",
":",
"oparg",
"=",
"const",
"if",
"const",
".",
"co_name",
"==",
"'<lambda>'",
":",
"assert",
"op_name",
"==",
"'LOAD_CONST'",
"op_name",
"=",
"'LOAD_LAMBDA'",
"elif",
"const",
".",
"co_name",
"==",
"self",
".",
"genexpr_name",
":",
"op_name",
"=",
"'LOAD_GENEXPR'",
"elif",
"const",
".",
"co_name",
"==",
"'<dictcomp>'",
":",
"op_name",
"=",
"'LOAD_DICTCOMP'",
"elif",
"const",
".",
"co_name",
"==",
"'<setcomp>'",
":",
"op_name",
"=",
"'LOAD_SETCOMP'",
"# verify uses 'pattr' for comparison, since 'attr'",
"# now holds Code(const) and thus can not be used",
"# for comparison (todo: think about changing this)",
"# pattr = 'code_object @ 0x%x %s->%s' % \\",
"# (id(const), const.co_filename, const.co_name)",
"pattr",
"=",
"'<code_object '",
"+",
"const",
".",
"co_name",
"+",
"'>'",
"else",
":",
"if",
"oparg",
"<",
"len",
"(",
"co",
".",
"co_consts",
")",
":",
"argval",
",",
"_",
"=",
"_get_const_info",
"(",
"oparg",
",",
"co",
".",
"co_consts",
")",
"# Why don't we use _ above for \"pattr\" rather than \"const\"?",
"# This *is* a little hoaky, but we have to coordinate with",
"# other parts like n_LOAD_CONST in pysource.py for example.",
"pattr",
"=",
"const",
"pass",
"elif",
"op",
"in",
"self",
".",
"opc",
".",
"NAME_OPS",
":",
"pattr",
"=",
"names",
"[",
"oparg",
"]",
"elif",
"op",
"in",
"self",
".",
"opc",
".",
"JREL_OPS",
":",
"pattr",
"=",
"repr",
"(",
"offset",
"+",
"3",
"+",
"oparg",
")",
"if",
"op",
"==",
"self",
".",
"opc",
".",
"JUMP_FORWARD",
":",
"target",
"=",
"self",
".",
"get_target",
"(",
"offset",
")",
"# FIXME: this is a hack to catch stuff like:",
"# if x: continue",
"# the \"continue\" is not on a new line.",
"if",
"len",
"(",
"tokens",
")",
"and",
"tokens",
"[",
"-",
"1",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
":",
"tokens",
"[",
"-",
"1",
"]",
".",
"kind",
"=",
"intern",
"(",
"'CONTINUE'",
")",
"elif",
"op",
"in",
"self",
".",
"opc",
".",
"JABS_OPS",
":",
"pattr",
"=",
"repr",
"(",
"oparg",
")",
"elif",
"op",
"in",
"self",
".",
"opc",
".",
"LOCAL_OPS",
":",
"pattr",
"=",
"varnames",
"[",
"oparg",
"]",
"elif",
"op",
"in",
"self",
".",
"opc",
".",
"COMPARE_OPS",
":",
"pattr",
"=",
"self",
".",
"opc",
".",
"cmp_op",
"[",
"oparg",
"]",
"elif",
"op",
"in",
"self",
".",
"opc",
".",
"FREE_OPS",
":",
"pattr",
"=",
"free",
"[",
"oparg",
"]",
"if",
"op",
"in",
"self",
".",
"varargs_ops",
":",
"# CE - Hack for >= 2.5",
"# Now all values loaded via LOAD_CLOSURE are packed into",
"# a tuple before calling MAKE_CLOSURE.",
"if",
"(",
"self",
".",
"version",
">=",
"2.5",
"and",
"op",
"==",
"self",
".",
"opc",
".",
"BUILD_TUPLE",
"and",
"self",
".",
"code",
"[",
"self",
".",
"prev",
"[",
"offset",
"]",
"]",
"==",
"self",
".",
"opc",
".",
"LOAD_CLOSURE",
")",
":",
"continue",
"else",
":",
"op_name",
"=",
"'%s_%d'",
"%",
"(",
"op_name",
",",
"oparg",
")",
"customize",
"[",
"op_name",
"]",
"=",
"oparg",
"elif",
"self",
".",
"version",
">",
"2.0",
"and",
"op",
"==",
"self",
".",
"opc",
".",
"CONTINUE_LOOP",
":",
"customize",
"[",
"op_name",
"]",
"=",
"0",
"elif",
"op_name",
"in",
"\"\"\"\n CONTINUE_LOOP EXEC_STMT LOAD_LISTCOMP LOAD_SETCOMP\n \"\"\"",
".",
"split",
"(",
")",
":",
"customize",
"[",
"op_name",
"]",
"=",
"0",
"elif",
"op",
"==",
"self",
".",
"opc",
".",
"JUMP_ABSOLUTE",
":",
"# Further classify JUMP_ABSOLUTE into backward jumps",
"# which are used in loops, and \"CONTINUE\" jumps which",
"# may appear in a \"continue\" statement. The loop-type",
"# and continue-type jumps will help us classify loop",
"# boundaries The continue-type jumps help us get",
"# \"continue\" statements with would otherwise be turned",
"# into a \"pass\" statement because JUMPs are sometimes",
"# ignored in rules as just boundary overhead. In",
"# comprehensions we might sometimes classify JUMP_BACK",
"# as CONTINUE, but that's okay since we add a grammar",
"# rule for that.",
"target",
"=",
"self",
".",
"get_target",
"(",
"offset",
")",
"if",
"target",
"<=",
"offset",
":",
"op_name",
"=",
"'JUMP_BACK'",
"if",
"(",
"offset",
"in",
"self",
".",
"stmts",
"and",
"self",
".",
"code",
"[",
"offset",
"+",
"3",
"]",
"not",
"in",
"(",
"self",
".",
"opc",
".",
"END_FINALLY",
",",
"self",
".",
"opc",
".",
"POP_BLOCK",
")",
")",
":",
"if",
"(",
"(",
"offset",
"in",
"self",
".",
"linestarts",
"and",
"tokens",
"[",
"-",
"1",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
")",
"or",
"offset",
"not",
"in",
"self",
".",
"not_continue",
")",
":",
"op_name",
"=",
"'CONTINUE'",
"else",
":",
"# FIXME: this is a hack to catch stuff like:",
"# if x: continue",
"# the \"continue\" is not on a new line.",
"if",
"tokens",
"[",
"-",
"1",
"]",
".",
"kind",
"==",
"'JUMP_BACK'",
":",
"# We need 'intern' since we have",
"# already have processed the previous",
"# token.",
"tokens",
"[",
"-",
"1",
"]",
".",
"kind",
"=",
"intern",
"(",
"'CONTINUE'",
")",
"elif",
"op",
"==",
"self",
".",
"opc",
".",
"LOAD_GLOBAL",
":",
"if",
"offset",
"in",
"self",
".",
"load_asserts",
":",
"op_name",
"=",
"'LOAD_ASSERT'",
"elif",
"op",
"==",
"self",
".",
"opc",
".",
"RETURN_VALUE",
":",
"if",
"offset",
"in",
"self",
".",
"return_end_ifs",
":",
"op_name",
"=",
"'RETURN_END_IF'",
"linestart",
"=",
"self",
".",
"linestarts",
".",
"get",
"(",
"offset",
",",
"None",
")",
"if",
"offset",
"not",
"in",
"replace",
":",
"tokens",
".",
"append",
"(",
"Token",
"(",
"op_name",
",",
"oparg",
",",
"pattr",
",",
"offset",
",",
"linestart",
",",
"op",
",",
"has_arg",
",",
"self",
".",
"opc",
")",
")",
"else",
":",
"tokens",
".",
"append",
"(",
"Token",
"(",
"replace",
"[",
"offset",
"]",
",",
"oparg",
",",
"pattr",
",",
"offset",
",",
"linestart",
",",
"op",
",",
"has_arg",
",",
"self",
".",
"opc",
")",
")",
"pass",
"pass",
"if",
"show_asm",
"in",
"(",
"'both'",
",",
"'after'",
")",
":",
"for",
"t",
"in",
"tokens",
":",
"print",
"(",
"t",
".",
"format",
"(",
"line_prefix",
"=",
"'L.'",
")",
")",
"print",
"(",
")",
"return",
"tokens",
",",
"customize"
] | 46.190476 | 18.419913 |
def run(cmd):
"""Run the given command.
    Raises OSError if the command returns a non-zero exit status.
"""
log.debug("running '%s'", cmd)
fixed_cmd = cmd
if sys.platform == "win32" and cmd.count('"') > 2:
fixed_cmd = '"' + cmd + '"'
retval = os.system(fixed_cmd)
if hasattr(os, "WEXITSTATUS"):
status = os.WEXITSTATUS(retval)
else:
status = retval
if status:
raise OSError(status, "error running '%s'" % cmd)
|
[
"def",
"run",
"(",
"cmd",
")",
":",
"log",
".",
"debug",
"(",
"\"running '%s'\"",
",",
"cmd",
")",
"fixed_cmd",
"=",
"cmd",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
"and",
"cmd",
".",
"count",
"(",
"'\"'",
")",
">",
"2",
":",
"fixed_cmd",
"=",
"'\"'",
"+",
"cmd",
"+",
"'\"'",
"retval",
"=",
"os",
".",
"system",
"(",
"fixed_cmd",
")",
"if",
"hasattr",
"(",
"os",
",",
"\"WEXITSTATUS\"",
")",
":",
"status",
"=",
"os",
".",
"WEXITSTATUS",
"(",
"retval",
")",
"else",
":",
"status",
"=",
"retval",
"if",
"status",
":",
"raise",
"OSError",
"(",
"status",
",",
"\"error running '%s'\"",
"%",
"cmd",
")"
] | 29.0625 | 15.1875 |
def authorizer(self, schemes, resource, action, request_args):
"""Construct the Authorization header for a request.
Args:
schemes (list of str): Authentication schemes supported for the
requested action.
resource (str): Object upon which an action is being performed.
action (str): Action being performed.
request_args (list of str): Arguments passed to the action call.
Returns:
(str, str) A tuple of the auth scheme satisfied, and the credential
for the Authorization header or empty strings if none could be
satisfied.
"""
if not schemes:
return u'', u''
for scheme in schemes:
if scheme in self.schemes and self.has_auth_params(scheme):
cred = Context.format_auth_params(self.schemes[scheme][u'params'])
if hasattr(self, 'mfa_token'):
cred = '{}, mfa_token="{}"'.format(cred, self.mfa_token)
return scheme, cred
raise AuthenticationError(self, schemes)
|
[
"def",
"authorizer",
"(",
"self",
",",
"schemes",
",",
"resource",
",",
"action",
",",
"request_args",
")",
":",
"if",
"not",
"schemes",
":",
"return",
"u''",
",",
"u''",
"for",
"scheme",
"in",
"schemes",
":",
"if",
"scheme",
"in",
"self",
".",
"schemes",
"and",
"self",
".",
"has_auth_params",
"(",
"scheme",
")",
":",
"cred",
"=",
"Context",
".",
"format_auth_params",
"(",
"self",
".",
"schemes",
"[",
"scheme",
"]",
"[",
"u'params'",
"]",
")",
"if",
"hasattr",
"(",
"self",
",",
"'mfa_token'",
")",
":",
"cred",
"=",
"'{}, mfa_token=\"{}\"'",
".",
"format",
"(",
"cred",
",",
"self",
".",
"mfa_token",
")",
"return",
"scheme",
",",
"cred",
"raise",
"AuthenticationError",
"(",
"self",
",",
"schemes",
")"
] | 42.76 | 22.72 |
def ParseLastVisitedRow(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
"""Parses a last visited row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (SQLiteCache): cache which contains cached results from querying
the visits and urls tables.
database (Optional[SQLiteDatabase]): database.
"""
query_hash = hash(query)
hidden = self._GetRowValue(query_hash, row, 'hidden')
transition = self._GetRowValue(query_hash, row, 'transition')
visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')
from_visit = self._GetRowValue(query_hash, row, 'from_visit')
event_data = ChromeHistoryPageVisitedEventData()
event_data.from_visit = self._GetUrl(from_visit, cache, database)
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.page_transition_type = (
transition & self._PAGE_TRANSITION_CORE_MASK)
event_data.title = self._GetRowValue(query_hash, row, 'title')
event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')
event_data.url = self._GetRowValue(query_hash, row, 'url')
event_data.url_hidden = hidden == '1'
event_data.visit_source = self._GetVisitSource(
visit_identifier, cache, database)
timestamp = self._GetRowValue(query_hash, row, 'visit_time')
date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
[
"def",
"ParseLastVisitedRow",
"(",
"self",
",",
"parser_mediator",
",",
"query",
",",
"row",
",",
"cache",
"=",
"None",
",",
"database",
"=",
"None",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"query_hash",
"=",
"hash",
"(",
"query",
")",
"hidden",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'hidden'",
")",
"transition",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'transition'",
")",
"visit_identifier",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'visit_id'",
")",
"from_visit",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'from_visit'",
")",
"event_data",
"=",
"ChromeHistoryPageVisitedEventData",
"(",
")",
"event_data",
".",
"from_visit",
"=",
"self",
".",
"_GetUrl",
"(",
"from_visit",
",",
"cache",
",",
"database",
")",
"event_data",
".",
"offset",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'id'",
")",
"event_data",
".",
"query",
"=",
"query",
"event_data",
".",
"page_transition_type",
"=",
"(",
"transition",
"&",
"self",
".",
"_PAGE_TRANSITION_CORE_MASK",
")",
"event_data",
".",
"title",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'title'",
")",
"event_data",
".",
"typed_count",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'typed_count'",
")",
"event_data",
".",
"url",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'url'",
")",
"event_data",
".",
"url_hidden",
"=",
"hidden",
"==",
"'1'",
"event_data",
".",
"visit_source",
"=",
"self",
".",
"_GetVisitSource",
"(",
"visit_identifier",
",",
"cache",
",",
"database",
")",
"timestamp",
"=",
"self",
".",
"_GetRowValue",
"(",
"query_hash",
",",
"row",
",",
"'visit_time'",
")",
"date_time",
"=",
"dfdatetime_webkit_time",
".",
"WebKitTime",
"(",
"timestamp",
"=",
"timestamp",
")",
"event",
"=",
"time_events",
".",
"DateTimeValuesEvent",
"(",
"date_time",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_VISITED",
")",
"parser_mediator",
".",
"ProduceEventWithEventData",
"(",
"event",
",",
"event_data",
")"
] | 44.25 | 20.4 |
def flatten(obj):
'''
    Flattens nested list-like objects into a single flat list, and flattens
    nested dict-like objects into a single dict with concatenated keys
    (delegating to `_relflatten`). Raises ValueError for any other type.
'''
if isseq(obj):
ret = []
for item in obj:
if isseq(item):
ret.extend(flatten(item))
else:
ret.append(item)
return ret
if isdict(obj):
ret = dict()
for key, value in obj.items():
for skey, sval in _relflatten(value):
ret[key + skey] = sval
return ret
raise ValueError(
'only list- and dict-like objects can be flattened, not %r' % (obj,))
|
[
"def",
"flatten",
"(",
"obj",
")",
":",
"if",
"isseq",
"(",
"obj",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"item",
"in",
"obj",
":",
"if",
"isseq",
"(",
"item",
")",
":",
"ret",
".",
"extend",
"(",
"flatten",
"(",
"item",
")",
")",
"else",
":",
"ret",
".",
"append",
"(",
"item",
")",
"return",
"ret",
"if",
"isdict",
"(",
"obj",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"obj",
".",
"items",
"(",
")",
":",
"for",
"skey",
",",
"sval",
"in",
"_relflatten",
"(",
"value",
")",
":",
"ret",
"[",
"key",
"+",
"skey",
"]",
"=",
"sval",
"return",
"ret",
"raise",
"ValueError",
"(",
"'only list- and dict-like objects can be flattened, not %r'",
"%",
"(",
"obj",
",",
")",
")"
] | 22 | 21.6 |
def WriteXml(self, w, option, elementName=None):
""" Method writes the xml representation of the managed object. """
if elementName == None:
x = w.createElement(self.propMoMeta.xmlAttribute)
else:
x = w.createElement(elementName)
for at in UcsUtils.GetUcsPropertyMetaAttributeList(self.classId):
atMeta = UcsUtils.GetUcsMethodMeta(self.classId, at)
if (atMeta.io == "Output"):
continue
if atMeta.isComplexType:
if (getattr(self, at) != None):
x.appendChild(self.__dict__[at].WriteXml(w, option, UcsUtils.WordL(at)))
elif (getattr(self, at) != None):
x.setAttribute(atMeta.xmlAttribute, getattr(self, at))
x_child = self.childWriteXml(w, option)
for xc in x_child:
if (xc != None):
x.appendChild(xc)
return x
|
[
"def",
"WriteXml",
"(",
"self",
",",
"w",
",",
"option",
",",
"elementName",
"=",
"None",
")",
":",
"if",
"elementName",
"==",
"None",
":",
"x",
"=",
"w",
".",
"createElement",
"(",
"self",
".",
"propMoMeta",
".",
"xmlAttribute",
")",
"else",
":",
"x",
"=",
"w",
".",
"createElement",
"(",
"elementName",
")",
"for",
"at",
"in",
"UcsUtils",
".",
"GetUcsPropertyMetaAttributeList",
"(",
"self",
".",
"classId",
")",
":",
"atMeta",
"=",
"UcsUtils",
".",
"GetUcsMethodMeta",
"(",
"self",
".",
"classId",
",",
"at",
")",
"if",
"(",
"atMeta",
".",
"io",
"==",
"\"Output\"",
")",
":",
"continue",
"if",
"atMeta",
".",
"isComplexType",
":",
"if",
"(",
"getattr",
"(",
"self",
",",
"at",
")",
"!=",
"None",
")",
":",
"x",
".",
"appendChild",
"(",
"self",
".",
"__dict__",
"[",
"at",
"]",
".",
"WriteXml",
"(",
"w",
",",
"option",
",",
"UcsUtils",
".",
"WordL",
"(",
"at",
")",
")",
")",
"elif",
"(",
"getattr",
"(",
"self",
",",
"at",
")",
"!=",
"None",
")",
":",
"x",
".",
"setAttribute",
"(",
"atMeta",
".",
"xmlAttribute",
",",
"getattr",
"(",
"self",
",",
"at",
")",
")",
"x_child",
"=",
"self",
".",
"childWriteXml",
"(",
"w",
",",
"option",
")",
"for",
"xc",
"in",
"x_child",
":",
"if",
"(",
"xc",
"!=",
"None",
")",
":",
"x",
".",
"appendChild",
"(",
"xc",
")",
"return",
"x"
] | 37.2 | 16.05 |
def write_zip_fp(fp, data, properties, dir_data_list=None):
"""
Write custom zip file of data and properties to fp
    :param fp: the file pointer to which to write the data
:param data: the data to write to the file; may be None
:param properties: the properties to write to the file; may be None
:param dir_data_list: optional list of directory header information structures
If dir_data_list is specified, data should be None and properties should
be specified. Then the existing data structure will be left alone and only
the directory headers and end of directory header will be written.
Otherwise, if both data and properties are specified, both are written
out in full.
The properties param must not change during this method. Callers should
take care to ensure this does not happen.
"""
assert data is not None or properties is not None
# dir_data_list has the format: local file record offset, name, data length, crc32
dir_data_list = list() if dir_data_list is None else dir_data_list
dt = datetime.datetime.now()
if data is not None:
offset_data = fp.tell()
def write_data(fp):
numpy_start_pos = fp.tell()
numpy.save(fp, data)
numpy_end_pos = fp.tell()
fp.seek(numpy_start_pos)
data_c = numpy.require(data, dtype=data.dtype, requirements=["C_CONTIGUOUS"])
header_data = fp.read((numpy_end_pos - numpy_start_pos) - data_c.nbytes) # read the header
data_crc32 = binascii.crc32(data_c.data, binascii.crc32(header_data)) & 0xFFFFFFFF
fp.seek(numpy_end_pos)
return data_crc32
data_len, crc32 = write_local_file(fp, b"data.npy", write_data, dt)
dir_data_list.append((offset_data, b"data.npy", data_len, crc32))
if properties is not None:
json_str = str()
try:
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Geometry.IntPoint) or isinstance(obj, Geometry.IntSize) or isinstance(obj, Geometry.IntRect) or isinstance(obj, Geometry.FloatPoint) or isinstance(obj, Geometry.FloatSize) or isinstance(obj, Geometry.FloatRect):
return tuple(obj)
else:
return json.JSONEncoder.default(self, obj)
json_io = io.StringIO()
json.dump(properties, json_io, cls=JSONEncoder)
json_str = json_io.getvalue()
except Exception as e:
# catch exceptions to avoid corrupt zip files
import traceback
logging.error("Exception writing zip file %s" + str(e))
traceback.print_exc()
traceback.print_stack()
def write_json(fp):
json_bytes = bytes(json_str, 'ISO-8859-1')
fp.write(json_bytes)
return binascii.crc32(json_bytes) & 0xFFFFFFFF
offset_json = fp.tell()
json_len, json_crc32 = write_local_file(fp, b"metadata.json", write_json, dt)
dir_data_list.append((offset_json, b"metadata.json", json_len, json_crc32))
dir_offset = fp.tell()
for offset, name_bytes, data_len, crc32 in dir_data_list:
write_directory_data(fp, offset, name_bytes, data_len, crc32, dt)
dir_size = fp.tell() - dir_offset
write_end_of_directory(fp, dir_size, dir_offset, len(dir_data_list))
fp.truncate()
|
[
"def",
"write_zip_fp",
"(",
"fp",
",",
"data",
",",
"properties",
",",
"dir_data_list",
"=",
"None",
")",
":",
"assert",
"data",
"is",
"not",
"None",
"or",
"properties",
"is",
"not",
"None",
"# dir_data_list has the format: local file record offset, name, data length, crc32",
"dir_data_list",
"=",
"list",
"(",
")",
"if",
"dir_data_list",
"is",
"None",
"else",
"dir_data_list",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"if",
"data",
"is",
"not",
"None",
":",
"offset_data",
"=",
"fp",
".",
"tell",
"(",
")",
"def",
"write_data",
"(",
"fp",
")",
":",
"numpy_start_pos",
"=",
"fp",
".",
"tell",
"(",
")",
"numpy",
".",
"save",
"(",
"fp",
",",
"data",
")",
"numpy_end_pos",
"=",
"fp",
".",
"tell",
"(",
")",
"fp",
".",
"seek",
"(",
"numpy_start_pos",
")",
"data_c",
"=",
"numpy",
".",
"require",
"(",
"data",
",",
"dtype",
"=",
"data",
".",
"dtype",
",",
"requirements",
"=",
"[",
"\"C_CONTIGUOUS\"",
"]",
")",
"header_data",
"=",
"fp",
".",
"read",
"(",
"(",
"numpy_end_pos",
"-",
"numpy_start_pos",
")",
"-",
"data_c",
".",
"nbytes",
")",
"# read the header",
"data_crc32",
"=",
"binascii",
".",
"crc32",
"(",
"data_c",
".",
"data",
",",
"binascii",
".",
"crc32",
"(",
"header_data",
")",
")",
"&",
"0xFFFFFFFF",
"fp",
".",
"seek",
"(",
"numpy_end_pos",
")",
"return",
"data_crc32",
"data_len",
",",
"crc32",
"=",
"write_local_file",
"(",
"fp",
",",
"b\"data.npy\"",
",",
"write_data",
",",
"dt",
")",
"dir_data_list",
".",
"append",
"(",
"(",
"offset_data",
",",
"b\"data.npy\"",
",",
"data_len",
",",
"crc32",
")",
")",
"if",
"properties",
"is",
"not",
"None",
":",
"json_str",
"=",
"str",
"(",
")",
"try",
":",
"class",
"JSONEncoder",
"(",
"json",
".",
"JSONEncoder",
")",
":",
"def",
"default",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Geometry",
".",
"IntPoint",
")",
"or",
"isinstance",
"(",
"obj",
",",
"Geometry",
".",
"IntSize",
")",
"or",
"isinstance",
"(",
"obj",
",",
"Geometry",
".",
"IntRect",
")",
"or",
"isinstance",
"(",
"obj",
",",
"Geometry",
".",
"FloatPoint",
")",
"or",
"isinstance",
"(",
"obj",
",",
"Geometry",
".",
"FloatSize",
")",
"or",
"isinstance",
"(",
"obj",
",",
"Geometry",
".",
"FloatRect",
")",
":",
"return",
"tuple",
"(",
"obj",
")",
"else",
":",
"return",
"json",
".",
"JSONEncoder",
".",
"default",
"(",
"self",
",",
"obj",
")",
"json_io",
"=",
"io",
".",
"StringIO",
"(",
")",
"json",
".",
"dump",
"(",
"properties",
",",
"json_io",
",",
"cls",
"=",
"JSONEncoder",
")",
"json_str",
"=",
"json_io",
".",
"getvalue",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# catch exceptions to avoid corrupt zip files",
"import",
"traceback",
"logging",
".",
"error",
"(",
"\"Exception writing zip file %s\"",
"+",
"str",
"(",
"e",
")",
")",
"traceback",
".",
"print_exc",
"(",
")",
"traceback",
".",
"print_stack",
"(",
")",
"def",
"write_json",
"(",
"fp",
")",
":",
"json_bytes",
"=",
"bytes",
"(",
"json_str",
",",
"'ISO-8859-1'",
")",
"fp",
".",
"write",
"(",
"json_bytes",
")",
"return",
"binascii",
".",
"crc32",
"(",
"json_bytes",
")",
"&",
"0xFFFFFFFF",
"offset_json",
"=",
"fp",
".",
"tell",
"(",
")",
"json_len",
",",
"json_crc32",
"=",
"write_local_file",
"(",
"fp",
",",
"b\"metadata.json\"",
",",
"write_json",
",",
"dt",
")",
"dir_data_list",
".",
"append",
"(",
"(",
"offset_json",
",",
"b\"metadata.json\"",
",",
"json_len",
",",
"json_crc32",
")",
")",
"dir_offset",
"=",
"fp",
".",
"tell",
"(",
")",
"for",
"offset",
",",
"name_bytes",
",",
"data_len",
",",
"crc32",
"in",
"dir_data_list",
":",
"write_directory_data",
"(",
"fp",
",",
"offset",
",",
"name_bytes",
",",
"data_len",
",",
"crc32",
",",
"dt",
")",
"dir_size",
"=",
"fp",
".",
"tell",
"(",
")",
"-",
"dir_offset",
"write_end_of_directory",
"(",
"fp",
",",
"dir_size",
",",
"dir_offset",
",",
"len",
"(",
"dir_data_list",
")",
")",
"fp",
".",
"truncate",
"(",
")"
] | 50.279412 | 24.132353 |
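The token list above closes out an entry that streams a numpy array into a zip member and then checksums the `.npy` header together with the C-contiguous payload. A minimal, self-contained sketch of just that checksum step, using a made-up array in place of the entry's `data` argument:

import binascii
import io
import numpy

data = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)   # stand-in for the real array

buf = io.BytesIO()
start = buf.tell()
numpy.save(buf, data)                      # writes the .npy header followed by the raw bytes
end = buf.tell()

buf.seek(start)
data_c = numpy.require(data, dtype=data.dtype, requirements=["C_CONTIGUOUS"])
header = buf.read((end - start) - data_c.nbytes)   # everything before the payload is the header

# CRC over the header first, then the contiguous payload, masked to unsigned 32 bits.
crc = binascii.crc32(data_c.data, binascii.crc32(header)) & 0xFFFFFFFF
print(hex(crc))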
def latinize(value):
"""
Converts (transliterates) greek letters to latin equivalents.
"""
def replace_double_character(match):
search = ('Θ Χ Ψ '
'θ χ ψ '
'ΟΥ ΑΥ ΕΥ '
'Ου Αυ Ευ '
'ου αυ ευ').split()
replace = ('TH CH PS '
'th ch ps '
'OU AU EU '
'Ou Au Eu '
'ou au eu').split()
matched = match.group(0)
if matched in search:
return replace[search.index(matched)]
return matched
search = 'ΑΒΓΔΕΖΗΙΚΛΜΝΞΟΠΡΣΣΤΥΦΩαβγδεζηικλμνξοπρσςτυφω'
replace = 'AVGDEZIIKLMNXOPRSSTUFOavgdeziiklmnxoprsstyfo'
def replace_greek_character(match):
matched = list(match.group(0))
value = map(lambda l: replace[search.find(l)], matched)
return ''.join(value)
return re.sub(r'[{0}]+'.format(search),
replace_greek_character, re.sub(
r'([ΘΧΨθχψ]+|ΟΥ|ΑΥ|ΕΥ|Ου|Αυ|Ευ|ου|αυ|ευ)',
replace_double_character,
remove_accents(value)))
|
[
"def",
"latinize",
"(",
"value",
")",
":",
"def",
"replace_double_character",
"(",
"match",
")",
":",
"search",
"=",
"(",
"'Θ Χ Ψ '",
"'θ χ ψ '",
"'ΟΥ ΑΥ ΕΥ '",
"'Ου Αυ Ευ '",
"'ου αυ ευ').spli",
"t",
"(",
")",
"",
"",
"replace",
"=",
"(",
"'TH CH PS '",
"'th ch ps '",
"'OU AU EU '",
"'Ou Au Eu '",
"'ou au eu'",
")",
".",
"split",
"(",
")",
"matched",
"=",
"match",
".",
"group",
"(",
"0",
")",
"if",
"matched",
"in",
"search",
":",
"return",
"replace",
"[",
"search",
".",
"index",
"(",
"matched",
")",
"]",
"return",
"matched",
"search",
"=",
"'ΑΒΓΔΕΖΗΙΚΛΜΝΞΟΠΡΣΣΤΥΦΩαβγδεζηικλμνξοπρσςτυφω'",
"replace",
"=",
"'AVGDEZIIKLMNXOPRSSTUFOavgdeziiklmnxoprsstyfo'",
"def",
"replace_greek_character",
"(",
"match",
")",
":",
"matched",
"=",
"list",
"(",
"match",
".",
"group",
"(",
"0",
")",
")",
"value",
"=",
"map",
"(",
"lambda",
"l",
":",
"replace",
"[",
"search",
".",
"find",
"(",
"l",
")",
"]",
",",
"matched",
")",
"return",
"''",
".",
"join",
"(",
"value",
")",
"return",
"re",
".",
"sub",
"(",
"r'[{0}]+'",
".",
"format",
"(",
"search",
")",
",",
"replace_greek_character",
",",
"re",
".",
"sub",
"(",
"r'([ΘΧΨθχψ]+|ΟΥ|ΑΥ|ΕΥ|Ου|Αυ|Ευ|ου|αυ|ευ)',",
"",
"replace_double_character",
",",
"remove_accents",
"(",
"value",
")",
")",
")"
] | 32.454545 | 12.757576 |
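A small, self-contained sketch of the transliteration idea in the `latinize` entry above, with a deliberately reduced character map (the full mapping, digraph handling, and accent removal live in the original function):

import re

GREEK_TO_LATIN = {'α': 'a', 'β': 'v', 'γ': 'g', 'δ': 'd', 'ε': 'e', 'ζ': 'z',
                  'θ': 'th', 'χ': 'ch', 'ψ': 'ps', 'ο': 'o', 'σ': 's', 'ς': 's'}

def latinize_simple(text):
    # Digraphs first (here only "ου"), then letter-by-letter replacement.
    text = re.sub('ου', 'ou', text)
    return ''.join(GREEK_TO_LATIN.get(ch, ch) for ch in text)

print(latinize_simple('θεος'))   # -> 'theos'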
def extrude(self, uem, reference, collar=0.0, skip_overlap=False):
"""Extrude reference boundary collars from uem
reference |----| |--------------| |-------------|
uem |---------------------| |-------------------------------|
extruded |--| |--| |---| |-----| |-| |-----| |-----------| |-----|
Parameters
----------
uem : Timeline
Evaluation map.
reference : Annotation
Reference annotation.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
Returns
-------
extruded_uem : Timeline
"""
if collar == 0. and not skip_overlap:
return uem
collars, overlap_regions = [], []
# build list of collars if needed
if collar > 0.:
# iterate over all segments in reference
for segment in reference.itersegments():
# add collar centered on start time
t = segment.start
collars.append(Segment(t - .5 * collar, t + .5 * collar))
# add collar centered on end time
t = segment.end
collars.append(Segment(t - .5 * collar, t + .5 * collar))
# build list of overlap regions if needed
if skip_overlap:
# iterate over pair of intersecting segments
for (segment1, track1), (segment2, track2) in reference.co_iter(reference):
if segment1 == segment2 and track1 == track2:
continue
# add their intersection
overlap_regions.append(segment1 & segment2)
segments = collars + overlap_regions
return Timeline(segments=segments).support().gaps(support=uem)
|
[
"def",
"extrude",
"(",
"self",
",",
"uem",
",",
"reference",
",",
"collar",
"=",
"0.0",
",",
"skip_overlap",
"=",
"False",
")",
":",
"if",
"collar",
"==",
"0.",
"and",
"not",
"skip_overlap",
":",
"return",
"uem",
"collars",
",",
"overlap_regions",
"=",
"[",
"]",
",",
"[",
"]",
"# build list of collars if needed",
"if",
"collar",
">",
"0.",
":",
"# iterate over all segments in reference",
"for",
"segment",
"in",
"reference",
".",
"itersegments",
"(",
")",
":",
"# add collar centered on start time",
"t",
"=",
"segment",
".",
"start",
"collars",
".",
"append",
"(",
"Segment",
"(",
"t",
"-",
".5",
"*",
"collar",
",",
"t",
"+",
".5",
"*",
"collar",
")",
")",
"# add collar centered on end time",
"t",
"=",
"segment",
".",
"end",
"collars",
".",
"append",
"(",
"Segment",
"(",
"t",
"-",
".5",
"*",
"collar",
",",
"t",
"+",
".5",
"*",
"collar",
")",
")",
"# build list of overlap regions if needed",
"if",
"skip_overlap",
":",
"# iterate over pair of intersecting segments",
"for",
"(",
"segment1",
",",
"track1",
")",
",",
"(",
"segment2",
",",
"track2",
")",
"in",
"reference",
".",
"co_iter",
"(",
"reference",
")",
":",
"if",
"segment1",
"==",
"segment2",
"and",
"track1",
"==",
"track2",
":",
"continue",
"# add their intersection",
"overlap_regions",
".",
"append",
"(",
"segment1",
"&",
"segment2",
")",
"segments",
"=",
"collars",
"+",
"overlap_regions",
"return",
"Timeline",
"(",
"segments",
"=",
"segments",
")",
".",
"support",
"(",
")",
".",
"gaps",
"(",
"support",
"=",
"uem",
")"
] | 36.821429 | 21.410714 |
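The `extrude` entry above removes collars centred on every reference boundary from the evaluation map. A self-contained sketch of just the collar construction, with made-up segment tuples:

collar = 0.5
reference = [(1.0, 3.5), (6.0, 9.0)]       # (start, end) pairs standing in for Segments

collars = []
for start, end in reference:
    # one collar of total width `collar` centred on each boundary
    collars.append((start - 0.5 * collar, start + 0.5 * collar))
    collars.append((end - 0.5 * collar, end + 0.5 * collar))

print(collars)
# [(0.75, 1.25), (3.25, 3.75), (5.75, 6.25), (8.75, 9.25)]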
def send_response_only(self, code, message=None):
"""Send the response header only."""
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode(
'latin-1', 'strict'))
|
[
"def",
"send_response_only",
"(",
"self",
",",
"code",
",",
"message",
"=",
"None",
")",
":",
"if",
"message",
"is",
"None",
":",
"if",
"code",
"in",
"self",
".",
"responses",
":",
"message",
"=",
"self",
".",
"responses",
"[",
"code",
"]",
"[",
"0",
"]",
"else",
":",
"message",
"=",
"''",
"if",
"self",
".",
"request_version",
"!=",
"'HTTP/0.9'",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_headers_buffer'",
")",
":",
"self",
".",
"_headers_buffer",
"=",
"[",
"]",
"self",
".",
"_headers_buffer",
".",
"append",
"(",
"(",
"\"%s %d %s\\r\\n\"",
"%",
"(",
"self",
".",
"protocol_version",
",",
"code",
",",
"message",
")",
")",
".",
"encode",
"(",
"'latin-1'",
",",
"'strict'",
")",
")"
] | 43.076923 | 10.461538 |
def lock_file(f, block=False):
"""
If block=False (the default), die hard and fast if another process has
already grabbed the lock for this file.
If block=True, wait for the lock to be released, then continue.
"""
try:
flags = fcntl.LOCK_EX
if not block:
flags |= fcntl.LOCK_NB
fcntl.flock(f.fileno(), flags)
except IOError as e:
if e.errno in (errno.EACCES, errno.EAGAIN):
raise SystemExit("ERROR: %s is locked by another process." %
f.name)
raise
|
[
"def",
"lock_file",
"(",
"f",
",",
"block",
"=",
"False",
")",
":",
"try",
":",
"flags",
"=",
"fcntl",
".",
"LOCK_EX",
"if",
"not",
"block",
":",
"flags",
"|=",
"fcntl",
".",
"LOCK_NB",
"fcntl",
".",
"flock",
"(",
"f",
".",
"fileno",
"(",
")",
",",
"flags",
")",
"except",
"IOError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"in",
"(",
"errno",
".",
"EACCES",
",",
"errno",
".",
"EAGAIN",
")",
":",
"raise",
"SystemExit",
"(",
"\"ERROR: %s is locked by another process.\"",
"%",
"f",
".",
"name",
")",
"raise"
] | 32.588235 | 16.117647 |
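A runnable (Unix-only) sketch of the non-blocking `flock` pattern used by the `lock_file` entry above; the lock-file path is made up:

import errno
import fcntl

with open("/tmp/example.lock", "w") as f:
    try:
        fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        print("lock acquired")
    except IOError as e:
        if e.errno in (errno.EACCES, errno.EAGAIN):
            raise SystemExit("ERROR: %s is locked by another process." % f.name)
        raise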
def _validate_plan_base(
new_plan,
base_plan,
is_partition_subset=True,
allow_rf_change=False,
):
"""Validate if given plan is valid comparing with given base-plan.
Validate following assertions:
- Partition-check: New partition-set should be subset of base-partition set
- Replica-count check: Replication-factor for each partition remains same
- Broker-check: New broker-set should be subset of base broker-set
"""
# Verify that partitions in plan are subset of base plan.
new_partitions = set([
(p_data['topic'], p_data['partition'])
for p_data in new_plan['partitions']
])
base_partitions = set([
(p_data['topic'], p_data['partition'])
for p_data in base_plan['partitions']
])
if is_partition_subset:
invalid_partitions = list(new_partitions - base_partitions)
else:
# partition set should be equal
invalid_partitions = list(
new_partitions.union(base_partitions) -
new_partitions.intersection(base_partitions),
)
if invalid_partitions:
_log.error(
'Invalid partition(s) found: {p_list}'.format(
p_list=invalid_partitions,
)
)
return False
# Verify replication-factor remains consistent
base_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in base_plan['partitions']
}
new_partition_replicas = {
(p_data['topic'], p_data['partition']): p_data['replicas']
for p_data in new_plan['partitions']
}
if not allow_rf_change:
invalid_replication_factor = False
for new_partition, replicas in six.iteritems(new_partition_replicas):
base_replica_cnt = len(base_partition_replicas[new_partition])
if len(replicas) != base_replica_cnt:
invalid_replication_factor = True
_log.error(
'Replication-factor Mismatch: Partition: {partition}: '
'Base-replicas: {expected}, Proposed-replicas: {actual}'
.format(
partition=new_partition,
expected=base_partition_replicas[new_partition],
actual=replicas,
),
)
if invalid_replication_factor:
return False
# Validation successful
return True
|
[
"def",
"_validate_plan_base",
"(",
"new_plan",
",",
"base_plan",
",",
"is_partition_subset",
"=",
"True",
",",
"allow_rf_change",
"=",
"False",
",",
")",
":",
"# Verify that partitions in plan are subset of base plan.",
"new_partitions",
"=",
"set",
"(",
"[",
"(",
"p_data",
"[",
"'topic'",
"]",
",",
"p_data",
"[",
"'partition'",
"]",
")",
"for",
"p_data",
"in",
"new_plan",
"[",
"'partitions'",
"]",
"]",
")",
"base_partitions",
"=",
"set",
"(",
"[",
"(",
"p_data",
"[",
"'topic'",
"]",
",",
"p_data",
"[",
"'partition'",
"]",
")",
"for",
"p_data",
"in",
"base_plan",
"[",
"'partitions'",
"]",
"]",
")",
"if",
"is_partition_subset",
":",
"invalid_partitions",
"=",
"list",
"(",
"new_partitions",
"-",
"base_partitions",
")",
"else",
":",
"# partition set should be equal",
"invalid_partitions",
"=",
"list",
"(",
"new_partitions",
".",
"union",
"(",
"base_partitions",
")",
"-",
"new_partitions",
".",
"intersection",
"(",
"base_partitions",
")",
",",
")",
"if",
"invalid_partitions",
":",
"_log",
".",
"error",
"(",
"'Invalid partition(s) found: {p_list}'",
".",
"format",
"(",
"p_list",
"=",
"invalid_partitions",
",",
")",
")",
"return",
"False",
"# Verify replication-factor remains consistent",
"base_partition_replicas",
"=",
"{",
"(",
"p_data",
"[",
"'topic'",
"]",
",",
"p_data",
"[",
"'partition'",
"]",
")",
":",
"p_data",
"[",
"'replicas'",
"]",
"for",
"p_data",
"in",
"base_plan",
"[",
"'partitions'",
"]",
"}",
"new_partition_replicas",
"=",
"{",
"(",
"p_data",
"[",
"'topic'",
"]",
",",
"p_data",
"[",
"'partition'",
"]",
")",
":",
"p_data",
"[",
"'replicas'",
"]",
"for",
"p_data",
"in",
"new_plan",
"[",
"'partitions'",
"]",
"}",
"if",
"not",
"allow_rf_change",
":",
"invalid_replication_factor",
"=",
"False",
"for",
"new_partition",
",",
"replicas",
"in",
"six",
".",
"iteritems",
"(",
"new_partition_replicas",
")",
":",
"base_replica_cnt",
"=",
"len",
"(",
"base_partition_replicas",
"[",
"new_partition",
"]",
")",
"if",
"len",
"(",
"replicas",
")",
"!=",
"base_replica_cnt",
":",
"invalid_replication_factor",
"=",
"True",
"_log",
".",
"error",
"(",
"'Replication-factor Mismatch: Partition: {partition}: '",
"'Base-replicas: {expected}, Proposed-replicas: {actual}'",
".",
"format",
"(",
"partition",
"=",
"new_partition",
",",
"expected",
"=",
"base_partition_replicas",
"[",
"new_partition",
"]",
",",
"actual",
"=",
"replicas",
",",
")",
",",
")",
"if",
"invalid_replication_factor",
":",
"return",
"False",
"# Validation successful",
"return",
"True"
] | 35.308824 | 19.235294 |
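The `_validate_plan_base` entry above boils down to set arithmetic over `(topic, partition)` keys plus a replica-count comparison. A toy, self-contained version of those two checks:

new_plan  = {"partitions": [{"topic": "t1", "partition": 0, "replicas": [1, 2]}]}
base_plan = {"partitions": [{"topic": "t1", "partition": 0, "replicas": [1, 3]},
                            {"topic": "t1", "partition": 1, "replicas": [2, 3]}]}

new_parts  = {(p["topic"], p["partition"]) for p in new_plan["partitions"]}
base_parts = {(p["topic"], p["partition"]) for p in base_plan["partitions"]}
invalid = new_parts - base_parts            # non-empty -> new plan names unknown partitions

base_rf = {(p["topic"], p["partition"]): len(p["replicas"]) for p in base_plan["partitions"]}
rf_ok = all(len(p["replicas"]) == base_rf[(p["topic"], p["partition"])]
            for p in new_plan["partitions"])

print(invalid, rf_ok)                        # set() True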
def wr_txt(self, fout_txt="gos_depth01.txt", title=None):
"""write text table of depth-01 GO terms and their letter representation."""
with open(fout_txt, 'w') as prt:
self.prt_header(prt, title)
data_nts = self.prt_txt(prt)
sys.stdout.write(" {N:>5} items WROTE: {TXT}\n".format(
N=len(data_nts), TXT=fout_txt))
|
[
"def",
"wr_txt",
"(",
"self",
",",
"fout_txt",
"=",
"\"gos_depth01.txt\"",
",",
"title",
"=",
"None",
")",
":",
"with",
"open",
"(",
"fout_txt",
",",
"'w'",
")",
"as",
"prt",
":",
"self",
".",
"prt_header",
"(",
"prt",
",",
"title",
")",
"data_nts",
"=",
"self",
".",
"prt_txt",
"(",
"prt",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" {N:>5} items WROTE: {TXT}\\n\"",
".",
"format",
"(",
"N",
"=",
"len",
"(",
"data_nts",
")",
",",
"TXT",
"=",
"fout_txt",
")",
")"
] | 53.571429 | 7.571429 |
def get_sources(arxiv_id):
"""
Download sources on arXiv for a given preprint.
.. note::
Bulk download of sources from arXiv is not permitted by their API. \
You should have a look at http://arxiv.org/help/bulk_data_s3.
:param eprint: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in a \
canonical form.
:returns: A ``TarFile`` object of the sources of the arXiv preprint or \
``None``.
"""
try:
request = requests.get(ARXIV_EPRINT_URL.format(arxiv_id=arxiv_id))
request.raise_for_status()
file_object = io.BytesIO(request.content)
return tarfile.open(fileobj=file_object)
except (RequestException, AssertionError, tarfile.TarError):
return None
|
[
"def",
"get_sources",
"(",
"arxiv_id",
")",
":",
"try",
":",
"request",
"=",
"requests",
".",
"get",
"(",
"ARXIV_EPRINT_URL",
".",
"format",
"(",
"arxiv_id",
"=",
"arxiv_id",
")",
")",
"request",
".",
"raise_for_status",
"(",
")",
"file_object",
"=",
"io",
".",
"BytesIO",
"(",
"request",
".",
"content",
")",
"return",
"tarfile",
".",
"open",
"(",
"fileobj",
"=",
"file_object",
")",
"except",
"(",
"RequestException",
",",
"AssertionError",
",",
"tarfile",
".",
"TarError",
")",
":",
"return",
"None"
] | 35.952381 | 23.095238 |
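A hedged sketch of the download-and-open pattern in the `get_sources` entry above; `requests` is assumed installed and the URL is a placeholder:

import io
import tarfile
import requests

def fetch_tarball(url):
    """Return a TarFile built from the response body, or None on any failure."""
    try:
        resp = requests.get(url)
        resp.raise_for_status()
        return tarfile.open(fileobj=io.BytesIO(resp.content))
    except (requests.RequestException, tarfile.TarError):
        return None

# archive = fetch_tarball("https://example.org/archive.tar.gz")  # hypothetical URL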
def get_resources_of_type(network_id, type_id, **kwargs):
"""
Return the Nodes, Links and ResourceGroups which
have the type specified.
"""
nodes_with_type = db.DBSession.query(Node).join(ResourceType).filter(Node.network_id==network_id, ResourceType.type_id==type_id).all()
links_with_type = db.DBSession.query(Link).join(ResourceType).filter(Link.network_id==network_id, ResourceType.type_id==type_id).all()
groups_with_type = db.DBSession.query(ResourceGroup).join(ResourceType).filter(ResourceGroup.network_id==network_id, ResourceType.type_id==type_id).all()
return nodes_with_type, links_with_type, groups_with_type
|
[
"def",
"get_resources_of_type",
"(",
"network_id",
",",
"type_id",
",",
"*",
"*",
"kwargs",
")",
":",
"nodes_with_type",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"Node",
")",
".",
"join",
"(",
"ResourceType",
")",
".",
"filter",
"(",
"Node",
".",
"network_id",
"==",
"network_id",
",",
"ResourceType",
".",
"type_id",
"==",
"type_id",
")",
".",
"all",
"(",
")",
"links_with_type",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"Link",
")",
".",
"join",
"(",
"ResourceType",
")",
".",
"filter",
"(",
"Link",
".",
"network_id",
"==",
"network_id",
",",
"ResourceType",
".",
"type_id",
"==",
"type_id",
")",
".",
"all",
"(",
")",
"groups_with_type",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"ResourceGroup",
")",
".",
"join",
"(",
"ResourceType",
")",
".",
"filter",
"(",
"ResourceGroup",
".",
"network_id",
"==",
"network_id",
",",
"ResourceType",
".",
"type_id",
"==",
"type_id",
")",
".",
"all",
"(",
")",
"return",
"nodes_with_type",
",",
"links_with_type",
",",
"groups_with_type"
] | 59.363636 | 41.363636 |
def plot_posterior_contour(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01):
"""
Plots a contour of the kernel density estimation
of a 2D projection of the current posterior distribution.
:param int idx_param1: Parameter to be treated as :math:`x` when
plotting.
:param int idx_param2: Parameter to be treated as :math:`y` when
plotting.
:param int res1: Resolution along the :math:`x` direction.
:param int res2: Resolution along the :math:`y` direction.
:param float smoothing: Standard deviation of the Gaussian kernel
used to smooth the particle approximation to the current posterior.
.. seealso::
:meth:`SMCUpdater.posterior_mesh`
"""
return plt.contour(*self.posterior_mesh(idx_param1, idx_param2, res1, res2, smoothing))
|
[
"def",
"plot_posterior_contour",
"(",
"self",
",",
"idx_param1",
"=",
"0",
",",
"idx_param2",
"=",
"1",
",",
"res1",
"=",
"100",
",",
"res2",
"=",
"100",
",",
"smoothing",
"=",
"0.01",
")",
":",
"return",
"plt",
".",
"contour",
"(",
"*",
"self",
".",
"posterior_mesh",
"(",
"idx_param1",
",",
"idx_param2",
",",
"res1",
",",
"res2",
",",
"smoothing",
")",
")"
] | 45.789474 | 27.578947 |
def editPlan(self, plan, new_plan):
"""
Edits a plan
:param plan: Plan to edit
:param new_plan: New plan
:type plan: models.Plan
:raises: FBchatException if request failed
"""
data = {
"event_reminder_id": plan.uid,
"delete": "false",
"date": new_plan.time,
"location_name": new_plan.location or "",
"location_id": new_plan.location_id or "",
"title": new_plan.title,
"acontext": ACONTEXT,
}
j = self._post(self.req_url.PLAN_CHANGE, data, fix_request=True, as_json=True)
|
[
"def",
"editPlan",
"(",
"self",
",",
"plan",
",",
"new_plan",
")",
":",
"data",
"=",
"{",
"\"event_reminder_id\"",
":",
"plan",
".",
"uid",
",",
"\"delete\"",
":",
"\"false\"",
",",
"\"date\"",
":",
"new_plan",
".",
"time",
",",
"\"location_name\"",
":",
"new_plan",
".",
"location",
"or",
"\"\"",
",",
"\"location_id\"",
":",
"new_plan",
".",
"location_id",
"or",
"\"\"",
",",
"\"title\"",
":",
"new_plan",
".",
"title",
",",
"\"acontext\"",
":",
"ACONTEXT",
",",
"}",
"j",
"=",
"self",
".",
"_post",
"(",
"self",
".",
"req_url",
".",
"PLAN_CHANGE",
",",
"data",
",",
"fix_request",
"=",
"True",
",",
"as_json",
"=",
"True",
")"
] | 32.473684 | 13.421053 |
def on_train_begin(self, pbar, **kwargs:Any)->None:
"Initialize optimizer and learner hyperparameters."
setattr(pbar, 'clean_on_interrupt', True)
self.learn.save('tmp')
self.opt = self.learn.opt
self.opt.lr = self.sched.start
self.stop,self.best_loss = False,0.
return {'skip_validate': True}
|
[
"def",
"on_train_begin",
"(",
"self",
",",
"pbar",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"None",
":",
"setattr",
"(",
"pbar",
",",
"'clean_on_interrupt'",
",",
"True",
")",
"self",
".",
"learn",
".",
"save",
"(",
"'tmp'",
")",
"self",
".",
"opt",
"=",
"self",
".",
"learn",
".",
"opt",
"self",
".",
"opt",
".",
"lr",
"=",
"self",
".",
"sched",
".",
"start",
"self",
".",
"stop",
",",
"self",
".",
"best_loss",
"=",
"False",
",",
"0.",
"return",
"{",
"'skip_validate'",
":",
"True",
"}"
] | 42.625 | 7.875 |
def setup():
"""
Creates required directories and copy checkers and reports.
"""
# # Check if dir is writable
# if not os.access(AtomShieldsScanner.HOME, os.W_OK):
# AtomShieldsScanner.HOME = os.path.expanduser("~/.atomshields")
# AtomShieldsScanner.CHECKERS_DIR = os.path.join(AtomShieldsScanner.HOME, "checkers")
# AtomShieldsScanner.REPORTS_DIR = os.path.join(AtomShieldsScanner.HOME, "reports")
if not os.path.isdir(AtomShieldsScanner.CHECKERS_DIR):
os.makedirs(AtomShieldsScanner.CHECKERS_DIR)
if not os.path.isdir(AtomShieldsScanner.REPORTS_DIR):
os.makedirs(AtomShieldsScanner.REPORTS_DIR)
# Copy all checkers
for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), "checkers"), "*.py"):
AtomShieldsScanner.installChecker(f)
# Copy all reports
for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), "reports"), "*.py"):
AtomShieldsScanner.installReport(f)
AtomShieldsScanner._executeMassiveMethod(path=AtomShieldsScanner.CHECKERS_DIR, method="install", args={})
config_dir = os.path.dirname(AtomShieldsScanner.CONFIG_PATH)
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
|
[
"def",
"setup",
"(",
")",
":",
"# # Check if dir is writable",
"# if not os.access(AtomShieldsScanner.HOME, os.W_OK):",
"# \tAtomShieldsScanner.HOME = os.path.expanduser(\"~/.atomshields\")",
"# \tAtomShieldsScanner.CHECKERS_DIR = os.path.join(AtomShieldsScanner.HOME, \"checkers\")",
"# \tAtomShieldsScanner.REPORTS_DIR = os.path.join(AtomShieldsScanner.HOME, \"reports\")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"AtomShieldsScanner",
".",
"CHECKERS_DIR",
")",
":",
"os",
".",
"makedirs",
"(",
"AtomShieldsScanner",
".",
"CHECKERS_DIR",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"AtomShieldsScanner",
".",
"REPORTS_DIR",
")",
":",
"os",
".",
"makedirs",
"(",
"AtomShieldsScanner",
".",
"REPORTS_DIR",
")",
"# Copy all checkers",
"for",
"f",
"in",
"AtomShieldsScanner",
".",
"_getFiles",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
",",
"\"checkers\"",
")",
",",
"\"*.py\"",
")",
":",
"AtomShieldsScanner",
".",
"installChecker",
"(",
"f",
")",
"# Copy all reports",
"for",
"f",
"in",
"AtomShieldsScanner",
".",
"_getFiles",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
",",
"\"reports\"",
")",
",",
"\"*.py\"",
")",
":",
"AtomShieldsScanner",
".",
"installReport",
"(",
"f",
")",
"AtomShieldsScanner",
".",
"_executeMassiveMethod",
"(",
"path",
"=",
"AtomShieldsScanner",
".",
"CHECKERS_DIR",
",",
"method",
"=",
"\"install\"",
",",
"args",
"=",
"{",
"}",
")",
"config_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"AtomShieldsScanner",
".",
"CONFIG_PATH",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"config_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"config_dir",
")"
] | 37.46875 | 28.34375 |
def memory_full():
"""Check if the memory is too full for further caching."""
current_process = psutil.Process(os.getpid())
return (current_process.memory_percent() >
config.MAXIMUM_CACHE_MEMORY_PERCENTAGE)
|
[
"def",
"memory_full",
"(",
")",
":",
"current_process",
"=",
"psutil",
".",
"Process",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"return",
"(",
"current_process",
".",
"memory_percent",
"(",
")",
">",
"config",
".",
"MAXIMUM_CACHE_MEMORY_PERCENTAGE",
")"
] | 45.2 | 9.6 |
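A minimal version of the `memory_full` check above, assuming `psutil` is available; the threshold constant is invented here:

import os
import psutil

MAX_CACHE_MEMORY_PERCENT = 75.0   # stand-in for the config value used above

def memory_full():
    return psutil.Process(os.getpid()).memory_percent() > MAX_CACHE_MEMORY_PERCENT

print(memory_full())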
def create_upload_url(self, upload_id, number, size, hash_value, hash_alg):
"""
Given an upload created by create_upload retrieve a url where we can upload a chunk.
:param upload_id: uuid of the upload
:param number: int incrementing number of the upload (1-based index)
:param size: int size of the chunk in bytes
:param hash_value: str hash value of chunk
:param hash_alg: str algorithm used to create hash
:return: requests.Response containing the successful result
"""
if number < 1:
raise ValueError("Chunk number must be > 0")
data = {
"number": number,
"size": size,
"hash": {
"value": hash_value,
"algorithm": hash_alg
}
}
return self._put("/uploads/" + upload_id + "/chunks", data)
|
[
"def",
"create_upload_url",
"(",
"self",
",",
"upload_id",
",",
"number",
",",
"size",
",",
"hash_value",
",",
"hash_alg",
")",
":",
"if",
"number",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Chunk number must be > 0\"",
")",
"data",
"=",
"{",
"\"number\"",
":",
"number",
",",
"\"size\"",
":",
"size",
",",
"\"hash\"",
":",
"{",
"\"value\"",
":",
"hash_value",
",",
"\"algorithm\"",
":",
"hash_alg",
"}",
"}",
"return",
"self",
".",
"_put",
"(",
"\"/uploads/\"",
"+",
"upload_id",
"+",
"\"/chunks\"",
",",
"data",
")"
] | 41.238095 | 18.47619 |
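A self-contained sketch of building the chunk payload that `create_upload_url` above sends, using `hashlib` for the hash; the field names mirror the entry, everything else is made up:

import hashlib

chunk = b"example chunk payload"
payload = {
    "number": 1,                                   # 1-based chunk index; must be > 0
    "size": len(chunk),
    "hash": {"value": hashlib.md5(chunk).hexdigest(), "algorithm": "md5"},
}
print(payload)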
def clusterflow_pipelines_section(self):
""" Generate HTML for section about pipelines, generated from
information parsed from run files. """
data = dict()
pids_guessed = ''
for f,d in self.clusterflow_runfiles.items():
pid = d.get('pipeline_id', 'unknown')
if d.get('pipeline_id_guess', False) is True:
pid += '*'
pids_guessed = ' Project IDs with an asterisk may be inaccurate.'
# Count the number of files going into the first module
num_starting_files = 0
for step_name, files in d.get('files',{}).items():
if step_name.startswith('start'):
num_starting_files += len(files)
# Reformat the date so that column sorting works nicely
if 'pipeline_start_dateparts' in d:
dt = d['pipeline_start_dateparts']
d['pipeline_start'] = '{}-{:02d}-{:02d} {:02d}:{:02d}'.format(dt['year'], dt['month'], dt['day'], dt['hour'], dt['minute'])
if pid not in data:
data[pid] = d
data[pid]['num_starting_files'] = int(num_starting_files)
else:
data[pid]['num_starting_files'] += int(num_starting_files)
headers = OrderedDict()
headers['pipeline_name'] = {'title': 'Pipeline Name'}
headers['pipeline_start'] = {'title': 'Date Started', 'description': 'Date and time that pipeline was started (YYYY-MM-DD HH:SS)'}
headers['genome'] = {'title': 'Genome ID', 'description': 'ID of reference genome used'}
headers['num_starting_files'] = {'title': '# Starting Files', 'format': '{:,.0f}', 'description': 'Number of input files at start of pipeline run.'}
table_config = {
'namespace': 'Cluster Flow',
'id': 'clusterflow-pipelines-table',
'table_title': 'Cluster Flow Pipelines',
'col1_header': 'Pipeline ID',
'no_beeswarm': True,
'save_file': True
}
self.add_section (
name = 'Pipelines',
anchor = 'clusterflow-pipelines',
description = 'Information about pipelines is parsed from <code>*.run</code> files. {}'.format(pids_guessed),
plot = table.plot(data, headers, table_config),
content = self.clusterflow_pipelines_printout()
)
|
[
"def",
"clusterflow_pipelines_section",
"(",
"self",
")",
":",
"data",
"=",
"dict",
"(",
")",
"pids_guessed",
"=",
"''",
"for",
"f",
",",
"d",
"in",
"self",
".",
"clusterflow_runfiles",
".",
"items",
"(",
")",
":",
"pid",
"=",
"d",
".",
"get",
"(",
"'pipeline_id'",
",",
"'unknown'",
")",
"if",
"d",
".",
"get",
"(",
"'pipeline_id_guess'",
",",
"False",
")",
"is",
"True",
":",
"pid",
"+=",
"'*'",
"pids_guessed",
"=",
"' Project IDs with an asterisk may be inaccurate.'",
"# Count the number of files going into the first module",
"num_starting_files",
"=",
"0",
"for",
"step_name",
",",
"files",
"in",
"d",
".",
"get",
"(",
"'files'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"if",
"step_name",
".",
"startswith",
"(",
"'start'",
")",
":",
"num_starting_files",
"+=",
"len",
"(",
"files",
")",
"# Reformat the date so that column sorting works nicely",
"if",
"'pipeline_start_dateparts'",
"in",
"d",
":",
"dt",
"=",
"d",
"[",
"'pipeline_start_dateparts'",
"]",
"d",
"[",
"'pipeline_start'",
"]",
"=",
"'{}-{:02d}-{:02d} {:02d}:{:02d}'",
".",
"format",
"(",
"dt",
"[",
"'year'",
"]",
",",
"dt",
"[",
"'month'",
"]",
",",
"dt",
"[",
"'day'",
"]",
",",
"dt",
"[",
"'hour'",
"]",
",",
"dt",
"[",
"'minute'",
"]",
")",
"if",
"pid",
"not",
"in",
"data",
":",
"data",
"[",
"pid",
"]",
"=",
"d",
"data",
"[",
"pid",
"]",
"[",
"'num_starting_files'",
"]",
"=",
"int",
"(",
"num_starting_files",
")",
"else",
":",
"data",
"[",
"pid",
"]",
"[",
"'num_starting_files'",
"]",
"+=",
"int",
"(",
"num_starting_files",
")",
"headers",
"=",
"OrderedDict",
"(",
")",
"headers",
"[",
"'pipeline_name'",
"]",
"=",
"{",
"'title'",
":",
"'Pipeline Name'",
"}",
"headers",
"[",
"'pipeline_start'",
"]",
"=",
"{",
"'title'",
":",
"'Date Started'",
",",
"'description'",
":",
"'Date and time that pipeline was started (YYYY-MM-DD HH:SS)'",
"}",
"headers",
"[",
"'genome'",
"]",
"=",
"{",
"'title'",
":",
"'Genome ID'",
",",
"'description'",
":",
"'ID of reference genome used'",
"}",
"headers",
"[",
"'num_starting_files'",
"]",
"=",
"{",
"'title'",
":",
"'# Starting Files'",
",",
"'format'",
":",
"'{:,.0f}'",
",",
"'description'",
":",
"'Number of input files at start of pipeline run.'",
"}",
"table_config",
"=",
"{",
"'namespace'",
":",
"'Cluster Flow'",
",",
"'id'",
":",
"'clusterflow-pipelines-table'",
",",
"'table_title'",
":",
"'Cluster Flow Pipelines'",
",",
"'col1_header'",
":",
"'Pipeline ID'",
",",
"'no_beeswarm'",
":",
"True",
",",
"'save_file'",
":",
"True",
"}",
"self",
".",
"add_section",
"(",
"name",
"=",
"'Pipelines'",
",",
"anchor",
"=",
"'clusterflow-pipelines'",
",",
"description",
"=",
"'Information about pipelines is parsed from <code>*.run</code> files. {}'",
".",
"format",
"(",
"pids_guessed",
")",
",",
"plot",
"=",
"table",
".",
"plot",
"(",
"data",
",",
"headers",
",",
"table_config",
")",
",",
"content",
"=",
"self",
".",
"clusterflow_pipelines_printout",
"(",
")",
")"
] | 52.555556 | 23.6 |
def set_json(self, reason='', new_page=False):
"""Send the JSON from the cache to the usernotes wiki page.
Arguments:
reason: the change reason that will be posted to the wiki changelog
(str)
Raises:
OverflowError if the new JSON data is greater than max_page_size
"""
compressed_json = json.dumps(self._compress_json(self.cached_json))
if len(compressed_json) > self.max_page_size:
raise OverflowError(
'Usernotes page is too large (>{0} characters)'.
format(self.max_page_size)
)
if new_page:
self.subreddit.wiki.create(
self.page_name,
compressed_json,
reason
)
# Set the page as hidden and available to moderators only
self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)
else:
self.subreddit.wiki[self.page_name].edit(
compressed_json,
reason
)
|
[
"def",
"set_json",
"(",
"self",
",",
"reason",
"=",
"''",
",",
"new_page",
"=",
"False",
")",
":",
"compressed_json",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"_compress_json",
"(",
"self",
".",
"cached_json",
")",
")",
"if",
"len",
"(",
"compressed_json",
")",
">",
"self",
".",
"max_page_size",
":",
"raise",
"OverflowError",
"(",
"'Usernotes page is too large (>{0} characters)'",
".",
"format",
"(",
"self",
".",
"max_page_size",
")",
")",
"if",
"new_page",
":",
"self",
".",
"subreddit",
".",
"wiki",
".",
"create",
"(",
"self",
".",
"page_name",
",",
"compressed_json",
",",
"reason",
")",
"# Set the page as hidden and available to moderators only",
"self",
".",
"subreddit",
".",
"wiki",
"[",
"self",
".",
"page_name",
"]",
".",
"mod",
".",
"update",
"(",
"False",
",",
"permlevel",
"=",
"2",
")",
"else",
":",
"self",
".",
"subreddit",
".",
"wiki",
"[",
"self",
".",
"page_name",
"]",
".",
"edit",
"(",
"compressed_json",
",",
"reason",
")"
] | 34.966667 | 20.633333 |
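The size guard in the `set_json` entry above is just a length check on the serialised page. A stdlib-only sketch, with an invented page-size limit:

import json

MAX_PAGE_SIZE = 524288            # hypothetical limit; the real one lives on the class

notes = {"users": {"alice": {"ns": []}}}
payload = json.dumps(notes)
if len(payload) > MAX_PAGE_SIZE:
    raise OverflowError("Usernotes page is too large (>%d characters)" % MAX_PAGE_SIZE)
print(len(payload), "characters, within limit")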
def update_buttons(self):
"""Updates the enable status of delete and reset buttons."""
current_scheme = self.current_scheme
names = self.get_option("names")
try:
names.pop(names.index(u'Custom'))
except ValueError:
pass
delete_enabled = current_scheme not in names
self.delete_button.setEnabled(delete_enabled)
self.reset_button.setEnabled(not delete_enabled)
|
[
"def",
"update_buttons",
"(",
"self",
")",
":",
"current_scheme",
"=",
"self",
".",
"current_scheme",
"names",
"=",
"self",
".",
"get_option",
"(",
"\"names\"",
")",
"try",
":",
"names",
".",
"pop",
"(",
"names",
".",
"index",
"(",
"u'Custom'",
")",
")",
"except",
"ValueError",
":",
"pass",
"delete_enabled",
"=",
"current_scheme",
"not",
"in",
"names",
"self",
".",
"delete_button",
".",
"setEnabled",
"(",
"delete_enabled",
")",
"self",
".",
"reset_button",
".",
"setEnabled",
"(",
"not",
"delete_enabled",
")"
] | 39.727273 | 11.909091 |
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
|
[
"def",
"findAllNext",
"(",
"self",
",",
"name",
"=",
"None",
",",
"attrs",
"=",
"{",
"}",
",",
"text",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_findAll",
"(",
"name",
",",
"attrs",
",",
"text",
",",
"limit",
",",
"self",
".",
"nextGenerator",
",",
"*",
"*",
"kwargs",
")"
] | 53 | 11.833333 |
def from_pdb(cls, path, forcefield=None, loader=PDBFile, strict=True, **kwargs):
"""
Loads topology, positions and, potentially, velocities and vectors,
from a PDB or PDBx file
Parameters
----------
path : str
Path to PDB/PDBx file
        forcefield : list of str
Paths to FFXML and/or FRCMOD forcefields. REQUIRED.
Returns
-------
pdb : SystemHandler
SystemHandler with topology, positions, and, potentially, velocities and
box vectors. Forcefields are embedded in the `master` attribute.
"""
pdb = loader(path)
box = kwargs.pop('box', pdb.topology.getPeriodicBoxVectors())
positions = kwargs.pop('positions', pdb.positions)
velocities = kwargs.pop('velocities', getattr(pdb, 'velocities', None))
if strict and not forcefield:
from .md import FORCEFIELDS as forcefield
logger.info('! Forcefields for PDB not specified. Using default: %s',
', '.join(forcefield))
pdb.forcefield = ForceField(*list(process_forcefield(*forcefield)))
return cls(master=pdb.forcefield, topology=pdb.topology, positions=positions,
velocities=velocities, box=box, path=path, **kwargs)
|
[
"def",
"from_pdb",
"(",
"cls",
",",
"path",
",",
"forcefield",
"=",
"None",
",",
"loader",
"=",
"PDBFile",
",",
"strict",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"pdb",
"=",
"loader",
"(",
"path",
")",
"box",
"=",
"kwargs",
".",
"pop",
"(",
"'box'",
",",
"pdb",
".",
"topology",
".",
"getPeriodicBoxVectors",
"(",
")",
")",
"positions",
"=",
"kwargs",
".",
"pop",
"(",
"'positions'",
",",
"pdb",
".",
"positions",
")",
"velocities",
"=",
"kwargs",
".",
"pop",
"(",
"'velocities'",
",",
"getattr",
"(",
"pdb",
",",
"'velocities'",
",",
"None",
")",
")",
"if",
"strict",
"and",
"not",
"forcefield",
":",
"from",
".",
"md",
"import",
"FORCEFIELDS",
"as",
"forcefield",
"logger",
".",
"info",
"(",
"'! Forcefields for PDB not specified. Using default: %s'",
",",
"', '",
".",
"join",
"(",
"forcefield",
")",
")",
"pdb",
".",
"forcefield",
"=",
"ForceField",
"(",
"*",
"list",
"(",
"process_forcefield",
"(",
"*",
"forcefield",
")",
")",
")",
"return",
"cls",
"(",
"master",
"=",
"pdb",
".",
"forcefield",
",",
"topology",
"=",
"pdb",
".",
"topology",
",",
"positions",
"=",
"positions",
",",
"velocities",
"=",
"velocities",
",",
"box",
"=",
"box",
",",
"path",
"=",
"path",
",",
"*",
"*",
"kwargs",
")"
] | 41.548387 | 24.645161 |
def parse_selectors(model, fields=None, exclude=None, key_map=None, **options):
"""Validates fields are valid and maps pseudo-fields to actual fields
for a given model class.
"""
fields = fields or DEFAULT_SELECTORS
exclude = exclude or ()
key_map = key_map or {}
validated = []
for alias in fields:
# Map the output key name to the actual field/accessor name for
# the model
actual = key_map.get(alias, alias)
# Validate the field exists
cleaned = resolver.get_field(model, actual)
if cleaned is None:
raise AttributeError('The "{0}" attribute could not be found '
'on the model "{1}"'.format(actual, model))
# Mapped value, so use the original name listed in `fields`
if type(cleaned) is list:
validated.extend(cleaned)
elif alias != actual:
validated.append(alias)
else:
validated.append(cleaned)
return tuple([x for x in validated if x not in exclude])
|
[
"def",
"parse_selectors",
"(",
"model",
",",
"fields",
"=",
"None",
",",
"exclude",
"=",
"None",
",",
"key_map",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"fields",
"=",
"fields",
"or",
"DEFAULT_SELECTORS",
"exclude",
"=",
"exclude",
"or",
"(",
")",
"key_map",
"=",
"key_map",
"or",
"{",
"}",
"validated",
"=",
"[",
"]",
"for",
"alias",
"in",
"fields",
":",
"# Map the output key name to the actual field/accessor name for",
"# the model",
"actual",
"=",
"key_map",
".",
"get",
"(",
"alias",
",",
"alias",
")",
"# Validate the field exists",
"cleaned",
"=",
"resolver",
".",
"get_field",
"(",
"model",
",",
"actual",
")",
"if",
"cleaned",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"'The \"{0}\" attribute could not be found '",
"'on the model \"{1}\"'",
".",
"format",
"(",
"actual",
",",
"model",
")",
")",
"# Mapped value, so use the original name listed in `fields`",
"if",
"type",
"(",
"cleaned",
")",
"is",
"list",
":",
"validated",
".",
"extend",
"(",
"cleaned",
")",
"elif",
"alias",
"!=",
"actual",
":",
"validated",
".",
"append",
"(",
"alias",
")",
"else",
":",
"validated",
".",
"append",
"(",
"cleaned",
")",
"return",
"tuple",
"(",
"[",
"x",
"for",
"x",
"in",
"validated",
"if",
"x",
"not",
"in",
"exclude",
"]",
")"
] | 34.3 | 19.033333 |
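A toy version of the alias/`key_map` resolution done by `parse_selectors` above, using `hasattr` on a plain class in place of the model field resolver:

key_map = {"display_name": "name"}     # requested alias -> real attribute
fields = ("id", "display_name")
exclude = ("password",)

class Model:
    id = 1
    name = "example"

validated = []
for alias in fields:
    actual = key_map.get(alias, alias)
    if not hasattr(Model, actual):
        raise AttributeError('The "%s" attribute could not be found' % actual)
    # keep the requested spelling when it was a mapped alias
    validated.append(alias if alias != actual else actual)

print(tuple(v for v in validated if v not in exclude))   # ('id', 'display_name')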
def get_local_environnement(self):
"""
Mix the environment and the environment variables into a new local
environment dictionary
Note: We cannot just update the global os.environ because this
        would affect all other checks.
:return: local environment variables
:rtype: dict
"""
# Do not use copy.copy() here, as the resulting copy still
# changes the real environment (it is still a os._Environment
# instance).
local_env = os.environ.copy()
for local_var in self.env:
local_env[local_var] = self.env[local_var]
return local_env
|
[
"def",
"get_local_environnement",
"(",
"self",
")",
":",
"# Do not use copy.copy() here, as the resulting copy still",
"# changes the real environment (it is still a os._Environment",
"# instance).",
"local_env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"for",
"local_var",
"in",
"self",
".",
"env",
":",
"local_env",
"[",
"local_var",
"]",
"=",
"self",
".",
"env",
"[",
"local_var",
"]",
"return",
"local_env"
] | 35.333333 | 16.666667 |
def __run_spark_submit(lane_yaml, dist_dir, spark_home, spark_args, silent):
"""
Submits the packaged application to spark using a `spark-submit` subprocess
Parameters
----------
lane_yaml (str): Path to the YAML lane definition file
dist_dir (str): Path to the directory where the packaged code is located
spark_args (str): String of any additional spark config args to be passed when submitting
silent (bool): Flag indicating whether job output should be printed to console
"""
# spark-submit binary
cmd = ['spark-submit' if spark_home is None else os.path.join(spark_home, 'bin/spark-submit')]
# Supplied spark arguments
if spark_args:
cmd += spark_args
# Packaged App & lane
cmd += ['--py-files', 'libs.zip,_framework.zip,tasks.zip', 'main.py']
cmd += ['--lane', lane_yaml]
logging.info('Submitting to Spark')
logging.debug(str(cmd))
# Submit
devnull = open(os.devnull, 'w')
outp = {'stderr': STDOUT, 'stdout': devnull} if silent else {}
call(cmd, cwd=dist_dir, env=MY_ENV, **outp)
devnull.close()
|
[
"def",
"__run_spark_submit",
"(",
"lane_yaml",
",",
"dist_dir",
",",
"spark_home",
",",
"spark_args",
",",
"silent",
")",
":",
"# spark-submit binary",
"cmd",
"=",
"[",
"'spark-submit'",
"if",
"spark_home",
"is",
"None",
"else",
"os",
".",
"path",
".",
"join",
"(",
"spark_home",
",",
"'bin/spark-submit'",
")",
"]",
"# Supplied spark arguments",
"if",
"spark_args",
":",
"cmd",
"+=",
"spark_args",
"# Packaged App & lane",
"cmd",
"+=",
"[",
"'--py-files'",
",",
"'libs.zip,_framework.zip,tasks.zip'",
",",
"'main.py'",
"]",
"cmd",
"+=",
"[",
"'--lane'",
",",
"lane_yaml",
"]",
"logging",
".",
"info",
"(",
"'Submitting to Spark'",
")",
"logging",
".",
"debug",
"(",
"str",
"(",
"cmd",
")",
")",
"# Submit",
"devnull",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"outp",
"=",
"{",
"'stderr'",
":",
"STDOUT",
",",
"'stdout'",
":",
"devnull",
"}",
"if",
"silent",
"else",
"{",
"}",
"call",
"(",
"cmd",
",",
"cwd",
"=",
"dist_dir",
",",
"env",
"=",
"MY_ENV",
",",
"*",
"*",
"outp",
")",
"devnull",
".",
"close",
"(",
")"
] | 35.9 | 25.1 |
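A sketch of how the `spark-submit` command line is assembled in the entry above; paths and arguments are placeholders, and the actual `call` is left commented out:

import os
from subprocess import call

spark_home = None                              # or a path to a Spark installation
spark_args = ["--master", "local[2]"]          # made-up extra arguments

cmd = ["spark-submit" if spark_home is None
       else os.path.join(spark_home, "bin/spark-submit")]
cmd += spark_args
cmd += ["--py-files", "libs.zip,_framework.zip,tasks.zip", "main.py"]
cmd += ["--lane", "lane.yaml"]

print(cmd)
# call(cmd, cwd="dist")                        # uncomment to actually submit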
def strToUtf8(value):
'''
:type value: ``str``
:param value: value to encode
'''
kassert.is_of_types(value, str)
if sys.version_info < (3,):
return ''.join([unichr(ord(x)) for x in value])
return value
|
[
"def",
"strToUtf8",
"(",
"value",
")",
":",
"kassert",
".",
"is_of_types",
"(",
"value",
",",
"str",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"unichr",
"(",
"ord",
"(",
"x",
")",
")",
"for",
"x",
"in",
"value",
"]",
")",
"return",
"value"
] | 25.444444 | 17.888889 |
def seqingroups(groups,seq):
'helper for contigsub. takes the list of lists returned by groupelts and an array to check.\
returns (groupindex,indexingroup,matchlen) of longest match or None if no match'
if not (groups and seq): return None
bestmatch=None,None,0
if any(len(g)<2 for g in groups): raise ValueError('some subgroups have length < 2')
for i,g in filter(lambda x:x[1][0],enumerate(groups)): # i.e. we're only interested in groups with common elements
# begin starts at 0 so begin+1 starts at 1. (first elt of each group is the bool indicator)
begin=0
while 1:
try: begin=g.index(seq[0],begin+1)
except ValueError: break
jmax=min(len(g)-begin,len(seq))
for j in range(jmax):
if g[begin+j]!=seq[j]: break
else: j+=1 # so matchlen works below
matchlen=min(j,jmax)
if matchlen<bestmatch[2]: continue
bestmatch=[i,begin,matchlen] # note: begin is an offset including the initial bool
return bestmatch if bestmatch[2] else None
|
[
"def",
"seqingroups",
"(",
"groups",
",",
"seq",
")",
":",
"if",
"not",
"(",
"groups",
"and",
"seq",
")",
":",
"return",
"None",
"bestmatch",
"=",
"None",
",",
"None",
",",
"0",
"if",
"any",
"(",
"len",
"(",
"g",
")",
"<",
"2",
"for",
"g",
"in",
"groups",
")",
":",
"raise",
"ValueError",
"(",
"'some subgroups have length < 2'",
")",
"for",
"i",
",",
"g",
"in",
"filter",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"enumerate",
"(",
"groups",
")",
")",
":",
"# i.e. we're only interested in groups with common elements\r",
"# begin starts at 0 so begin+1 starts at 1. (first elt of each group is the bool indicator)\r",
"begin",
"=",
"0",
"while",
"1",
":",
"try",
":",
"begin",
"=",
"g",
".",
"index",
"(",
"seq",
"[",
"0",
"]",
",",
"begin",
"+",
"1",
")",
"except",
"ValueError",
":",
"break",
"jmax",
"=",
"min",
"(",
"len",
"(",
"g",
")",
"-",
"begin",
",",
"len",
"(",
"seq",
")",
")",
"for",
"j",
"in",
"range",
"(",
"jmax",
")",
":",
"if",
"g",
"[",
"begin",
"+",
"j",
"]",
"!=",
"seq",
"[",
"j",
"]",
":",
"break",
"else",
":",
"j",
"+=",
"1",
"# so matchlen works below\r",
"matchlen",
"=",
"min",
"(",
"j",
",",
"jmax",
")",
"if",
"matchlen",
"<",
"bestmatch",
"[",
"2",
"]",
":",
"continue",
"bestmatch",
"=",
"[",
"i",
",",
"begin",
",",
"matchlen",
"]",
"# note: begin is an offset including the initial bool\r",
"return",
"bestmatch",
"if",
"bestmatch",
"[",
"2",
"]",
"else",
"None"
] | 50.7 | 22.9 |
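A self-contained re-statement of the longest-match search that `seqingroups` above performs, on toy data (each group starts with a bool flag, and the returned offset counts that flag):

groups = [[True, 'a', 'b', 'c', 'd'], [False, 'x', 'y']]
seq = ['b', 'c']

best = None
for gi, g in enumerate(groups):
    if not g[0]:                         # only groups flagged with common elements
        continue
    for start in range(1, len(g)):
        if g[start] != seq[0]:
            continue
        n = 0
        while n < len(seq) and start + n < len(g) and g[start + n] == seq[n]:
            n += 1
        if best is None or n > best[2]:
            best = (gi, start, n)

print(best)   # (0, 2, 2): group 0, offset 2 (including the flag), match length 2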
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
"""Produce edges in a breadth-first-search starting at source.
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by D. Eppstein, July 2004.
"""
from collections import deque
from functools import partial
if reverse:
G = G.reverse()
edges_iter = partial(G.edges_iter, keys=keys, data=data)
list(G.edges_iter('multitest', keys=True, data=True))
visited_nodes = set([source])
# visited_edges = set([])
queue = deque([(source, edges_iter(source))])
while queue:
parent, edges = queue[0]
try:
edge = next(edges)
edge_nodata = edge[0:3]
# if edge_nodata not in visited_edges:
yield edge
# visited_edges.add(edge_nodata)
child = edge_nodata[1]
if child not in visited_nodes:
visited_nodes.add(child)
queue.append((child, edges_iter(child)))
except StopIteration:
queue.popleft()
|
[
"def",
"bfs_multi_edges",
"(",
"G",
",",
"source",
",",
"reverse",
"=",
"False",
",",
"keys",
"=",
"True",
",",
"data",
"=",
"False",
")",
":",
"from",
"collections",
"import",
"deque",
"from",
"functools",
"import",
"partial",
"if",
"reverse",
":",
"G",
"=",
"G",
".",
"reverse",
"(",
")",
"edges_iter",
"=",
"partial",
"(",
"G",
".",
"edges_iter",
",",
"keys",
"=",
"keys",
",",
"data",
"=",
"data",
")",
"list",
"(",
"G",
".",
"edges_iter",
"(",
"'multitest'",
",",
"keys",
"=",
"True",
",",
"data",
"=",
"True",
")",
")",
"visited_nodes",
"=",
"set",
"(",
"[",
"source",
"]",
")",
"# visited_edges = set([])",
"queue",
"=",
"deque",
"(",
"[",
"(",
"source",
",",
"edges_iter",
"(",
"source",
")",
")",
"]",
")",
"while",
"queue",
":",
"parent",
",",
"edges",
"=",
"queue",
"[",
"0",
"]",
"try",
":",
"edge",
"=",
"next",
"(",
"edges",
")",
"edge_nodata",
"=",
"edge",
"[",
"0",
":",
"3",
"]",
"# if edge_nodata not in visited_edges:",
"yield",
"edge",
"# visited_edges.add(edge_nodata)",
"child",
"=",
"edge_nodata",
"[",
"1",
"]",
"if",
"child",
"not",
"in",
"visited_nodes",
":",
"visited_nodes",
".",
"add",
"(",
"child",
")",
"queue",
".",
"append",
"(",
"(",
"child",
",",
"edges_iter",
"(",
"child",
")",
")",
")",
"except",
"StopIteration",
":",
"queue",
".",
"popleft",
"(",
")"
] | 33.516129 | 14.258065 |
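The `bfs_multi_edges` generator above keeps a queue of `(node, edge-iterator)` pairs so that edges are yielded lazily. The same pattern over a plain adjacency dict, self-contained:

from collections import deque

adj = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}   # stand-in for the multigraph

def bfs_edges(adj, source):
    visited = {source}
    queue = deque([(source, iter(adj[source]))])
    while queue:
        parent, children = queue[0]
        try:
            child = next(children)
            yield parent, child
            if child not in visited:
                visited.add(child)
                queue.append((child, iter(adj[child])))
        except StopIteration:
            queue.popleft()

print(list(bfs_edges(adj, 'a')))   # [('a', 'b'), ('a', 'c'), ('b', 'd')]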
def color_percentages(file_list, n_tasks=9, file_name="color_percent.png",
intensification_factor=1.2):
"""
Creates an image in which each cell in the avida grid is represented as
a square of 9 sub-cells. Each of these 9 sub-cells represents a different
task, and is colored such that cooler colors represent more complex tasks.
The saturation of each sub-cell indicates the percentage of grids in the
given data-set in which the organism in that cell could perform the
corresponding task.
Inputs: file_list - list of names of of avida task grid files to be used
in making figure.
intensification_factor (default 1.2): A number to multiply
the percentage of organisms doing a task by in order to increase
visibility. This can be useful in cases where a lot of the
percentages are too low to be easily visualized.
Returns: Grid indicating appropriate color values for images.
"""
# Load data
data = task_percentages(load_grid_data(file_list))
# Initialize grid
grid = [[]] * len(data)*3
for i in range(len(grid)):
grid[i] = [[]]*len(data[0])*3
# Color grid
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(3): # create grid of sub-cells
for l in range(3):
if len(data[i][j]) > k*3+l:
# build a color in matplotlib's preferred hsv format
arr = np.zeros((1, 1, 3))
arr[0, 0, 1] = float(data[i][j][k*3 + l]) \
* intensification_factor # saturate based on data
arr[0, 0, 0] = (k*3 + l)/9.0 # hue based on task
arr[0, 0, 2] = 1 # value is always 1
rgb = matplotlib.colors.hsv_to_rgb(arr) # convert rgb
grid[i*3+k][j*3+l] = list(rgb[0][0])
else:
grid[i*3+k][j*3+l] = (1, 1, 1, 1)
return make_imshow_plot(grid, "colorpercentages")
|
[
"def",
"color_percentages",
"(",
"file_list",
",",
"n_tasks",
"=",
"9",
",",
"file_name",
"=",
"\"color_percent.png\"",
",",
"intensification_factor",
"=",
"1.2",
")",
":",
"# Load data",
"data",
"=",
"task_percentages",
"(",
"load_grid_data",
"(",
"file_list",
")",
")",
"# Initialize grid",
"grid",
"=",
"[",
"[",
"]",
"]",
"*",
"len",
"(",
"data",
")",
"*",
"3",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"grid",
")",
")",
":",
"grid",
"[",
"i",
"]",
"=",
"[",
"[",
"]",
"]",
"*",
"len",
"(",
"data",
"[",
"0",
"]",
")",
"*",
"3",
"# Color grid",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"data",
"[",
"i",
"]",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"3",
")",
":",
"# create grid of sub-cells",
"for",
"l",
"in",
"range",
"(",
"3",
")",
":",
"if",
"len",
"(",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
")",
">",
"k",
"*",
"3",
"+",
"l",
":",
"# build a color in matplotlib's preferred hsv format",
"arr",
"=",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"1",
",",
"3",
")",
")",
"arr",
"[",
"0",
",",
"0",
",",
"1",
"]",
"=",
"float",
"(",
"data",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
"k",
"*",
"3",
"+",
"l",
"]",
")",
"*",
"intensification_factor",
"# saturate based on data",
"arr",
"[",
"0",
",",
"0",
",",
"0",
"]",
"=",
"(",
"k",
"*",
"3",
"+",
"l",
")",
"/",
"9.0",
"# hue based on task",
"arr",
"[",
"0",
",",
"0",
",",
"2",
"]",
"=",
"1",
"# value is always 1",
"rgb",
"=",
"matplotlib",
".",
"colors",
".",
"hsv_to_rgb",
"(",
"arr",
")",
"# convert rgb",
"grid",
"[",
"i",
"*",
"3",
"+",
"k",
"]",
"[",
"j",
"*",
"3",
"+",
"l",
"]",
"=",
"list",
"(",
"rgb",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"else",
":",
"grid",
"[",
"i",
"*",
"3",
"+",
"k",
"]",
"[",
"j",
"*",
"3",
"+",
"l",
"]",
"=",
"(",
"1",
",",
"1",
",",
"1",
",",
"1",
")",
"return",
"make_imshow_plot",
"(",
"grid",
",",
"\"colorpercentages\"",
")"
] | 44.021277 | 23.893617 |
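One sub-cell of the grid built by `color_percentages` above is an HSV triple where hue encodes the task and saturation encodes the percentage. A small sketch of that colouring step (matplotlib and numpy assumed available, numbers invented):

import matplotlib.colors
import numpy as np

task_index, n_tasks = 4, 9
percentage, factor = 0.6, 1.2                    # intensification factor, as above

hsv = np.zeros((1, 1, 3))
hsv[0, 0, 0] = task_index / float(n_tasks)       # hue: cooler colours for later tasks
hsv[0, 0, 1] = min(1.0, percentage * factor)     # saturation: how often the task is done
hsv[0, 0, 2] = 1.0                               # value is always 1
print(matplotlib.colors.hsv_to_rgb(hsv)[0][0])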
def LDREX(cpu, dest, src, offset=None):
"""
LDREX loads data from memory.
* If the physical address has the shared TLB attribute, LDREX
tags the physical address as exclusive access for the current
processor, and clears any exclusive access tag for this
processor for any other physical address.
* Otherwise, it tags the fact that the executing processor has
an outstanding tagged physical address.
:param Armv7Operand dest: the destination register; register
:param Armv7Operand src: the source operand: register
"""
# TODO: add lock mechanism to underlying memory --GR, 2017-06-06
cpu._LDR(dest, src, 32, False, offset)
|
[
"def",
"LDREX",
"(",
"cpu",
",",
"dest",
",",
"src",
",",
"offset",
"=",
"None",
")",
":",
"# TODO: add lock mechanism to underlying memory --GR, 2017-06-06",
"cpu",
".",
"_LDR",
"(",
"dest",
",",
"src",
",",
"32",
",",
"False",
",",
"offset",
")"
] | 48 | 17.733333 |
def interval(
value=None,
unit='s',
years=None,
quarters=None,
months=None,
weeks=None,
days=None,
hours=None,
minutes=None,
seconds=None,
milliseconds=None,
microseconds=None,
nanoseconds=None,
):
"""
Returns an interval literal
Parameters
----------
value : int or datetime.timedelta, default None
years : int, default None
quarters : int, default None
months : int, default None
days : int, default None
weeks : int, default None
hours : int, default None
minutes : int, default None
seconds : int, default None
milliseconds : int, default None
microseconds : int, default None
nanoseconds : int, default None
Returns
--------
result : IntervalScalar
"""
if value is not None:
if isinstance(value, datetime.timedelta):
unit = 's'
value = int(value.total_seconds())
elif not isinstance(value, int):
raise ValueError('Interval value must be an integer')
else:
kwds = [
('Y', years),
('Q', quarters),
('M', months),
('W', weeks),
('D', days),
('h', hours),
('m', minutes),
('s', seconds),
('ms', milliseconds),
('us', microseconds),
('ns', nanoseconds),
]
defined_units = [(k, v) for k, v in kwds if v is not None]
if len(defined_units) != 1:
raise ValueError('Exactly one argument is required')
unit, value = defined_units[0]
value_type = literal(value).type()
type = dt.Interval(unit, value_type)
return literal(value, type=type).op().to_expr()
|
[
"def",
"interval",
"(",
"value",
"=",
"None",
",",
"unit",
"=",
"'s'",
",",
"years",
"=",
"None",
",",
"quarters",
"=",
"None",
",",
"months",
"=",
"None",
",",
"weeks",
"=",
"None",
",",
"days",
"=",
"None",
",",
"hours",
"=",
"None",
",",
"minutes",
"=",
"None",
",",
"seconds",
"=",
"None",
",",
"milliseconds",
"=",
"None",
",",
"microseconds",
"=",
"None",
",",
"nanoseconds",
"=",
"None",
",",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"timedelta",
")",
":",
"unit",
"=",
"'s'",
"value",
"=",
"int",
"(",
"value",
".",
"total_seconds",
"(",
")",
")",
"elif",
"not",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"'Interval value must be an integer'",
")",
"else",
":",
"kwds",
"=",
"[",
"(",
"'Y'",
",",
"years",
")",
",",
"(",
"'Q'",
",",
"quarters",
")",
",",
"(",
"'M'",
",",
"months",
")",
",",
"(",
"'W'",
",",
"weeks",
")",
",",
"(",
"'D'",
",",
"days",
")",
",",
"(",
"'h'",
",",
"hours",
")",
",",
"(",
"'m'",
",",
"minutes",
")",
",",
"(",
"'s'",
",",
"seconds",
")",
",",
"(",
"'ms'",
",",
"milliseconds",
")",
",",
"(",
"'us'",
",",
"microseconds",
")",
",",
"(",
"'ns'",
",",
"nanoseconds",
")",
",",
"]",
"defined_units",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwds",
"if",
"v",
"is",
"not",
"None",
"]",
"if",
"len",
"(",
"defined_units",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Exactly one argument is required'",
")",
"unit",
",",
"value",
"=",
"defined_units",
"[",
"0",
"]",
"value_type",
"=",
"literal",
"(",
"value",
")",
".",
"type",
"(",
")",
"type",
"=",
"dt",
".",
"Interval",
"(",
"unit",
",",
"value_type",
")",
"return",
"literal",
"(",
"value",
",",
"type",
"=",
"type",
")",
".",
"op",
"(",
")",
".",
"to_expr",
"(",
")"
] | 24.602941 | 17.720588 |
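The keyword handling in the `interval` entry above reduces to requiring exactly one non-`None` unit. That check in isolation:

def one_unit(**units):
    """Return the single (unit, value) pair that was supplied, else raise."""
    defined = [(k, v) for k, v in units.items() if v is not None]
    if len(defined) != 1:
        raise ValueError('Exactly one argument is required')
    return defined[0]

print(one_unit(years=None, days=3, seconds=None))   # ('days', 3)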
def profile_settings_args_install_json(self, ij, required):
"""Return args based on install.json params.
Args:
ij (dict): The install.json contents.
required (bool): If True only required args will be returned.
Returns:
dict: Dictionary of required or optional App args.
"""
profile_args = {}
# add App specific args
for p in ij.get('params') or []:
# TODO: fix this required logic
if p.get('required', False) != required and required is not None:
continue
if p.get('type').lower() == 'boolean':
profile_args[p.get('name')] = self._to_bool(p.get('default', False))
elif p.get('type').lower() == 'choice':
valid_values = '|'.join(self.expand_valid_values(p.get('validValues', [])))
profile_args[p.get('name')] = '[{}]'.format(valid_values)
elif p.get('type').lower() == 'multichoice':
profile_args[p.get('name')] = p.get('validValues', [])
elif p.get('name') in ['api_access_id', 'api_secret_key']:
# leave these parameters set to the value defined in defaults
pass
else:
types = '|'.join(p.get('playbookDataType', []))
if types:
profile_args[p.get('name')] = p.get('default', '<{}>'.format(types))
else:
profile_args[p.get('name')] = p.get('default', '')
return profile_args
|
[
"def",
"profile_settings_args_install_json",
"(",
"self",
",",
"ij",
",",
"required",
")",
":",
"profile_args",
"=",
"{",
"}",
"# add App specific args",
"for",
"p",
"in",
"ij",
".",
"get",
"(",
"'params'",
")",
"or",
"[",
"]",
":",
"# TODO: fix this required logic",
"if",
"p",
".",
"get",
"(",
"'required'",
",",
"False",
")",
"!=",
"required",
"and",
"required",
"is",
"not",
"None",
":",
"continue",
"if",
"p",
".",
"get",
"(",
"'type'",
")",
".",
"lower",
"(",
")",
"==",
"'boolean'",
":",
"profile_args",
"[",
"p",
".",
"get",
"(",
"'name'",
")",
"]",
"=",
"self",
".",
"_to_bool",
"(",
"p",
".",
"get",
"(",
"'default'",
",",
"False",
")",
")",
"elif",
"p",
".",
"get",
"(",
"'type'",
")",
".",
"lower",
"(",
")",
"==",
"'choice'",
":",
"valid_values",
"=",
"'|'",
".",
"join",
"(",
"self",
".",
"expand_valid_values",
"(",
"p",
".",
"get",
"(",
"'validValues'",
",",
"[",
"]",
")",
")",
")",
"profile_args",
"[",
"p",
".",
"get",
"(",
"'name'",
")",
"]",
"=",
"'[{}]'",
".",
"format",
"(",
"valid_values",
")",
"elif",
"p",
".",
"get",
"(",
"'type'",
")",
".",
"lower",
"(",
")",
"==",
"'multichoice'",
":",
"profile_args",
"[",
"p",
".",
"get",
"(",
"'name'",
")",
"]",
"=",
"p",
".",
"get",
"(",
"'validValues'",
",",
"[",
"]",
")",
"elif",
"p",
".",
"get",
"(",
"'name'",
")",
"in",
"[",
"'api_access_id'",
",",
"'api_secret_key'",
"]",
":",
"# leave these parameters set to the value defined in defaults",
"pass",
"else",
":",
"types",
"=",
"'|'",
".",
"join",
"(",
"p",
".",
"get",
"(",
"'playbookDataType'",
",",
"[",
"]",
")",
")",
"if",
"types",
":",
"profile_args",
"[",
"p",
".",
"get",
"(",
"'name'",
")",
"]",
"=",
"p",
".",
"get",
"(",
"'default'",
",",
"'<{}>'",
".",
"format",
"(",
"types",
")",
")",
"else",
":",
"profile_args",
"[",
"p",
".",
"get",
"(",
"'name'",
")",
"]",
"=",
"p",
".",
"get",
"(",
"'default'",
",",
"''",
")",
"return",
"profile_args"
] | 44.941176 | 23.147059 |
def get_drawing(self, drawing_id):
"""
Return the Drawing or raise a 404 if the drawing is unknown
"""
try:
return self._drawings[drawing_id]
except KeyError:
raise aiohttp.web.HTTPNotFound(text="Drawing ID {} doesn't exist".format(drawing_id))
|
[
"def",
"get_drawing",
"(",
"self",
",",
"drawing_id",
")",
":",
"try",
":",
"return",
"self",
".",
"_drawings",
"[",
"drawing_id",
"]",
"except",
"KeyError",
":",
"raise",
"aiohttp",
".",
"web",
".",
"HTTPNotFound",
"(",
"text",
"=",
"\"Drawing ID {} doesn't exist\"",
".",
"format",
"(",
"drawing_id",
")",
")"
] | 37.625 | 17.375 |
def asarray(self):
"""
Construct a numpy array from this column. Note that this
creates a copy of the data, so modifications made to the
array will *not* be recorded in the original document.
"""
# most codes don't use this feature, this is the only place
# numpy is used here, and importing numpy can be
        # time-consuming, so we defer the import until needed.
import numpy
try:
dtype = ligolwtypes.ToNumPyType[self.Type]
except KeyError as e:
raise TypeError("cannot determine numpy dtype for Column '%s': %s" % (self.getAttribute("Name"), e))
return numpy.fromiter(self, dtype = dtype)
|
[
"def",
"asarray",
"(",
"self",
")",
":",
"# most codes don't use this feature, this is the only place",
"# numpy is used here, and importing numpy can be",
"# time-consuming, so we derfer the import until needed.",
"import",
"numpy",
"try",
":",
"dtype",
"=",
"ligolwtypes",
".",
"ToNumPyType",
"[",
"self",
".",
"Type",
"]",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"\"cannot determine numpy dtype for Column '%s': %s\"",
"%",
"(",
"self",
".",
"getAttribute",
"(",
"\"Name\"",
")",
",",
"e",
")",
")",
"return",
"numpy",
".",
"fromiter",
"(",
"self",
",",
"dtype",
"=",
"dtype",
")"
] | 40.266667 | 18.133333 |
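The `asarray` entry above relies on `numpy.fromiter` to materialise an iterable column without building an intermediate list; a tiny stand-alone example:

import numpy

column = (x * x for x in range(5))                 # stand-in for the Column object
arr = numpy.fromiter(column, dtype=numpy.float64)
print(arr)                                         # [ 0.  1.  4.  9. 16.]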
def replace_priority_class(self, name, body, **kwargs): # noqa: E501
"""replace_priority_class # noqa: E501
replace the specified PriorityClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_priority_class(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PriorityClass (required)
:param V1beta1PriorityClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1PriorityClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_priority_class_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_priority_class_with_http_info(name, body, **kwargs) # noqa: E501
return data
|
[
"def",
"replace_priority_class",
"(",
"self",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"replace_priority_class_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"replace_priority_class_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 57.041667 | 30.458333 |
def assign_unassigned_members(self, group_category_id, sync=None):
"""
Assign unassigned members.
Assign all unassigned members as evenly as possible among the existing
student groups.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - group_category_id
"""ID"""
path["group_category_id"] = group_category_id
# OPTIONAL - sync
"""The assigning is done asynchronously by default. If you would like to
override this and have the assigning done synchronously, set this value
to true."""
if sync is not None:
data["sync"] = sync
self.logger.debug("POST /api/v1/group_categories/{group_category_id}/assign_unassigned_members with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/group_categories/{group_category_id}/assign_unassigned_members".format(**path), data=data, params=params, single_item=True)
|
[
"def",
"assign_unassigned_members",
"(",
"self",
",",
"group_category_id",
",",
"sync",
"=",
"None",
")",
":",
"path",
"=",
"{",
"}",
"data",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"# REQUIRED - PATH - group_category_id\r",
"\"\"\"ID\"\"\"",
"path",
"[",
"\"group_category_id\"",
"]",
"=",
"group_category_id",
"# OPTIONAL - sync\r",
"\"\"\"The assigning is done asynchronously by default. If you would like to\r\n override this and have the assigning done synchronously, set this value\r\n to true.\"\"\"",
"if",
"sync",
"is",
"not",
"None",
":",
"data",
"[",
"\"sync\"",
"]",
"=",
"sync",
"self",
".",
"logger",
".",
"debug",
"(",
"\"POST /api/v1/group_categories/{group_category_id}/assign_unassigned_members with query params: {params} and form data: {data}\"",
".",
"format",
"(",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"*",
"*",
"path",
")",
")",
"return",
"self",
".",
"generic_request",
"(",
"\"POST\"",
",",
"\"/api/v1/group_categories/{group_category_id}/assign_unassigned_members\"",
".",
"format",
"(",
"*",
"*",
"path",
")",
",",
"data",
"=",
"data",
",",
"params",
"=",
"params",
",",
"single_item",
"=",
"True",
")"
] | 43.583333 | 28.833333 |
def _transform_row_wrapper(self, row):
"""
Transforms a single source row.
:param dict[str|str] row: The source row.
"""
self._count_total += 1
try:
# Transform the naturals keys in line to technical keys.
in_row = copy.copy(row)
out_row = {}
park_info, ignore_info = self._transform_row(in_row, out_row)
except Exception as e:
# Log the exception.
self._handle_exception(row, e)
# Keep track of the number of errors.
self._count_error += 1
# This row must be parked.
park_info = 'Exception'
# Keep our IDE happy.
ignore_info = None
out_row = {}
if park_info:
# Park the row.
self.pre_park_row(park_info, row)
self._parked_writer.writerow(row)
self._count_park += 1
elif ignore_info:
# Ignore the row.
self.pre_ignore_row(ignore_info, row)
self._ignored_writer.writerow(row)
self._count_ignore += 1
else:
# Write the technical keys and measures to the output file.
self._transformed_writer.writerow(out_row)
self._count_transform += 1
|
[
"def",
"_transform_row_wrapper",
"(",
"self",
",",
"row",
")",
":",
"self",
".",
"_count_total",
"+=",
"1",
"try",
":",
"# Transform the naturals keys in line to technical keys.",
"in_row",
"=",
"copy",
".",
"copy",
"(",
"row",
")",
"out_row",
"=",
"{",
"}",
"park_info",
",",
"ignore_info",
"=",
"self",
".",
"_transform_row",
"(",
"in_row",
",",
"out_row",
")",
"except",
"Exception",
"as",
"e",
":",
"# Log the exception.",
"self",
".",
"_handle_exception",
"(",
"row",
",",
"e",
")",
"# Keep track of the number of errors.",
"self",
".",
"_count_error",
"+=",
"1",
"# This row must be parked.",
"park_info",
"=",
"'Exception'",
"# Keep our IDE happy.",
"ignore_info",
"=",
"None",
"out_row",
"=",
"{",
"}",
"if",
"park_info",
":",
"# Park the row.",
"self",
".",
"pre_park_row",
"(",
"park_info",
",",
"row",
")",
"self",
".",
"_parked_writer",
".",
"writerow",
"(",
"row",
")",
"self",
".",
"_count_park",
"+=",
"1",
"elif",
"ignore_info",
":",
"# Ignore the row.",
"self",
".",
"pre_ignore_row",
"(",
"ignore_info",
",",
"row",
")",
"self",
".",
"_ignored_writer",
".",
"writerow",
"(",
"row",
")",
"self",
".",
"_count_ignore",
"+=",
"1",
"else",
":",
"# Write the technical keys and measures to the output file.",
"self",
".",
"_transformed_writer",
".",
"writerow",
"(",
"out_row",
")",
"self",
".",
"_count_transform",
"+=",
"1"
] | 32.512821 | 13.74359 |
def open_connection(self):
"""Open an sqlite connection to the metadata database.
By default the metadata database will be used in the plugin dir,
unless an explicit path has been set using setmetadataDbPath, or
overridden in QSettings. If the db does not exist it will
be created.
:raises: An sqlite.Error is raised if anything goes wrong
"""
self.connection = None
base_directory = os.path.dirname(self.metadata_db_path)
if not os.path.exists(base_directory):
try:
os.mkdir(base_directory)
except IOError:
LOGGER.exception(
'Could not create directory for metadata cache.')
raise
try:
self.connection = sqlite.connect(self.metadata_db_path)
except (OperationalError, sqlite.Error):
LOGGER.exception('Failed to open metadata cache database.')
raise
|
[
"def",
"open_connection",
"(",
"self",
")",
":",
"self",
".",
"connection",
"=",
"None",
"base_directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"metadata_db_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"base_directory",
")",
":",
"try",
":",
"os",
".",
"mkdir",
"(",
"base_directory",
")",
"except",
"IOError",
":",
"LOGGER",
".",
"exception",
"(",
"'Could not create directory for metadata cache.'",
")",
"raise",
"try",
":",
"self",
".",
"connection",
"=",
"sqlite",
".",
"connect",
"(",
"self",
".",
"metadata_db_path",
")",
"except",
"(",
"OperationalError",
",",
"sqlite",
".",
"Error",
")",
":",
"LOGGER",
".",
"exception",
"(",
"'Failed to open metadata cache database.'",
")",
"raise"
] | 38.08 | 20.68 |
def _check_classmethod_declaration(self, node):
"""Checks for uses of classmethod() or staticmethod()
When a @classmethod or @staticmethod decorator should be used instead.
A message will be emitted only if the assignment is at a class scope
and only if the classmethod's argument belongs to the class where it
is defined.
`node` is an assign node.
"""
if not isinstance(node.value, astroid.Call):
return
# check the function called is "classmethod" or "staticmethod"
func = node.value.func
if not isinstance(func, astroid.Name) or func.name not in (
"classmethod",
"staticmethod",
):
return
msg = (
"no-classmethod-decorator"
if func.name == "classmethod"
else "no-staticmethod-decorator"
)
# assignment must be at a class scope
parent_class = node.scope()
if not isinstance(parent_class, astroid.ClassDef):
return
# Check if the arg passed to classmethod is a class member
classmeth_arg = node.value.args[0]
if not isinstance(classmeth_arg, astroid.Name):
return
method_name = classmeth_arg.name
if any(method_name == member.name for member in parent_class.mymethods()):
self.add_message(msg, node=node.targets[0])
|
[
"def",
"_check_classmethod_declaration",
"(",
"self",
",",
"node",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
".",
"value",
",",
"astroid",
".",
"Call",
")",
":",
"return",
"# check the function called is \"classmethod\" or \"staticmethod\"",
"func",
"=",
"node",
".",
"value",
".",
"func",
"if",
"not",
"isinstance",
"(",
"func",
",",
"astroid",
".",
"Name",
")",
"or",
"func",
".",
"name",
"not",
"in",
"(",
"\"classmethod\"",
",",
"\"staticmethod\"",
",",
")",
":",
"return",
"msg",
"=",
"(",
"\"no-classmethod-decorator\"",
"if",
"func",
".",
"name",
"==",
"\"classmethod\"",
"else",
"\"no-staticmethod-decorator\"",
")",
"# assignment must be at a class scope",
"parent_class",
"=",
"node",
".",
"scope",
"(",
")",
"if",
"not",
"isinstance",
"(",
"parent_class",
",",
"astroid",
".",
"ClassDef",
")",
":",
"return",
"# Check if the arg passed to classmethod is a class member",
"classmeth_arg",
"=",
"node",
".",
"value",
".",
"args",
"[",
"0",
"]",
"if",
"not",
"isinstance",
"(",
"classmeth_arg",
",",
"astroid",
".",
"Name",
")",
":",
"return",
"method_name",
"=",
"classmeth_arg",
".",
"name",
"if",
"any",
"(",
"method_name",
"==",
"member",
".",
"name",
"for",
"member",
"in",
"parent_class",
".",
"mymethods",
"(",
")",
")",
":",
"self",
".",
"add_message",
"(",
"msg",
",",
"node",
"=",
"node",
".",
"targets",
"[",
"0",
"]",
")"
] | 36.315789 | 20 |
def insert_paragraph_before(self, text=None, style=None):
"""
Return a newly created paragraph, inserted directly before this
paragraph. If *text* is supplied, the new paragraph contains that
text in a single run. If *style* is provided, that style is assigned
to the new paragraph.
"""
paragraph = self._insert_paragraph_before()
if text:
paragraph.add_run(text)
if style is not None:
paragraph.style = style
return paragraph
|
[
"def",
"insert_paragraph_before",
"(",
"self",
",",
"text",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"paragraph",
"=",
"self",
".",
"_insert_paragraph_before",
"(",
")",
"if",
"text",
":",
"paragraph",
".",
"add_run",
"(",
"text",
")",
"if",
"style",
"is",
"not",
"None",
":",
"paragraph",
".",
"style",
"=",
"style",
"return",
"paragraph"
] | 39.846154 | 15.384615 |
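If this is the python-docx Paragraph method (the signature matches), a typical call looks like the sketch below; it assumes python-docx is installed and that 'Heading 1' exists in the document's styles, as it does in the default template.

from docx import Document

doc = Document()
later = doc.add_paragraph('Results')
# Insert a styled paragraph directly before an existing one.
intro = later.insert_paragraph_before('Introduction', style='Heading 1')
doc.save('report.docx')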
def get_sos_decomposition(sdp, y_mat=None, threshold=0.0):
"""Given a solution of the dual problem, it returns the SOS
decomposition.
:param sdp: The SDP relaxation to be solved.
:type sdp: :class:`ncpol2sdpa.sdp`.
:param y_mat: Optional parameter providing the dual solution of the
moment matrix. If not provided, the solution is extracted
from the sdp object.
:type y_mat: :class:`numpy.array`.
:param threshold: Optional parameter for specifying the threshold value
below which the eigenvalues and entries of the
eigenvectors are disregarded.
:type threshold: float.
:returns: The SOS decomposition of [sigma_0, sigma_1, ..., sigma_m]
:rtype: list of :class:`sympy.core.exp.Expr`.
"""
if len(sdp.monomial_sets) != 1:
raise Exception("Cannot automatically match primal and dual " +
"variables.")
elif len(sdp.y_mat[1:]) != len(sdp.constraints):
raise Exception("Cannot automatically match constraints with blocks " +
"in the dual solution.")
elif sdp.status == "unsolved" and y_mat is None:
raise Exception("The SDP relaxation is unsolved and dual solution " +
"is not provided!")
elif sdp.status != "unsolved" and y_mat is None:
y_mat = sdp.y_mat
sos = []
for y_mat_block in y_mat:
term = 0
vals, vecs = np.linalg.eigh(y_mat_block)
for j, val in enumerate(vals):
if val < -0.001:
raise Exception("Large negative eigenvalue: " + val +
". Matrix cannot be positive.")
elif val > 0:
sub_term = 0
for i, entry in enumerate(vecs[:, j]):
sub_term += entry * sdp.monomial_sets[0][i]
term += val * sub_term**2
term = expand(term)
new_term = 0
if term.is_Mul:
elements = [term]
else:
elements = term.as_coeff_mul()[1][0].as_coeff_add()[1]
for element in elements:
_, coeff = separate_scalar_factor(element)
if abs(coeff) > threshold:
new_term += element
sos.append(new_term)
return sos
|
[
"def",
"get_sos_decomposition",
"(",
"sdp",
",",
"y_mat",
"=",
"None",
",",
"threshold",
"=",
"0.0",
")",
":",
"if",
"len",
"(",
"sdp",
".",
"monomial_sets",
")",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"\"Cannot automatically match primal and dual \"",
"+",
"\"variables.\"",
")",
"elif",
"len",
"(",
"sdp",
".",
"y_mat",
"[",
"1",
":",
"]",
")",
"!=",
"len",
"(",
"sdp",
".",
"constraints",
")",
":",
"raise",
"Exception",
"(",
"\"Cannot automatically match constraints with blocks \"",
"+",
"\"in the dual solution.\"",
")",
"elif",
"sdp",
".",
"status",
"==",
"\"unsolved\"",
"and",
"y_mat",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"The SDP relaxation is unsolved and dual solution \"",
"+",
"\"is not provided!\"",
")",
"elif",
"sdp",
".",
"status",
"!=",
"\"unsolved\"",
"and",
"y_mat",
"is",
"None",
":",
"y_mat",
"=",
"sdp",
".",
"y_mat",
"sos",
"=",
"[",
"]",
"for",
"y_mat_block",
"in",
"y_mat",
":",
"term",
"=",
"0",
"vals",
",",
"vecs",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"y_mat_block",
")",
"for",
"j",
",",
"val",
"in",
"enumerate",
"(",
"vals",
")",
":",
"if",
"val",
"<",
"-",
"0.001",
":",
"raise",
"Exception",
"(",
"\"Large negative eigenvalue: \"",
"+",
"val",
"+",
"\". Matrix cannot be positive.\"",
")",
"elif",
"val",
">",
"0",
":",
"sub_term",
"=",
"0",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"vecs",
"[",
":",
",",
"j",
"]",
")",
":",
"sub_term",
"+=",
"entry",
"*",
"sdp",
".",
"monomial_sets",
"[",
"0",
"]",
"[",
"i",
"]",
"term",
"+=",
"val",
"*",
"sub_term",
"**",
"2",
"term",
"=",
"expand",
"(",
"term",
")",
"new_term",
"=",
"0",
"if",
"term",
".",
"is_Mul",
":",
"elements",
"=",
"[",
"term",
"]",
"else",
":",
"elements",
"=",
"term",
".",
"as_coeff_mul",
"(",
")",
"[",
"1",
"]",
"[",
"0",
"]",
".",
"as_coeff_add",
"(",
")",
"[",
"1",
"]",
"for",
"element",
"in",
"elements",
":",
"_",
",",
"coeff",
"=",
"separate_scalar_factor",
"(",
"element",
")",
"if",
"abs",
"(",
"coeff",
")",
">",
"threshold",
":",
"new_term",
"+=",
"element",
"sos",
".",
"append",
"(",
"new_term",
")",
"return",
"sos"
] | 42.641509 | 15.962264 |
def resolve_type(arg):
# type: (object) -> InternalType
"""
Resolve object to one of our internal collection types or generic built-in type.
Args:
arg: object to resolve
"""
arg_type = type(arg)
if arg_type == list:
assert isinstance(arg, list) # this line helps mypy figure out types
sample = arg[:min(4, len(arg))]
tentative_type = TentativeType()
for sample_item in sample:
tentative_type.add(resolve_type(sample_item))
return ListType(tentative_type)
elif arg_type == set:
assert isinstance(arg, set) # this line helps mypy figure out types
sample = []
iterator = iter(arg)
for i in range(0, min(4, len(arg))):
sample.append(next(iterator))
tentative_type = TentativeType()
for sample_item in sample:
tentative_type.add(resolve_type(sample_item))
return SetType(tentative_type)
elif arg_type == FakeIterator:
assert isinstance(arg, FakeIterator) # this line helps mypy figure out types
sample = []
iterator = iter(arg)
for i in range(0, min(4, len(arg))):
sample.append(next(iterator))
tentative_type = TentativeType()
for sample_item in sample:
tentative_type.add(resolve_type(sample_item))
return IteratorType(tentative_type)
elif arg_type == tuple:
assert isinstance(arg, tuple) # this line helps mypy figure out types
sample = list(arg[:min(10, len(arg))])
return TupleType([resolve_type(sample_item) for sample_item in sample])
elif arg_type == dict:
assert isinstance(arg, dict) # this line helps mypy figure out types
key_tt = TentativeType()
val_tt = TentativeType()
for i, (k, v) in enumerate(iteritems(arg)):
if i > 4:
break
key_tt.add(resolve_type(k))
val_tt.add(resolve_type(v))
return DictType(key_tt, val_tt)
else:
return type(arg)
|
[
"def",
"resolve_type",
"(",
"arg",
")",
":",
"# type: (object) -> InternalType",
"arg_type",
"=",
"type",
"(",
"arg",
")",
"if",
"arg_type",
"==",
"list",
":",
"assert",
"isinstance",
"(",
"arg",
",",
"list",
")",
"# this line helps mypy figure out types",
"sample",
"=",
"arg",
"[",
":",
"min",
"(",
"4",
",",
"len",
"(",
"arg",
")",
")",
"]",
"tentative_type",
"=",
"TentativeType",
"(",
")",
"for",
"sample_item",
"in",
"sample",
":",
"tentative_type",
".",
"add",
"(",
"resolve_type",
"(",
"sample_item",
")",
")",
"return",
"ListType",
"(",
"tentative_type",
")",
"elif",
"arg_type",
"==",
"set",
":",
"assert",
"isinstance",
"(",
"arg",
",",
"set",
")",
"# this line helps mypy figure out types",
"sample",
"=",
"[",
"]",
"iterator",
"=",
"iter",
"(",
"arg",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"min",
"(",
"4",
",",
"len",
"(",
"arg",
")",
")",
")",
":",
"sample",
".",
"append",
"(",
"next",
"(",
"iterator",
")",
")",
"tentative_type",
"=",
"TentativeType",
"(",
")",
"for",
"sample_item",
"in",
"sample",
":",
"tentative_type",
".",
"add",
"(",
"resolve_type",
"(",
"sample_item",
")",
")",
"return",
"SetType",
"(",
"tentative_type",
")",
"elif",
"arg_type",
"==",
"FakeIterator",
":",
"assert",
"isinstance",
"(",
"arg",
",",
"FakeIterator",
")",
"# this line helps mypy figure out types",
"sample",
"=",
"[",
"]",
"iterator",
"=",
"iter",
"(",
"arg",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"min",
"(",
"4",
",",
"len",
"(",
"arg",
")",
")",
")",
":",
"sample",
".",
"append",
"(",
"next",
"(",
"iterator",
")",
")",
"tentative_type",
"=",
"TentativeType",
"(",
")",
"for",
"sample_item",
"in",
"sample",
":",
"tentative_type",
".",
"add",
"(",
"resolve_type",
"(",
"sample_item",
")",
")",
"return",
"IteratorType",
"(",
"tentative_type",
")",
"elif",
"arg_type",
"==",
"tuple",
":",
"assert",
"isinstance",
"(",
"arg",
",",
"tuple",
")",
"# this line helps mypy figure out types",
"sample",
"=",
"list",
"(",
"arg",
"[",
":",
"min",
"(",
"10",
",",
"len",
"(",
"arg",
")",
")",
"]",
")",
"return",
"TupleType",
"(",
"[",
"resolve_type",
"(",
"sample_item",
")",
"for",
"sample_item",
"in",
"sample",
"]",
")",
"elif",
"arg_type",
"==",
"dict",
":",
"assert",
"isinstance",
"(",
"arg",
",",
"dict",
")",
"# this line helps mypy figure out types",
"key_tt",
"=",
"TentativeType",
"(",
")",
"val_tt",
"=",
"TentativeType",
"(",
")",
"for",
"i",
",",
"(",
"k",
",",
"v",
")",
"in",
"enumerate",
"(",
"iteritems",
"(",
"arg",
")",
")",
":",
"if",
"i",
">",
"4",
":",
"break",
"key_tt",
".",
"add",
"(",
"resolve_type",
"(",
"k",
")",
")",
"val_tt",
".",
"add",
"(",
"resolve_type",
"(",
"v",
")",
")",
"return",
"DictType",
"(",
"key_tt",
",",
"val_tt",
")",
"else",
":",
"return",
"type",
"(",
"arg",
")"
] | 38.384615 | 14.076923 |
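A pared-down sketch of the sampling idea in resolve_type: inspect only the first few elements of a container and collect their types instead of scanning everything. sample_element_types is an illustrative helper, not part of the row above.

def sample_element_types(seq, limit=4):
    # Only the first `limit` items are examined, mirroring arg[:min(4, len(arg))].
    return {type(item) for item in seq[:limit]}

print(sample_element_types([1, 2, 'a', 3.0, object()]))
# {<class 'int'>, <class 'str'>, <class 'float'>}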
def market_close(self, session, mins) -> Session:
"""
Time intervals for market close
Args:
session: [allday, day, am, pm, night]
        mins: minutes before close
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
end_time = self.exch[session][-1]
return Session(shift_time(end_time, -int(mins) + 1), end_time)
|
[
"def",
"market_close",
"(",
"self",
",",
"session",
",",
"mins",
")",
"->",
"Session",
":",
"if",
"session",
"not",
"in",
"self",
".",
"exch",
":",
"return",
"SessNA",
"end_time",
"=",
"self",
".",
"exch",
"[",
"session",
"]",
"[",
"-",
"1",
"]",
"return",
"Session",
"(",
"shift_time",
"(",
"end_time",
",",
"-",
"int",
"(",
"mins",
")",
"+",
"1",
")",
",",
"end_time",
")"
] | 30.928571 | 14.214286 |
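An illustrative stand-in for the shift_time arithmetic above, assuming HH:MM strings; the real shift_time helper and Session type live in the original module.

from datetime import datetime, timedelta

def minutes_before_close(end_time, mins):
    # Mirror shift_time(end_time, -int(mins) + 1): a window ending at the close.
    end = datetime.strptime(end_time, '%H:%M')
    start = end - timedelta(minutes=int(mins) - 1)
    return start.strftime('%H:%M'), end_time

print(minutes_before_close('16:00', 30))  # ('15:31', '16:00')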
def get(self, targetId):
"""
Yields the analysed wav data.
:param targetId:
:return:
"""
result = self._targetController.analyse(targetId)
if result:
if len(result) == 2:
if result[1] == 404:
return result
else:
return {'name': targetId, 'data': self._jsonify(result)}, 200
else:
return None, 404
else:
return None, 500
|
[
"def",
"get",
"(",
"self",
",",
"targetId",
")",
":",
"result",
"=",
"self",
".",
"_targetController",
".",
"analyse",
"(",
"targetId",
")",
"if",
"result",
":",
"if",
"len",
"(",
"result",
")",
"==",
"2",
":",
"if",
"result",
"[",
"1",
"]",
"==",
"404",
":",
"return",
"result",
"else",
":",
"return",
"{",
"'name'",
":",
"targetId",
",",
"'data'",
":",
"self",
".",
"_jsonify",
"(",
"result",
")",
"}",
",",
"200",
"else",
":",
"return",
"None",
",",
"404",
"else",
":",
"return",
"None",
",",
"500"
] | 29 | 14.411765 |
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
[
"def",
"sources_list",
"(",
"ruby",
"=",
"None",
",",
"runas",
"=",
"None",
",",
"gem_bin",
"=",
"None",
")",
":",
"ret",
"=",
"_gem",
"(",
"[",
"'sources'",
"]",
",",
"ruby",
",",
"gem_bin",
"=",
"gem_bin",
",",
"runas",
"=",
"runas",
")",
"return",
"[",
"]",
"if",
"ret",
"is",
"False",
"else",
"ret",
".",
"splitlines",
"(",
")",
"[",
"2",
":",
"]"
] | 29.05 | 20.25 |
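The splitlines()[2:] parsing above assumes gem prints two header lines before the source URLs; a quick sketch with canned output:

sample = """*** CURRENT SOURCES ***

https://rubygems.org/
https://gems.example.internal/"""

# Drop the banner and blank line, keep one URL per entry.
print(sample.splitlines()[2:])
# ['https://rubygems.org/', 'https://gems.example.internal/']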
def get_fitness(self, solution):
"""Return fitness for the given solution."""
return self._fitness_function(solution, *self._fitness_args,
**self._fitness_kwargs)
|
[
"def",
"get_fitness",
"(",
"self",
",",
"solution",
")",
":",
"return",
"self",
".",
"_fitness_function",
"(",
"solution",
",",
"*",
"self",
".",
"_fitness_args",
",",
"*",
"*",
"self",
".",
"_fitness_kwargs",
")"
] | 53.25 | 14.25 |
def insert_with_id(obj):
"""
Generates a unique ID for the supplied legislator/committee/bill
and inserts it into the appropriate collection.
"""
if '_id' in obj:
raise ValueError("object already has '_id' field")
# add created_at/updated_at on insert
obj['created_at'] = datetime.datetime.utcnow()
obj['updated_at'] = obj['created_at']
if obj['_type'] == 'person' or obj['_type'] == 'legislator':
collection = db.legislators
id_type = 'L'
elif obj['_type'] == 'committee':
collection = db.committees
id_type = 'C'
elif obj['_type'] == 'bill':
collection = db.bills
id_type = 'B'
else:
raise ValueError("unknown _type for object")
# get abbr
abbr = obj[settings.LEVEL_FIELD].upper()
id_reg = re.compile('^%s%s' % (abbr, id_type))
# Find the next available _id and insert
id_prefix = '%s%s' % (abbr, id_type)
cursor = collection.find({'_id': id_reg}).sort('_id', -1).limit(1)
try:
new_id = int(next(cursor)['_id'][len(abbr) + 1:]) + 1
except StopIteration:
new_id = 1
while True:
if obj['_type'] == 'bill':
obj['_id'] = '%s%08d' % (id_prefix, new_id)
else:
obj['_id'] = '%s%06d' % (id_prefix, new_id)
obj['_all_ids'] = [obj['_id']]
if obj['_type'] in ['person', 'legislator']:
obj['leg_id'] = obj['_id']
try:
return collection.insert(obj, safe=True)
except pymongo.errors.DuplicateKeyError:
new_id += 1
|
[
"def",
"insert_with_id",
"(",
"obj",
")",
":",
"if",
"'_id'",
"in",
"obj",
":",
"raise",
"ValueError",
"(",
"\"object already has '_id' field\"",
")",
"# add created_at/updated_at on insert",
"obj",
"[",
"'created_at'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"obj",
"[",
"'updated_at'",
"]",
"=",
"obj",
"[",
"'created_at'",
"]",
"if",
"obj",
"[",
"'_type'",
"]",
"==",
"'person'",
"or",
"obj",
"[",
"'_type'",
"]",
"==",
"'legislator'",
":",
"collection",
"=",
"db",
".",
"legislators",
"id_type",
"=",
"'L'",
"elif",
"obj",
"[",
"'_type'",
"]",
"==",
"'committee'",
":",
"collection",
"=",
"db",
".",
"committees",
"id_type",
"=",
"'C'",
"elif",
"obj",
"[",
"'_type'",
"]",
"==",
"'bill'",
":",
"collection",
"=",
"db",
".",
"bills",
"id_type",
"=",
"'B'",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown _type for object\"",
")",
"# get abbr",
"abbr",
"=",
"obj",
"[",
"settings",
".",
"LEVEL_FIELD",
"]",
".",
"upper",
"(",
")",
"id_reg",
"=",
"re",
".",
"compile",
"(",
"'^%s%s'",
"%",
"(",
"abbr",
",",
"id_type",
")",
")",
"# Find the next available _id and insert",
"id_prefix",
"=",
"'%s%s'",
"%",
"(",
"abbr",
",",
"id_type",
")",
"cursor",
"=",
"collection",
".",
"find",
"(",
"{",
"'_id'",
":",
"id_reg",
"}",
")",
".",
"sort",
"(",
"'_id'",
",",
"-",
"1",
")",
".",
"limit",
"(",
"1",
")",
"try",
":",
"new_id",
"=",
"int",
"(",
"next",
"(",
"cursor",
")",
"[",
"'_id'",
"]",
"[",
"len",
"(",
"abbr",
")",
"+",
"1",
":",
"]",
")",
"+",
"1",
"except",
"StopIteration",
":",
"new_id",
"=",
"1",
"while",
"True",
":",
"if",
"obj",
"[",
"'_type'",
"]",
"==",
"'bill'",
":",
"obj",
"[",
"'_id'",
"]",
"=",
"'%s%08d'",
"%",
"(",
"id_prefix",
",",
"new_id",
")",
"else",
":",
"obj",
"[",
"'_id'",
"]",
"=",
"'%s%06d'",
"%",
"(",
"id_prefix",
",",
"new_id",
")",
"obj",
"[",
"'_all_ids'",
"]",
"=",
"[",
"obj",
"[",
"'_id'",
"]",
"]",
"if",
"obj",
"[",
"'_type'",
"]",
"in",
"[",
"'person'",
",",
"'legislator'",
"]",
":",
"obj",
"[",
"'leg_id'",
"]",
"=",
"obj",
"[",
"'_id'",
"]",
"try",
":",
"return",
"collection",
".",
"insert",
"(",
"obj",
",",
"safe",
"=",
"True",
")",
"except",
"pymongo",
".",
"errors",
".",
"DuplicateKeyError",
":",
"new_id",
"+=",
"1"
] | 29.442308 | 18.365385 |
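A minimal, in-memory sketch of the ID-allocation loop above: prefix plus zero-padded counter, retried until an unused value is found; the set stands in for the MongoDB collection.

existing = {'TXB00000001', 'TXB00000002'}

def next_bill_id(abbr, taken):
    new_id = 1
    while True:
        candidate = '%s%s%08d' % (abbr, 'B', new_id)
        if candidate not in taken:
            taken.add(candidate)
            return candidate
        new_id += 1  # same fallback as the DuplicateKeyError branch

print(next_bill_id('TX', existing))  # TXB00000003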
def build(self, builder):
"""
Build XML by appending to builder
"""
builder.start("Annotations")
# populate the flags
for annotation in self.annotations:
annotation.build(builder)
builder.end("Annotations")
|
[
"def",
"build",
"(",
"self",
",",
"builder",
")",
":",
"builder",
".",
"start",
"(",
"\"Annotations\"",
")",
"# populate the flags",
"for",
"annotation",
"in",
"self",
".",
"annotations",
":",
"annotation",
".",
"build",
"(",
"builder",
")",
"builder",
".",
"end",
"(",
"\"Annotations\"",
")"
] | 24.181818 | 11.272727 |
def connect_to(self, service_name, **kwargs):
"""
Shortcut method to make instantiating the ``Connection`` classes
easier.
Forwards ``**kwargs`` like region, keys, etc. on to the constructor.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:rtype: <kotocore.connection.Connection> instance
"""
service_class = self.get_connection(service_name)
return service_class.connect_to(**kwargs)
|
[
"def",
"connect_to",
"(",
"self",
",",
"service_name",
",",
"*",
"*",
"kwargs",
")",
":",
"service_class",
"=",
"self",
".",
"get_connection",
"(",
"service_name",
")",
"return",
"service_class",
".",
"connect_to",
"(",
"*",
"*",
"kwargs",
")"
] | 37.6 | 21.6 |
def walkFlattenChilds(self) -> Generator[
Union[Tuple[Tuple[int, int], TransTmpl], 'OneOfTransaction'],
None, None]:
"""
:return: generator of generators of tuples
((startBitAddress, endBitAddress), TransTmpl instance)
        for each possibility in this transaction
"""
for p in self.possibleTransactions:
yield p.walkFlatten(offset=self.offset,
shouldEnterFn=self.shouldEnterFn)
|
[
"def",
"walkFlattenChilds",
"(",
"self",
")",
"->",
"Generator",
"[",
"Union",
"[",
"Tuple",
"[",
"Tuple",
"[",
"int",
",",
"int",
"]",
",",
"TransTmpl",
"]",
",",
"'OneOfTransaction'",
"]",
",",
"None",
",",
"None",
"]",
":",
"for",
"p",
"in",
"self",
".",
"possibleTransactions",
":",
"yield",
"p",
".",
"walkFlatten",
"(",
"offset",
"=",
"self",
".",
"offset",
",",
"shouldEnterFn",
"=",
"self",
".",
"shouldEnterFn",
")"
] | 44.363636 | 12.545455 |
def StrToInt(input_string, bitlength):
"""
    Convert the concrete value of the input string to an integer bit-vector,
    or return -1 if the string does not represent a valid integer.
:param input_string: the string we want to transform in an integer
:param bitlength: bitlength of the bitvector representing the index of the substring
    :return BVV: bit-vector representation of the integer resulting from the string or -1 in bitvector representation
if the string cannot be transformed into an integer
"""
try:
return BVV(int(input_string.value), bitlength)
except ValueError:
return BVV(-1, bitlength)
|
[
"def",
"StrToInt",
"(",
"input_string",
",",
"bitlength",
")",
":",
"try",
":",
"return",
"BVV",
"(",
"int",
"(",
"input_string",
".",
"value",
")",
",",
"bitlength",
")",
"except",
"ValueError",
":",
"return",
"BVV",
"(",
"-",
"1",
",",
"bitlength",
")"
] | 40.466667 | 26.066667 |
def python_sidebar_help(python_input):
"""
Create the `Layout` for the help text for the current item in the sidebar.
"""
token = 'class:sidebar.helptext'
def get_current_description():
"""
Return the description of the selected option.
"""
i = 0
for category in python_input.options:
for option in category.options:
if i == python_input.selected_option_index:
return option.description
i += 1
return ''
def get_help_text():
return [(token, get_current_description())]
return ConditionalContainer(
content=Window(
FormattedTextControl(get_help_text),
style=token,
height=Dimension(min=3)),
filter=ShowSidebar(python_input) &
Condition(lambda: python_input.show_sidebar_help) & ~is_done)
|
[
"def",
"python_sidebar_help",
"(",
"python_input",
")",
":",
"token",
"=",
"'class:sidebar.helptext'",
"def",
"get_current_description",
"(",
")",
":",
"\"\"\"\n Return the description of the selected option.\n \"\"\"",
"i",
"=",
"0",
"for",
"category",
"in",
"python_input",
".",
"options",
":",
"for",
"option",
"in",
"category",
".",
"options",
":",
"if",
"i",
"==",
"python_input",
".",
"selected_option_index",
":",
"return",
"option",
".",
"description",
"i",
"+=",
"1",
"return",
"''",
"def",
"get_help_text",
"(",
")",
":",
"return",
"[",
"(",
"token",
",",
"get_current_description",
"(",
")",
")",
"]",
"return",
"ConditionalContainer",
"(",
"content",
"=",
"Window",
"(",
"FormattedTextControl",
"(",
"get_help_text",
")",
",",
"style",
"=",
"token",
",",
"height",
"=",
"Dimension",
"(",
"min",
"=",
"3",
")",
")",
",",
"filter",
"=",
"ShowSidebar",
"(",
"python_input",
")",
"&",
"Condition",
"(",
"lambda",
":",
"python_input",
".",
"show_sidebar_help",
")",
"&",
"~",
"is_done",
")"
] | 31.321429 | 14.321429 |
def read_lsm_eventlist(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = bytes2str(stripnull(fh.read(esize - 16)))
events.append((etime, etype, etext))
count -= 1
return events
|
[
"def",
"read_lsm_eventlist",
"(",
"fh",
")",
":",
"count",
"=",
"struct",
".",
"unpack",
"(",
"'<II'",
",",
"fh",
".",
"read",
"(",
"8",
")",
")",
"[",
"1",
"]",
"events",
"=",
"[",
"]",
"while",
"count",
">",
"0",
":",
"esize",
",",
"etime",
",",
"etype",
"=",
"struct",
".",
"unpack",
"(",
"'<IdI'",
",",
"fh",
".",
"read",
"(",
"16",
")",
")",
"etext",
"=",
"bytes2str",
"(",
"stripnull",
"(",
"fh",
".",
"read",
"(",
"esize",
"-",
"16",
")",
")",
")",
"events",
".",
"append",
"(",
"(",
"etime",
",",
"etype",
",",
"etext",
")",
")",
"count",
"-=",
"1",
"return",
"events"
] | 38.6 | 15.5 |
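A round-trip sketch of the event record layout assumed above: a '<IdI' header (size, time, type) followed by the event text, NUL-padded.

import struct

text = b'Marker\x00'
esize = 16 + len(text)                      # struct.calcsize('<IdI') == 16
record = struct.pack('<IdI', esize, 12.5, 2) + text

# Parse it back the way the loop above does for each event.
size, etime, etype = struct.unpack('<IdI', record[:16])
etext = record[16:size].rstrip(b'\x00').decode()
print(etime, etype, etext)                  # 12.5 2 Marker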
def _sanitize_acronyms(unsafe_acronyms):
"""
Check acronyms against regex.
Normalize valid acronyms to upper-case.
If an invalid acronym is encountered raise InvalidAcronymError.
"""
valid_acronym = regex.compile(u'^[\p{Ll}\p{Lu}\p{Nd}]+$')
acronyms = []
for a in unsafe_acronyms:
if valid_acronym.match(a):
acronyms.append(a.upper())
else:
raise InvalidAcronymError(a)
return acronyms
|
[
"def",
"_sanitize_acronyms",
"(",
"unsafe_acronyms",
")",
":",
"valid_acronym",
"=",
"regex",
".",
"compile",
"(",
"u'^[\\p{Ll}\\p{Lu}\\p{Nd}]+$'",
")",
"acronyms",
"=",
"[",
"]",
"for",
"a",
"in",
"unsafe_acronyms",
":",
"if",
"valid_acronym",
".",
"match",
"(",
"a",
")",
":",
"acronyms",
".",
"append",
"(",
"a",
".",
"upper",
"(",
")",
")",
"else",
":",
"raise",
"InvalidAcronymError",
"(",
"a",
")",
"return",
"acronyms"
] | 29.866667 | 12.533333 |
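An ASCII-only approximation of the Unicode \p{Ll}\p{Lu}\p{Nd} classes above, using the standard-library re module and a generic ValueError in place of InvalidAcronymError, just to show the normalise-or-raise behaviour.

import re

valid = re.compile(r'^[A-Za-z0-9]+$')

def sanitize(acronyms):
    out = []
    for a in acronyms:
        if not valid.match(a):
            raise ValueError('invalid acronym: %r' % a)
        out.append(a.upper())
    return out

print(sanitize(['http', 'Api2']))  # ['HTTP', 'API2']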
def sign(self):
"""Signature function"""
self.verify_integrity()
if session.get('u2f_sign_required', False):
if request.method == 'GET':
response = self.get_signature_challenge()
if response['status'] == 'ok':
return jsonify(response), 200
else:
return jsonify(response), 404
elif request.method == 'POST':
response = self.verify_signature(request.json)
if response['status'] == 'ok':
return jsonify(response), 201
else:
return jsonify(response), 400
return jsonify({'status': 'failed', 'error': 'Unauthorized!'}), 401
|
[
"def",
"sign",
"(",
"self",
")",
":",
"self",
".",
"verify_integrity",
"(",
")",
"if",
"session",
".",
"get",
"(",
"'u2f_sign_required'",
",",
"False",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"response",
"=",
"self",
".",
"get_signature_challenge",
"(",
")",
"if",
"response",
"[",
"'status'",
"]",
"==",
"'ok'",
":",
"return",
"jsonify",
"(",
"response",
")",
",",
"200",
"else",
":",
"return",
"jsonify",
"(",
"response",
")",
",",
"404",
"elif",
"request",
".",
"method",
"==",
"'POST'",
":",
"response",
"=",
"self",
".",
"verify_signature",
"(",
"request",
".",
"json",
")",
"if",
"response",
"[",
"'status'",
"]",
"==",
"'ok'",
":",
"return",
"jsonify",
"(",
"response",
")",
",",
"201",
"else",
":",
"return",
"jsonify",
"(",
"response",
")",
",",
"400",
"return",
"jsonify",
"(",
"{",
"'status'",
":",
"'failed'",
",",
"'error'",
":",
"'Unauthorized!'",
"}",
")",
",",
"401"
] | 33.727273 | 18.181818 |