def run_aconvasp_command(command, structure):
    """
    Helper function for calling aconvasp with different arguments
    """
    poscar = Poscar(structure)
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stdin=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    output = p.communicate(input=poscar.get_string())
    return output

def strip_size(self, location='top', num_lines=None):
    """
    Breadth of the strip background in inches

    Parameters
    ----------
    location : str in ``['top', 'right']``
        Location of the strip text
    num_lines : int
        Number of text lines
    """
    dpi = 72
    theme = self.theme
    get_property = theme.themeables.property

    if location == 'right':
        strip_name = 'strip_text_y'
        num_lines = num_lines or self.num_vars_y
    else:
        strip_name = 'strip_text_x'
        num_lines = num_lines or self.num_vars_x

    if not num_lines:
        return 0

    # The facet labels are placed onto the figure using
    # transAxes dimensions. The line height and line
    # width are mapped to the same [0, 1] range
    # i.e (pts) * (inches / pts) * (1 / inches)
    try:
        fontsize = get_property(strip_name, 'size')
    except KeyError:
        fontsize = float(theme.rcParams.get('font.size', 10))

    try:
        linespacing = get_property(strip_name, 'linespacing')
    except KeyError:
        linespacing = 1

    # margins on either side of the strip text
    m1, m2 = self.inner_strip_margins(location)

    # Using the figure.dpi value here does not work out well!
    breadth = (linespacing * fontsize) * num_lines / dpi
    breadth = breadth + (m1 + m2) / dpi
    return breadth

def onBinaryMessage(self, msg, fromClient):
    data = bytearray()
    data.extend(msg)
    # Debugging aids, disabled by default:
    # self.print_debug("message length: {}".format(len(data)))
    # self.print_debug("message data: {}".format(hexlify(data)))
    try:
        self.queue.put_nowait(data)
    except asyncio.QueueFull:
        pass

def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """
    Connects to the Hetzner account, removes an existing record from the zone
    and returns a boolean indicating whether the deletion was successful.
    Uses the identifier, or rtype, name & content, to look up one or more
    records of the zone to delete.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        # Validate method parameters
        if identifier:
            rtype, name, content = self._parse_identifier(identifier, ddata['zone']['data'])
            if rtype is None or name is None or content is None:
                LOGGER.info('Hetzner => Record with identifier \'%s\' does not exist',
                            identifier)
                return True

        name = ddata['cname'] if ddata['cname'] else (self._fqdn_name(name) if name else None)
        records = self._list_records_in_zone(ddata['zone']['data'], rtype, name, content)
        if records:
            # Remove records from zone
            for record in records:
                rrset = ddata['zone']['data'].get_rdataset(record['name'] + '.',
                                                           rdtype=record['type'])
                rdatas = []
                for rdata in rrset:
                    if self._convert_content(record['type'],
                                             record['content']) != rdata.to_text():
                        rdatas.append(rdata.to_text())
                if rdatas:
                    rdataset = dns.rdataset.from_text_list(rrset.rdclass, rrset.rdtype,
                                                           record['ttl'], rdatas)
                    ddata['zone']['data'].replace_rdataset(record['name'] + '.', rdataset)
                else:
                    ddata['zone']['data'].delete_rdataset(record['name'] + '.', record['type'])
            # Post zone to Hetzner
            synced_change = self._post_zone(ddata['zone'])
            return synced_change

        LOGGER.info('Hetzner => Record lookup has no matches')
        return True

def _check_integrity(self, lons, lats):
    """
    Ensure lons and lats are:
     - 1D numpy arrays
     - equal size
     - within the appropriate range in radians
    """
    lons = np.array(lons).ravel()
    lats = np.array(lats).ravel()

    if len(lons.shape) != 1 or len(lats.shape) != 1:
        raise ValueError('lons and lats must be 1D')
    if lats.size != lons.size:
        raise ValueError('lons and lats must have same length')
    if (np.abs(lons)).max() > 2.*np.pi:
        raise ValueError("lons must be in radians (-2*pi <= lon <= 2*pi)")
    if (np.abs(lats)).max() > 0.5*np.pi:
        raise ValueError("lats must be in radians (-pi/2 <= lat <= pi/2)")
    return lons, lats
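
A minimal usage sketch (assumes only numpy; the mesh object and the class this method is bound to come from the snippet's elided module context). Callers are expected to convert degrees to radians first:

import numpy as np

lons = np.radians([-60.0, 0.0, 60.0])  # degrees -> radians
lats = np.radians([-30.0, 0.0, 30.0])
# lons, lats = mesh._check_integrity(lons, lats)  # returns validated, flattened copies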

def to_dict(self, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict):
    """ Get the dictionary representation of the current parser.

    :param str delimiter: The delimiter used for nested dictionaries,
        defaults to ":", optional
    :param class dict_type: The dictionary type to use for building the
        dictionary representation, defaults to collections.OrderedDict, optional
    :return: The dictionary representation of the parser instance
    :rtype: dict
    """
    root_key = self.sections()[0]
    return self._build_dict(
        self._sections, delimiter=delimiter, dict_type=dict_type
    ).get(root_key, {})
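
A hedged usage sketch (`parser` stands for an instance of the surrounding parser class; the delimiter only matters when section names are nested):

nested = parser.to_dict()               # OrderedDict keyed under the root section
plain = parser.to_dict(dict_type=dict)  # same structure, but with plain dicts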

def genweights(p, q, dt, error=None, n=False):
    """
    GENWEIGHTS : return optimal weighting coefficients from Powell and Leben 2004
    translated from the Matlab genweigths.m program to IDL

    Reference : Powell, B. S., et R. R. Leben (2004), An Optimal Filter for
    Geostrophic Mesoscale Currents from Along-Track Satellite Altimetry,
    Journal of Atmospheric and Oceanic Technology, 21(10), 1633-1642.

    @author Renaud DUSSURGET, LEGOS/CTOH
    @history Created Sep. 2009 from genweights.m (Brian Powell (c) 2004,
             University of Colorado, Boulder)
    """
    p = np.abs(p)
    q = np.abs(q)

    # check inputs
    if -p > q:
        raise ValueError("genweights : p must be less than q")

    # Build matrices
    N = p + q
    T = N + 1
    A = np.matrix(np.zeros((T, T)))
    A[T - 1, :] = np.append(np.repeat(1.0, N), 0)
    sn = np.arange(T) - p
    sn = sn.compress(sn != 0)
    for i in np.arange(len(sn)):
        A[i, :] = np.append(((1. / sn) * (-sn[i] / 2.)), sn[i] ** 2. * dt ** 2. / 4.)  # Eq.11 (PL)
        A[i, i] = -1.

    B = np.zeros(T)
    B[N] = 1.0

    # Compute the coefficients
    cn = np.dot(A.I, B)
    # cn=cn.transpose()
    cn = np.array([i for i in cn.flat])
    cn = cn[0:N]  # Check the indices

    # Compute the error (computed locally only; a leftover out-parameter
    # from the IDL translation, since Python cannot assign through `error`)
    error = np.sqrt(np.sum(cn.transpose() / (sn * dt)) ** 2. +
                    np.sum((cn.transpose() / (sn * dt)) ** 2.))

    # Parentheses fix the original precedence bug: `return cn, sn if n else cn`
    # always returned a 2-tuple, (cn, cn) when n was False.
    return (cn, sn) if n else cn
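
A short usage sketch (assumes numpy is imported as np, as the body requires, and the precedence fix noted above): weights for two points on each side of the estimation point with a unit time step.

cn = genweights(2, 2, 1.0)               # optimal weights, length p + q
cn, sn = genweights(2, 2, 1.0, n=True)   # also return the nonzero stencil offsets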

def get(self, limit=None, page=None):
    """
    get results from the db

    return -- Iterator()
    """
    has_more = False
    self.bounds.paginate = True
    limit_paginate, offset = self.bounds.get(limit, page)

    self.default_val = []
    results = self._query('get')

    if limit_paginate:
        self.bounds.paginate = False
        if len(results) == limit_paginate:
            has_more = True
            results.pop(-1)

    it = ResultsIterator(results, orm_class=self.orm_class, has_more=has_more, query=self)
    return self.iterator_class(it)

def find_executable(name, names=None, required=True):
    """Utility function to find an executable in PATH

    name: program to find. Use given value if absolute path

    names: list of additional names. For instance

       >>> find_executable('sed', names=['gsed'])

    required: If True, then the function raises an Exception
    if the program is not found else the function returns name if
    the program is not found.
    """
    path_from_env = os.environ.get(name.upper())
    if path_from_env is not None:
        return path_from_env
    names = [name] + (names or [])
    for _name in names:
        if osp.isabs(_name):
            return _name
        paths = os.environ.get('PATH', '').split(os.pathsep)
        eax = find_in_paths(_name, paths)
        if eax:
            return eax
    if required:
        raise NameError('Could not find %s executable' % name)
    else:
        return name
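
A usage sketch (hedged: `osp` is `os.path` and `find_in_paths` is a sibling helper in the snippet's module). An environment variable named after the program takes precedence, then each candidate name is searched along PATH:

import os

os.environ['SED'] = '/opt/local/bin/gsed'  # hypothetical override
find_executable('sed', names=['gsed'])     # -> '/opt/local/bin/gsed'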

def trigger_streamer(*inputs, **kwargs):
    """Trigger a streamer based on the index read from input b.

    Returns:
        list(IOTileReading)
    """
    streamer_marker = kwargs['mark_streamer']

    try:
        reading = inputs[1].pop()
    except StreamEmptyError:
        return []
    finally:
        for input_x in inputs:
            input_x.skip_all()

    try:
        streamer_marker(reading.value)
    except ArgumentError:
        return []

    return [IOTileReading(0, 0, 0)]

def metric_wind_dict_to_imperial(d):
    """
    Converts all the wind values in a dict from meters/sec (metric measurement
    system) to miles/hour (imperial measurement system).

    :param d: the dictionary containing metric values
    :type d: dict
    :returns: a dict with the same keys as the input dict and values converted
        to miles/hour
    """
    result = dict()
    for key, value in d.items():
        if key != 'deg':  # do not convert wind degree
            result[key] = value * MILES_PER_HOUR_FOR_ONE_METER_PER_SEC
        else:
            result[key] = value
    return result
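
A quick worked example (assumes the module constant MILES_PER_HOUR_FOR_ONE_METER_PER_SEC is about 2.236936, i.e. one m/s expressed in mph):

metric_wind_dict_to_imperial({'speed': 5.0, 'gust': 8.0, 'deg': 180})
# -> {'speed': 11.18468, 'gust': 17.895488, 'deg': 180}   # direction untouched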

def print_file_results(file_result):
    """Print the results of validating a file.

    Args:
        file_result: A FileValidationResults instance.
    """
    print_results_header(file_result.filepath, file_result.is_valid)

    for object_result in file_result.object_results:
        if object_result.warnings:
            print_warning_results(object_result, 1)
        if object_result.errors:
            print_schema_results(object_result, 1)

    if file_result.fatal:
        print_fatal_results(file_result.fatal, 1)

def validate(self, model_name, object):
    """Validate an object against its swagger model"""
    if model_name not in self.swagger_dict['definitions']:
        raise ValidationError("Swagger spec has no definition for model %s" % model_name)
    model_def = self.swagger_dict['definitions'][model_name]
    log.debug("Validating %s" % model_name)
    return validate_schema_object(self.spec, model_def, object)
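
A hedged usage sketch (names are illustrative; `spec` stands for an instance of the surrounding class loaded with a Swagger 2.0 document):

payload = {"id": 42, "name": "Ada"}
spec.validate("User", payload)  # raises ValidationError if "User" is undefined
                                # or the payload does not conform to the model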

def get_institutes_trend_graph_urls(start, end):
    """ Get all institute trend graphs. """
    graph_list = []
    for institute in Institute.objects.all():
        urls = get_institute_trend_graph_url(institute, start, end)
        urls['institute'] = institute
        graph_list.append(urls)
    return graph_list

def get_seebeck(self, output='eigs', doping_levels=True):
    """
    Gives the Seebeck coefficient (microV/K) in either a
    full 3x3 tensor form, as 3 eigenvalues, or as the average value
    (trace/3.0). If doping_levels=True, the results are given at
    different p and n doping levels (given by self.doping), otherwise
    they are given as a series of electron chemical potential values.

    Args:
        output (string): the type of output. 'tensor' gives the full
            3x3 tensor, 'eigs' its 3 eigenvalues and
            'average' the average of the three eigenvalues
        doping_levels (boolean): True for the results to be given at
            different doping levels, False for results
            at different electron chemical potentials

    Returns:
        If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
        The 'p' links to the Seebeck coefficient at p-type doping
        and 'n' to the Seebeck coefficient at n-type doping. Otherwise,
        returns a {temp:[]} dictionary.
        The result contains either the sorted three eigenvalues of
        the symmetric Seebeck tensor (output='eigs'), a full tensor
        (3x3 array) (output='tensor'), or an average (output='average').

        units are microV/K
    """
    return BoltztrapAnalyzer._format_to_output(self._seebeck,
                                               self._seebeck_doping,
                                               output,
                                               doping_levels, 1e6)
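
A sketch of consuming the default output (hedged; `an` stands for a populated BoltztrapAnalyzer instance, and 300 is just an example temperature key):

seebeck = an.get_seebeck()        # {temperature: {'p': [...], 'n': [...]}} for 'eigs'
eigs_p_300K = seebeck[300]['p']   # three sorted eigenvalues at 300 K, in microV/K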

def get_tokens(tokens):
    """Recursively gets tokens from a match list

    Args:
        tokens : List of tokens ['(', 'S', '(', 'NP', ')', ')']
    Returns:
        Nested list of tokens
    """
    tokens = tokens[1:-1]
    ret = []
    start = 0
    stack = 0
    for i in range(len(tokens)):
        if tokens[i] == '(':
            if stack == 0:
                start = i
            stack += 1
        elif tokens[i] == ')':
            stack -= 1
            if stack < 0:
                raise Exception('Bracket mismatch: ' + str(tokens))
            if stack == 0:
                ret.append(get_tokens(tokens[start:i + 1]))
        else:
            if stack == 0:
                ret.append(tokens[i])
    if stack != 0:
        raise Exception('Bracket mismatch: ' + str(tokens))
    return ret
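
A worked example (runnable as-is): the outer bracket pair is stripped, then every balanced inner group becomes a nested list.

get_tokens(['(', 'S', '(', 'NP', ')', ')'])
# -> ['S', ['NP']]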

def get_all_last_24h_kline(self):
    """
    Get all tickers (the last-24h market summary for every symbol).

    :return: a decorator that feeds the GET response to the wrapped callback
    """
    params = {}
    url = u.MARKET_URL + '/market/tickers'

    def _wrapper(_func):
        @wraps(_func)
        def handle():
            _func(http_get_request(url, params))
        return handle
    return _wrapper

def retrieve_profile_extension_records(self, profile_extension, field_list, ids_to_retrieve,
                                       query_column='RIID'):
    """ Responsys.retrieveProfileExtensionRecords call

    Accepts:
        InteractObject profile_extension
        list field_list
        list ids_to_retrieve
        string query_column
            default: 'RIID'

    Returns RecordData
    """
    profile_extension = profile_extension.get_soap_object(self.client)
    return RecordData.from_soap_type(
        self.call('retrieveProfileExtensionRecords',
                  profile_extension, query_column, field_list, ids_to_retrieve))

def mimeData(self, items):
    """
    Returns the mime data for dragging for this instance.

    :param items | [<QTableWidgetItem>, ..]
    """
    func = self.dataCollector()
    if func:
        return func(self, items)
    return super(XTableWidget, self).mimeData(items)

def resume(self, mask, filename, port, pos):
    """Resume a DCC send"""
    self.connections['send']['masks'][mask][port].offset = pos
    message = 'DCC ACCEPT %s %d %d' % (filename, port, pos)
    self.bot.ctcp(mask, message)

def _str_member_list(self, name):
    """
    Generate a member listing, autosummary:: table where possible,
    and a table where not.
    """
    out = []
    if self[name]:
        out += ['.. rubric:: %s' % name, '']
        prefix = getattr(self, '_name', '')

        if prefix:
            prefix = '~%s.' % prefix

        autosum = []
        others = []
        for param, param_type, desc in self[name]:
            param = param.strip()
            if not self._obj or hasattr(self._obj, param):
                autosum += ["   %s%s" % (prefix, param)]
            else:
                others.append((param, param_type, desc))

        if autosum:
            # GAEL: Toctree commented out below because it creates
            # hundreds of sphinx warnings
            # out += ['.. autosummary::', '   :toctree:', '']
            out += ['.. autosummary::', '']
            out += autosum

        if others:
            maxlen_0 = max([len(x[0]) for x in others])
            maxlen_1 = max([len(x[1]) for x in others])
            hdr = "=" * maxlen_0 + "  " + "=" * maxlen_1 + "  " + "=" * 10
            fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
            n_indent = maxlen_0 + maxlen_1 + 4
            out += [hdr]
            for param, param_type, desc in others:
                out += [fmt % (param.strip(), param_type)]
                out += self._str_indent(desc, n_indent)
            out += [hdr]
        out += ['']
    return out

def spinner(self, spinner=None):
    """Setter for spinner property.

    Parameters
    ----------
    spinner : dict, str
        Defines the spinner value with frame and interval
    """
    self._spinner = self._get_spinner(spinner)
    self._frame_index = 0
    self._text_index = 0

def is_valid_chunksize(chunk_size):
    """Check if size is valid."""
    min_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MIN']
    max_csize = current_app.config['FILES_REST_MULTIPART_CHUNKSIZE_MAX']
    return chunk_size >= min_csize and chunk_size <= max_csize

def _create_singleframe_bvals_bvecs(grouped_dicoms, bval_file, bvec_file, nifti, nifti_file):
    """
    Write the bvals from the sorted dicom files to a bval file
    """
    # create the empty arrays
    bvals = numpy.zeros([len(grouped_dicoms)], dtype=numpy.int32)
    bvecs = numpy.zeros([len(grouped_dicoms), 3])

    # loop over all timepoints and create a list with all bvals and bvecs
    if _is_bval_type_a(grouped_dicoms):
        bval_tag = Tag(0x2001, 0x1003)
        bvec_x_tag = Tag(0x2005, 0x10b0)
        bvec_y_tag = Tag(0x2005, 0x10b1)
        bvec_z_tag = Tag(0x2005, 0x10b2)
        for stack_index in range(0, len(grouped_dicoms)):
            bvals[stack_index] = common.get_fl_value(grouped_dicoms[stack_index][0][bval_tag])
            bvecs[stack_index, :] = [common.get_fl_value(grouped_dicoms[stack_index][0][bvec_x_tag]),
                                     common.get_fl_value(grouped_dicoms[stack_index][0][bvec_y_tag]),
                                     common.get_fl_value(grouped_dicoms[stack_index][0][bvec_z_tag])]
    elif _is_bval_type_b(grouped_dicoms):
        bval_tag = Tag(0x0018, 0x9087)
        bvec_tag = Tag(0x0018, 0x9089)
        for stack_index in range(0, len(grouped_dicoms)):
            bvals[stack_index] = common.get_fd_value(grouped_dicoms[stack_index][0][bval_tag])
            bvecs[stack_index, :] = common.get_fd_array_value(grouped_dicoms[stack_index][0][bvec_tag], 3)

    # truncate nifti if needed
    nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)

    # save the found bvecs to the file
    if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
        common.write_bval_file(bvals, bval_file)
        common.write_bvec_file(bvecs, bvec_file)
    else:
        bval_file = None
        bvec_file = None
        bvals = None
        bvecs = None
    return nifti, bvals, bvecs, bval_file, bvec_file

def debug_object(obj, log_level: int = logging.DEBUG) -> None:
    """
    Sends details about a Python object to the log, specifically its ``repr()``
    representation, and all of its attributes with their name, value, and type.

    Args:
        obj: object to debug
        log_level: log level to use; default is ``logging.DEBUG``
    """
    msgs = ["For {o!r}:".format(o=obj)]
    for attrname in dir(obj):
        attribute = getattr(obj, attrname)
        msgs.append("- {an!r}: {at!r}, of type {t!r}".format(
            an=attrname, at=attribute, t=type(attribute)))
    log.log(log_level, "{}", "\n".join(msgs))
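
A usage sketch (hedged: `log` is the snippet's module-level logger, and the "{}" call style implies a brace-format logging adapter rather than the stock %-style stdlib logger):

import logging
logging.basicConfig(level=logging.DEBUG)

debug_object({'a': 1})                          # logs repr, attributes, values, types
debug_object(object(), log_level=logging.INFO)  # at a custom level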

def path_lookup(data_obj, xj_path, create_dict_path=False):
    """Looks up a xj path in the data_obj.

    :param dict|list data_obj: An object to look into.
    :param str xj_path: A path to extract data from.
    :param bool create_dict_path: Create an element if type is specified.
    :return: A tuple whose first value is the extracted value and whose second
             value tells whether the value was found or not.
    """
    if not xj_path or xj_path == '.':
        return data_obj, True

    res = list(split(xj_path, '.', maxsplit=1))
    top_key = res[0]
    leftover = res[1] if len(res) > 1 else None
    if top_key == '*':
        return _full_sub_array(data_obj, leftover, create_dict_path)
    elif top_key.startswith('@'):
        return _single_array_element(data_obj, leftover, top_key,
                                     create_dict_path)
    else:
        val_type, top_key = _clean_key_type(top_key)
        top_key = unescape(top_key)
        if top_key in data_obj:
            value = data_obj[top_key]
            if val_type is not None and not isinstance(value, val_type):
                raise XJPathError(
                    'Key %s expects type "%s", but found value type is "%s"' %
                    (top_key, val_type.__name__, type(value).__name__))
            if leftover:
                return path_lookup(value, leftover, create_dict_path)
            else:
                return value, True
        else:
            if val_type is not None:
                if not isinstance(data_obj, dict):
                    raise XJPathError('Accessed object must be a dict type '
                                      'for the key: "%s"' % top_key)
                if create_dict_path:
                    data_obj[top_key] = val_type()
                else:
                    return None, False
                if leftover:
                    return path_lookup(data_obj[top_key], leftover,
                                       create_dict_path)
                else:
                    return data_obj[top_key], True
            return None, False
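
A worked sketch for the plain-key case (hedged: `split`, `unescape`, `_clean_key_type` and the '*'/'@' array helpers come from the snippet's module; for simple unescaped keys the lookup behaves like nested dict access):

data = {'config': {'port': 8080}}
path_lookup(data, 'config.port')   # -> (8080, True)
path_lookup(data, 'config.host')   # -> (None, False)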

def sample(self):
    """
    This is the core sampling method. Samples a state from a
    demonstration, in accordance with the configuration.
    """
    # chooses a sampling scheme randomly based on the mixing ratios
    seed = random.uniform(0, 1)
    ratio = np.cumsum(self.scheme_ratios)
    ratio = ratio > seed
    for i, v in enumerate(ratio):
        if v:
            break

    sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
    return sample_method()

def validate(self, cmd, messages=None):
    """Returns True if the given Command is valid, False otherwise.
    Validation error messages are appended to an optional messages
    array.
    """
    valid = True
    args = [arg for arg in cmd.args if arg is not None]

    if self.nargs != len(args):
        valid = False
        if messages is not None:
            msg = 'Expected %d arguments, but received %d.'
            messages.append(msg % (self.nargs, len(args)))

    for defn, value in zip(self.args, cmd.args):
        if value is None:
            valid = False
            if messages is not None:
                messages.append('Argument "%s" is missing.' % defn.name)
        elif defn.validate(value, messages) is False:
            valid = False

    if len(cmd._unrecognized) > 0:
        valid = False
        if messages is not None:
            for name in cmd.unrecognized:
                messages.append('Argument "%s" is unrecognized.' % name)

    return valid

def apply_filters(target, lines):
    """
    Applies filters to the lines of a datasource. This function is used only in
    integration tests. Filters are applied in an equivalent but more performant
    way at run time.
    """
    filters = get_filters(target)
    if filters:
        for l in lines:
            if any(f in l for f in filters):
                yield l
    else:
        for l in lines:
            yield l

def add_header(self, name, value):
    '''Attach an email header to send with the message.

    :param name: The name of the header value.
    :param value: The header value.
    '''
    if self.headers is None:
        self.headers = []
    self.headers.append(dict(Name=name, Value=value))
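
A usage sketch (`msg` is a hypothetical message instance of the surrounding class):

msg.add_header('X-Campaign-Id', 'welcome-1')
msg.add_header('Reply-To', 'support@example.com')
# msg.headers -> [{'Name': 'X-Campaign-Id', 'Value': 'welcome-1'},
#                 {'Name': 'Reply-To', 'Value': 'support@example.com'}]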

def is_all_field_none(self):
    """
    :rtype: bool
    """
    if self._uuid is not None:
        return False
    if self._avatar is not None:
        return False
    if self._public_nick_name is not None:
        return False
    if self._display_name is not None:
        return False
    if self._country is not None:
        return False
    return True

def generic_visit(self, node):
    """Called if no explicit visitor function exists for a node."""
    for field_name in node._fields:
        setattr(node, field_name, self.visit(getattr(node, field_name)))
    return node

def _hash_file(self, algo):
    """Get the hash of the given file

    :param algo: The algorithm to use.
    :type algo: str

    :return: The hexdigest of the data.
    :rtype: str
    """
    # We get the algorithm function.
    hash_data = getattr(hashlib, algo)()

    with open(self.path, "rb") as file:
        # We open and read the parsed path.
        content = file.read()

        # We pass the content to the hash algorithm.
        hash_data.update(content)

    # And we extract and return the hash.
    return hash_data.hexdigest()
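
A usage sketch (hedged: `checker` stands for an instance whose `path` attribute points at the file; `algo` must name a hashlib constructor such as 'md5', 'sha1' or 'sha256'):

digest = checker._hash_file('sha256')
# for an empty file this yields
# 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'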

def _namematcher(regex):
    """Checks if a target name matches with an input regular expression."""
    matcher = re_compile(regex)

    def match(target):
        target_name = getattr(target, '__name__', '')
        result = matcher.match(target_name)
        return result

    return match
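
A runnable sketch (assuming `re_compile` is simply an alias for `re.compile`, as the name suggests):

import re
re_compile = re.compile

is_test = _namematcher(r'test_')

def test_login():
    pass

bool(is_test(test_login))    # True
bool(is_test(_namematcher))  # False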

def getRing(startAtom, atomSet, lookup, oatoms):
    """getRing(startAtom, atomSet, lookup, oatoms)->atoms, bonds

    starting at startAtom do a bfs traversal through the atoms
    in atomSet and return the smallest ring found
    returns (), () on failure
    note: atoms and bonds are not returned in traversal order"""
    path = {}
    bpaths = {}
    for atomID in atomSet.keys():
        # initially the paths are empty
        path[atomID] = None
        bpaths[atomID] = []

    q = []
    handle = startAtom.handle
    for atom in oatoms[handle]:
        q.append((atom, handle))
        path[atom.handle] = {atom.handle: 1, handle: 1}
        bpaths[atom.handle] = [startAtom.findbond(atom)]

    qIndex = 0
    lenQ = len(q)
    while qIndex < lenQ:
        current, sourceHandle = q[qIndex]
        handle = current.handle
        qIndex += 1
        for next in oatoms[handle]:
            m = next.handle
            if m != sourceHandle:
                if m not in atomSet:  # was atomSet.has_key(m) (Python 2)
                    return (), ()
                if path.get(m, None):
                    intersections = 0
                    for atom in path[handle].keys():
                        if atom in path[m]:  # was path[m].has_key(atom)
                            intersections = intersections + 1
                            sharedAtom = atom
                    if intersections == 1:
                        del path[handle][sharedAtom]
                        path[handle].update(path[m])
                        result = path[handle].keys()
                        bond = next.findbond(current)
                        # assert bond not in bpaths[handle] and bond not in bpaths[m]
                        bonds = bpaths[handle] + bpaths[m] + [bond]
                        return result, bonds
                else:
                    path[m] = path[handle].copy()
                    path[m][m] = 1
                    bond = next.findbond(current)
                    # assert bond not in bpaths[m] and bond not in bpaths[handle]
                    bpaths[m] = bpaths[handle] + [next.findbond(current)]
                    q.append((next, handle))
                    lenQ = lenQ + 1
    return (), ()

def getIPaddresses(self):
    """identify the IP addresses where this process client will launch the SC2 client"""
    if not self.ipAddress:
        self.ipAddress = ipAddresses.getAll()  # update with IP address
    return self.ipAddress

def getPrioritySortkey(self):
    """Returns the key that will be used to sort the current Analysis
    Request based on both its priority and creation date. On ASC sorting,
    the oldest item with highest priority will be displayed.

    :return: string used for sorting
    """
    priority = self.getPriority()
    created_date = self.created().ISO8601()
    return '%s.%s' % (priority, created_date)
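
A sketch of the intended ordering (hedged; `requests` stands for a list of such Analysis Request objects):

for ar in sorted(requests, key=lambda r: r.getPrioritySortkey()):
    ...  # ascending sort shows the oldest items with highest priority first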

def current():
    """
    Returns the current environment manager for the projex system.

    :return     <EnvManager>
    """
    if not EnvManager._current:
        path = os.environ.get('PROJEX_ENVMGR_PATH')
        module = os.environ.get('PROJEX_ENVMGR_MODULE')
        clsname = os.environ.get('PROJEX_ENVMGR_CLASS')
        cls = EnvManager

        if module and clsname:
            # check if the user specified an import path
            if path:
                logger.info('Adding env manager path: %s' % path)
                sys.path.insert(0, path)

            logger.info('Loading env manager: %s.%s' % (module, clsname))

            try:
                __import__(module)
                mod = sys.modules[module]
                cls = getattr(mod, clsname)
            except ImportError:
                logger.error('Could not import env manager %s', module)
            except KeyError:
                logger.error('Could not import env manager %s', module)
            except AttributeError:
                msg = '%s is not a valid class of %s' % (clsname, module)
                logger.error(msg)

        EnvManager._current = cls()
    return EnvManager._current

def mag_roll(RAW_IMU, inclination, declination):
    '''estimate roll from mag'''
    m = mag_rotation(RAW_IMU, inclination, declination)
    (r, p, y) = m.to_euler()
    return degrees(r)

def harvest(lancet, config_section):
    """Construct a new Harvest client."""
    url, username, password = lancet.get_credentials(
        config_section, credentials_checker
    )
    project_id_getter = lancet.get_instance_from_config(
        "timer", "project_id_getter", lancet
    )
    task_id_getter = lancet.get_instance_from_config(
        "timer", "task_id_getter", lancet
    )
    client = HarvestPlatform(
        server=url,
        basic_auth=(username, password),
        project_id_getter=project_id_getter,
        task_id_getter=task_id_getter,
    )
    lancet.call_on_close(client.close)
    return client
def location(self, wave_field, depth=None, index=None):
"""Create a Location for a specific depth.
Parameters
----------
wave_field: str
Wave field. See :class:`Location` for possible values.
depth: float, optional
            Depth corresponding to the :class:`Location` of interest. If
provided, then index is ignored.
index: int, optional
Index corresponding to layer of interest in :class:`Profile`. If
provided, then depth is ignored and location is provided a top
of layer.
Returns
-------
Location
Corresponding :class:`Location` object.
"""
if not isinstance(wave_field, WaveField):
wave_field = WaveField[wave_field]
if index is None and depth is not None:
for i, layer in enumerate(self[:-1]):
if layer.depth <= depth < layer.depth_base:
depth_within = depth - layer.depth
break
else:
# Bedrock
i = len(self) - 1
layer = self[-1]
depth_within = 0
elif index is not None and depth is None:
layer = self[index]
i = self.index(layer)
depth_within = 0
else:
raise NotImplementedError
return Location(i, layer, wave_field, depth_within) | [
"def",
"location",
"(",
"self",
",",
"wave_field",
",",
"depth",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"wave_field",
",",
"WaveField",
")",
":",
"wave_field",
"=",
"WaveField",
"[",
"wave_field",
"]",
"if",
"index",
"is",
"None",
"and",
"depth",
"is",
"not",
"None",
":",
"for",
"i",
",",
"layer",
"in",
"enumerate",
"(",
"self",
"[",
":",
"-",
"1",
"]",
")",
":",
"if",
"layer",
".",
"depth",
"<=",
"depth",
"<",
"layer",
".",
"depth_base",
":",
"depth_within",
"=",
"depth",
"-",
"layer",
".",
"depth",
"break",
"else",
":",
"# Bedrock",
"i",
"=",
"len",
"(",
"self",
")",
"-",
"1",
"layer",
"=",
"self",
"[",
"-",
"1",
"]",
"depth_within",
"=",
"0",
"elif",
"index",
"is",
"not",
"None",
"and",
"depth",
"is",
"None",
":",
"layer",
"=",
"self",
"[",
"index",
"]",
"i",
"=",
"self",
".",
"index",
"(",
"layer",
")",
"depth_within",
"=",
"0",
"else",
":",
"raise",
"NotImplementedError",
"return",
"Location",
"(",
"i",
",",
"layer",
",",
"wave_field",
",",
"depth_within",
")"
]
| 34.195122 | 17.536585 |
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
mask: torch.LongTensor) -> torch.Tensor:
"""
Parameters
----------
inputs : ``torch.Tensor``, required.
A Tensor of shape ``(batch_size, sequence_length, hidden_size)``.
mask : ``torch.LongTensor``, required.
A binary mask of shape ``(batch_size, sequence_length)`` representing the
non-padded elements in each sequence in the batch.
Returns
-------
A ``torch.Tensor`` of shape (num_layers, batch_size, sequence_length, hidden_size),
where the num_layers dimension represents the LSTM output from that layer.
"""
batch_size, total_sequence_length = mask.size()
stacked_sequence_output, final_states, restoration_indices = \
self.sort_and_run_forward(self._lstm_forward, inputs, mask)
num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()
# Add back invalid rows which were removed in the call to sort_and_run_forward.
if num_valid < batch_size:
zeros = stacked_sequence_output.new_zeros(num_layers,
batch_size - num_valid,
returned_timesteps,
encoder_dim)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)
# The states also need to have invalid rows added back.
new_states = []
for state in final_states:
state_dim = state.size(-1)
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# It's possible to need to pass sequences which are padded to longer than the
# max length of the sequence to a Seq2StackEncoder. However, packing and unpacking
# the sequences mean that the returned tensor won't include these dimensions, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - returned_timesteps
if sequence_length_difference > 0:
zeros = stacked_sequence_output.new_zeros(num_layers,
batch_size,
sequence_length_difference,
stacked_sequence_output[0].size(-1))
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
# Has shape (num_layers, batch_size, sequence_length, hidden_size)
return stacked_sequence_output.index_select(1, restoration_indices) | [
"def",
"forward",
"(",
"self",
",",
"# pylint: disable=arguments-differ",
"inputs",
":",
"torch",
".",
"Tensor",
",",
"mask",
":",
"torch",
".",
"LongTensor",
")",
"->",
"torch",
".",
"Tensor",
":",
"batch_size",
",",
"total_sequence_length",
"=",
"mask",
".",
"size",
"(",
")",
"stacked_sequence_output",
",",
"final_states",
",",
"restoration_indices",
"=",
"self",
".",
"sort_and_run_forward",
"(",
"self",
".",
"_lstm_forward",
",",
"inputs",
",",
"mask",
")",
"num_layers",
",",
"num_valid",
",",
"returned_timesteps",
",",
"encoder_dim",
"=",
"stacked_sequence_output",
".",
"size",
"(",
")",
"# Add back invalid rows which were removed in the call to sort_and_run_forward.",
"if",
"num_valid",
"<",
"batch_size",
":",
"zeros",
"=",
"stacked_sequence_output",
".",
"new_zeros",
"(",
"num_layers",
",",
"batch_size",
"-",
"num_valid",
",",
"returned_timesteps",
",",
"encoder_dim",
")",
"stacked_sequence_output",
"=",
"torch",
".",
"cat",
"(",
"[",
"stacked_sequence_output",
",",
"zeros",
"]",
",",
"1",
")",
"# The states also need to have invalid rows added back.",
"new_states",
"=",
"[",
"]",
"for",
"state",
"in",
"final_states",
":",
"state_dim",
"=",
"state",
".",
"size",
"(",
"-",
"1",
")",
"zeros",
"=",
"state",
".",
"new_zeros",
"(",
"num_layers",
",",
"batch_size",
"-",
"num_valid",
",",
"state_dim",
")",
"new_states",
".",
"append",
"(",
"torch",
".",
"cat",
"(",
"[",
"state",
",",
"zeros",
"]",
",",
"1",
")",
")",
"final_states",
"=",
"new_states",
"# It's possible to need to pass sequences which are padded to longer than the",
"# max length of the sequence to a Seq2StackEncoder. However, packing and unpacking",
"# the sequences mean that the returned tensor won't include these dimensions, because",
"# the RNN did not need to process them. We add them back on in the form of zeros here.",
"sequence_length_difference",
"=",
"total_sequence_length",
"-",
"returned_timesteps",
"if",
"sequence_length_difference",
">",
"0",
":",
"zeros",
"=",
"stacked_sequence_output",
".",
"new_zeros",
"(",
"num_layers",
",",
"batch_size",
",",
"sequence_length_difference",
",",
"stacked_sequence_output",
"[",
"0",
"]",
".",
"size",
"(",
"-",
"1",
")",
")",
"stacked_sequence_output",
"=",
"torch",
".",
"cat",
"(",
"[",
"stacked_sequence_output",
",",
"zeros",
"]",
",",
"2",
")",
"self",
".",
"_update_states",
"(",
"final_states",
",",
"restoration_indices",
")",
"# Restore the original indices and return the sequence.",
"# Has shape (num_layers, batch_size, sequence_length, hidden_size)",
"return",
"stacked_sequence_output",
".",
"index_select",
"(",
"1",
",",
"restoration_indices",
")"
]
| 54.545455 | 28.727273 |
def pb(name, data, display_name=None, description=None):
"""Create a legacy text summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
data: A Python bytestring (of type bytes), or Unicode string. Or a numpy
data array of those types.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Raises:
ValueError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
try:
tensor = tf.make_tensor_proto(data, dtype=tf.string)
except TypeError as e:
raise ValueError(e)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/text_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | [
"def",
"pb",
"(",
"name",
",",
"data",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"try",
":",
"tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"data",
",",
"dtype",
"=",
"tf",
".",
"string",
")",
"except",
"TypeError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"e",
")",
"if",
"display_name",
"is",
"None",
":",
"display_name",
"=",
"name",
"summary_metadata",
"=",
"metadata",
".",
"create_summary_metadata",
"(",
"display_name",
"=",
"display_name",
",",
"description",
"=",
"description",
")",
"tf_summary_metadata",
"=",
"tf",
".",
"SummaryMetadata",
".",
"FromString",
"(",
"summary_metadata",
".",
"SerializeToString",
"(",
")",
")",
"summary",
"=",
"tf",
".",
"Summary",
"(",
")",
"summary",
".",
"value",
".",
"add",
"(",
"tag",
"=",
"'%s/text_summary'",
"%",
"name",
",",
"metadata",
"=",
"tf_summary_metadata",
",",
"tensor",
"=",
"tensor",
")",
"return",
"summary"
]
| 34.157895 | 20.315789 |
def get_item_balances(self, acc: Account) -> list:
"""
Returns balances of items of the invoice.
:param acc: Account
:return: list (AccountEntry, Decimal) in item id order
"""
items = []
entries = self.get_entries(acc)
for item in entries.filter(source_invoice=self).order_by('id'):
assert isinstance(item, AccountEntry)
settlements = sum_queryset(entries.filter(settled_item=item))
bal = item.amount + settlements if item.amount is not None else settlements
items.append((item, bal))
return items | [
"def",
"get_item_balances",
"(",
"self",
",",
"acc",
":",
"Account",
")",
"->",
"list",
":",
"items",
"=",
"[",
"]",
"entries",
"=",
"self",
".",
"get_entries",
"(",
"acc",
")",
"for",
"item",
"in",
"entries",
".",
"filter",
"(",
"source_invoice",
"=",
"self",
")",
".",
"order_by",
"(",
"'id'",
")",
":",
"assert",
"isinstance",
"(",
"item",
",",
"AccountEntry",
")",
"settlements",
"=",
"sum_queryset",
"(",
"entries",
".",
"filter",
"(",
"settled_item",
"=",
"item",
")",
")",
"bal",
"=",
"item",
".",
"amount",
"+",
"settlements",
"if",
"item",
".",
"amount",
"is",
"not",
"None",
"else",
"settlements",
"items",
".",
"append",
"(",
"(",
"item",
",",
"bal",
")",
")",
"return",
"items"
]
| 43.142857 | 15.714286 |
def get_common_payload_template(services=None):
"""
Get a template dictionary that can be used to create a payload object.
    TODO: services should be a list. Specify required fields for each service in the list.
None means all services.
"""
available_services = {'citrine', 'materials_commons', 'materials_data_facility'}
if services is None:
services = list(available_services)
else:
services = [service for service in services if service in available_services]
if not services:
services = list(available_services)
combined_requirements = {}
for service in services:
# TODO(Recursive check dictionaries to make sure all requirements fields are combined.)
service_requirements = eval('_%s_metadata_requirements()' % service)
if service_requirements:
for key in service_requirements:
                if key not in combined_requirements:
combined_requirements[key] = service_requirements[key]
return {
'all_fields': {
'title': 'string',
'source': {
'name': 'string',
'producer': 'string',
'url': 'url string',
'tags': ['string']
},
'data_contacts': [
{
'given_name': 'string',
'family_name': 'string',
'title': 'string',
'orcid': 'TBD',
'email': 'string',
'tags': ['string']
}
],
'data_contributors': [
{
'given_name': 'string',
'family_name': 'string',
'title': 'string',
'orcid': 'TBD',
'email': 'string',
'tags': ['string']
}
],
'links': {
'landing_page': 'uri (string)',
'publication': ['uri (string)'],
'data_doi': 'uri (string)',
'related_id': ['string'],
'parent_id': 'string'
},
'authors': [
{
'given_name': 'string',
'family_name': 'string',
'title': 'string',
'orcid': 'TBD',
'email': 'string',
'tags': ['string']
}
],
'licenses': [
{
'name': 'string',
'description': 'string',
'url': 'string',
'tags': ['string']
}
],
'citations': [
{
'authors': [
{
'given_name': 'string',
'family_name': 'string',
'title': 'string',
'orcid': 'TBD',
'email': 'string',
'tags': ['string']
}
],
'year': 'string',
'title': 'string',
'journal': 'string',
'volume': 'string',
'issue': 'string',
'page_location': 'string',
'edition': 'string',
'publication_location': 'string',
'publisher': 'string',
'extent': 'string',
'notes': 'string',
}
],
'repository': 'not yet available',
'collection': 'not yet available',
'tags': ['string'],
'description': 'string',
'raw': 'not yet available',
'year': 'integer',
'composition': 'not yet available'
},
'required_fields': combined_requirements,
'usage': 'payload = <service class, e.g. CITPayload>(**input_dictionary).metapayload'
} | [
"def",
"get_common_payload_template",
"(",
"services",
"=",
"None",
")",
":",
"available_services",
"=",
"{",
"'citrine'",
",",
"'materials_commons'",
",",
"'materials_data_facility'",
"}",
"if",
"services",
"is",
"None",
":",
"services",
"=",
"list",
"(",
"available_services",
")",
"else",
":",
"services",
"=",
"[",
"service",
"for",
"service",
"in",
"services",
"if",
"service",
"in",
"available_services",
"]",
"if",
"not",
"services",
":",
"services",
"=",
"list",
"(",
"available_services",
")",
"combined_requirements",
"=",
"{",
"}",
"for",
"service",
"in",
"services",
":",
"# TODO(Recursive check dictionaries to make sure all requirements fields are combined.)\r",
"service_requirements",
"=",
"eval",
"(",
"'_%s_metadata_requirements()'",
"%",
"service",
")",
"if",
"service_requirements",
":",
"for",
"key",
"in",
"service_requirements",
":",
"if",
"not",
"key",
"in",
"combined_requirements",
":",
"combined_requirements",
"[",
"key",
"]",
"=",
"service_requirements",
"[",
"key",
"]",
"return",
"{",
"'all_fields'",
":",
"{",
"'title'",
":",
"'string'",
",",
"'source'",
":",
"{",
"'name'",
":",
"'string'",
",",
"'producer'",
":",
"'string'",
",",
"'url'",
":",
"'url string'",
",",
"'tags'",
":",
"[",
"'string'",
"]",
"}",
",",
"'data_contacts'",
":",
"[",
"{",
"'given_name'",
":",
"'string'",
",",
"'family_name'",
":",
"'string'",
",",
"'title'",
":",
"'string'",
",",
"'orcid'",
":",
"'TBD'",
",",
"'email'",
":",
"'string'",
",",
"'tags'",
":",
"[",
"'string'",
"]",
"}",
"]",
",",
"'data_contributors'",
":",
"[",
"{",
"'given_name'",
":",
"'string'",
",",
"'family_name'",
":",
"'string'",
",",
"'title'",
":",
"'string'",
",",
"'orcid'",
":",
"'TBD'",
",",
"'email'",
":",
"'string'",
",",
"'tags'",
":",
"[",
"'string'",
"]",
"}",
"]",
",",
"'links'",
":",
"{",
"'landing_page'",
":",
"'uri (string)'",
",",
"'publication'",
":",
"[",
"'uri (string)'",
"]",
",",
"'data_doi'",
":",
"'uri (string)'",
",",
"'related_id'",
":",
"[",
"'string'",
"]",
",",
"'parent_id'",
":",
"'string'",
"}",
",",
"'authors'",
":",
"[",
"{",
"'given_name'",
":",
"'string'",
",",
"'family_name'",
":",
"'string'",
",",
"'title'",
":",
"'string'",
",",
"'orcid'",
":",
"'TBD'",
",",
"'email'",
":",
"'string'",
",",
"'tags'",
":",
"[",
"'string'",
"]",
"}",
"]",
",",
"'licenses'",
":",
"[",
"{",
"'name'",
":",
"'string'",
",",
"'description'",
":",
"'string'",
",",
"'url'",
":",
"'string'",
",",
"'tags'",
":",
"[",
"'string'",
"]",
"}",
"]",
",",
"'citations'",
":",
"[",
"{",
"'authors'",
":",
"[",
"{",
"'given_name'",
":",
"'string'",
",",
"'family_name'",
":",
"'string'",
",",
"'title'",
":",
"'string'",
",",
"'orcid'",
":",
"'TBD'",
",",
"'email'",
":",
"'string'",
",",
"'tags'",
":",
"[",
"'string'",
"]",
"}",
"]",
",",
"'year'",
":",
"'string'",
",",
"'title'",
":",
"'string'",
",",
"'journal'",
":",
"'string'",
",",
"'volume'",
":",
"'string'",
",",
"'issue'",
":",
"'string'",
",",
"'page_location'",
":",
"'string'",
",",
"'edition'",
":",
"'string'",
",",
"'publication_location'",
":",
"'string'",
",",
"'publisher'",
":",
"'string'",
",",
"'extent'",
":",
"'string'",
",",
"'notes'",
":",
"'string'",
",",
"}",
"]",
",",
"'repository'",
":",
"'not yet available'",
",",
"'collection'",
":",
"'not yet available'",
",",
"'tags'",
":",
"[",
"'string'",
"]",
",",
"'description'",
":",
"'string'",
",",
"'raw'",
":",
"'not yet available'",
",",
"'year'",
":",
"'integer'",
",",
"'composition'",
":",
"'not yet available'",
"}",
",",
"'required_fields'",
":",
"combined_requirements",
",",
"'usage'",
":",
"'payload = <service class, e.g. CITPayload>(**input_dictionary).metapayload'",
"}"
]
| 36.696429 | 12.535714 |
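
An aside on the record above: the eval('_%s_metadata_requirements()' % service)
lookup can be replaced by a plain dict dispatch. A minimal sketch of the same
merge, where the two requirement helpers are hypothetical stand-ins for the
real _<service>_metadata_requirements functions:

def _citrine_requirements():
    return {'title': 'string', 'source': 'dict'}

def _mdf_requirements():
    return {'title': 'string', 'links': 'dict'}

REQUIREMENT_FUNCS = {
    'citrine': _citrine_requirements,
    'materials_data_facility': _mdf_requirements,
}

combined_requirements = {}
for service in ('citrine', 'materials_data_facility'):
    for key, value in REQUIREMENT_FUNCS[service]().items():
        # setdefault keeps the first value seen, matching the
        # "if key not in combined_requirements" guard above
        combined_requirements.setdefault(key, value)

print(combined_requirements)
# {'title': 'string', 'source': 'dict', 'links': 'dict'}
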
def readConfig(self, configuration):
"""Read configuration from dict.
Read configuration from a JSON configuration file.
:param configuration: configuration to load.
:type configuration: dict.
"""
self.__logger.debug("Reading configuration")
self.city = configuration["name"]
self.__logger.info("City name: " + self.city)
if "intervals" in configuration:
self.__intervals = configuration["intervals"]
self.__logger.debug("Intervals: " +
str(self.__intervals))
if "last_date" in configuration:
self.__lastDay = configuration["last_date"]
self.__logger.debug("Last day: " + self.__lastDay)
if "locations" in configuration:
self.__locations = configuration["locations"]
self.__logger.debug("Locations: " +
str(self.__locations))
self.__addLocationsToURL(self.__locations)
if "excludedUsers" in configuration:
            self.__excludedUsers = set()
excluded = configuration["excludedUsers"]
for e in excluded:
self.__excludedUsers.add(e)
self.__logger.debug("Excluded users " +
str(self.__excludedUsers))
if "excludedLocations" in configuration:
            self.__excludedLocations = set()
            excluded = configuration["excludedLocations"]
for e in excluded:
self.__excludedLocations.add(e)
self.__logger.debug("Excluded locations " +
str(self.__excludedLocations)) | [
"def",
"readConfig",
"(",
"self",
",",
"configuration",
")",
":",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Reading configuration\"",
")",
"self",
".",
"city",
"=",
"configuration",
"[",
"\"name\"",
"]",
"self",
".",
"__logger",
".",
"info",
"(",
"\"City name: \"",
"+",
"self",
".",
"city",
")",
"if",
"\"intervals\"",
"in",
"configuration",
":",
"self",
".",
"__intervals",
"=",
"configuration",
"[",
"\"intervals\"",
"]",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Intervals: \"",
"+",
"str",
"(",
"self",
".",
"__intervals",
")",
")",
"if",
"\"last_date\"",
"in",
"configuration",
":",
"self",
".",
"__lastDay",
"=",
"configuration",
"[",
"\"last_date\"",
"]",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Last day: \"",
"+",
"self",
".",
"__lastDay",
")",
"if",
"\"locations\"",
"in",
"configuration",
":",
"self",
".",
"__locations",
"=",
"configuration",
"[",
"\"locations\"",
"]",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Locations: \"",
"+",
"str",
"(",
"self",
".",
"__locations",
")",
")",
"self",
".",
"__addLocationsToURL",
"(",
"self",
".",
"__locations",
")",
"if",
"\"excludedUsers\"",
"in",
"configuration",
":",
"self",
".",
"__excludedUsers",
"=",
"set",
"(",
")",
"self",
".",
"__excludedLocations",
"=",
"set",
"(",
")",
"excluded",
"=",
"configuration",
"[",
"\"excludedUsers\"",
"]",
"for",
"e",
"in",
"excluded",
":",
"self",
".",
"__excludedUsers",
".",
"add",
"(",
"e",
")",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Excluded users \"",
"+",
"str",
"(",
"self",
".",
"__excludedUsers",
")",
")",
"if",
"\"excludedLocations\"",
"in",
"configuration",
":",
"excluded",
"=",
"configuration",
"[",
"\"excludedLocations\"",
"]",
"for",
"e",
"in",
"excluded",
":",
"self",
".",
"__excludedLocations",
".",
"add",
"(",
"e",
")",
"self",
".",
"__logger",
".",
"debug",
"(",
"\"Excluded locations \"",
"+",
"str",
"(",
"self",
".",
"__excludedLocations",
")",
")"
]
| 38.255814 | 15.255814 |
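
A sketch of the configuration dict that readConfig above consumes; the key
names come straight from the code, but every value shape here is a guess:

configuration = {
    'name': 'Madrid',
    'intervals': ['2016-01-01/2016-06-30'],     # value shape assumed
    'last_date': '2016-06-30',                  # value shape assumed
    'locations': [-3.85, 40.30, -3.52, 40.56],  # value shape assumed
    'excludedUsers': ['spam_bot_1'],
    'excludedLocations': ['LOC123'],
}
# crawler.readConfig(configuration)  # hypothetical instance of the class above
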
def read_tsv(cls, path, encoding='utf-8'):
"""Read a gene set database from a tab-delimited text file.
Parameters
----------
path: str
The path name of the the file.
encoding: str
The encoding of the text file.
Returns
-------
        A new instance of this class containing the gene sets read from the file.
"""
gene_sets = []
n = 0
with open(path, 'rb') as fh:
reader = csv.reader(fh, dialect='excel-tab', encoding=encoding)
for l in reader:
n += 1
gs = GeneSet.from_list(l)
gene_sets.append(gs)
logger.debug('Read %d gene sets.', n)
logger.debug('Size of gene set list: %d', len(gene_sets))
return cls(gene_sets) | [
"def",
"read_tsv",
"(",
"cls",
",",
"path",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"gene_sets",
"=",
"[",
"]",
"n",
"=",
"0",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"fh",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"fh",
",",
"dialect",
"=",
"'excel-tab'",
",",
"encoding",
"=",
"encoding",
")",
"for",
"l",
"in",
"reader",
":",
"n",
"+=",
"1",
"gs",
"=",
"GeneSet",
".",
"from_list",
"(",
"l",
")",
"gene_sets",
".",
"append",
"(",
"gs",
")",
"logger",
".",
"debug",
"(",
"'Read %d gene sets.'",
",",
"n",
")",
"logger",
".",
"debug",
"(",
"'Size of gene set list: %d'",
",",
"len",
"(",
"gene_sets",
")",
")",
"return",
"cls",
"(",
"gene_sets",
")"
]
| 29.28 | 16.4 |
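
The record above opens the file in binary mode yet passes encoding= to
csv.reader, which points at Python 2 with the unicodecsv package. A rough
Python 3 standard-library sketch of the same tab-separated reading pattern:

import csv

def read_tsv_rows(path, encoding='utf-8'):
    """Yield the rows of a tab-delimited text file (Python 3 stdlib csv)."""
    with open(path, newline='', encoding=encoding) as fh:
        for row in csv.reader(fh, dialect='excel-tab'):
            yield row
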
def p_property_assignment(self, p):
"""property_assignment \
: property_name COLON assignment_expr
| GETPROP property_name LPAREN RPAREN LBRACE function_body RBRACE
| SETPROP property_name LPAREN property_set_parameter_list RPAREN\
LBRACE function_body RBRACE
"""
if len(p) == 4:
p[0] = self.asttypes.Assign(left=p[1], op=p[2], right=p[3])
p[0].setpos(p, 2)
elif len(p) == 8:
p[0] = self.asttypes.GetPropAssign(prop_name=p[2], elements=p[6])
p[0].setpos(p)
else:
p[0] = self.asttypes.SetPropAssign(
prop_name=p[2], parameter=p[4], elements=p[7])
p[0].setpos(p) | [
"def",
"p_property_assignment",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"4",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"Assign",
"(",
"left",
"=",
"p",
"[",
"1",
"]",
",",
"op",
"=",
"p",
"[",
"2",
"]",
",",
"right",
"=",
"p",
"[",
"3",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
",",
"2",
")",
"elif",
"len",
"(",
"p",
")",
"==",
"8",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"GetPropAssign",
"(",
"prop_name",
"=",
"p",
"[",
"2",
"]",
",",
"elements",
"=",
"p",
"[",
"6",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"self",
".",
"asttypes",
".",
"SetPropAssign",
"(",
"prop_name",
"=",
"p",
"[",
"2",
"]",
",",
"parameter",
"=",
"p",
"[",
"4",
"]",
",",
"elements",
"=",
"p",
"[",
"7",
"]",
")",
"p",
"[",
"0",
"]",
".",
"setpos",
"(",
"p",
")"
]
| 42.941176 | 17.235294 |
def create_token(
self,
registry_address,
initial_alloc=10 ** 6,
name='raidentester',
symbol='RDT',
decimals=2,
timeout=60,
auto_register=True,
):
""" Create a proxy for a new HumanStandardToken (ERC20), that is
initialized with Args(below).
Per default it will be registered with 'raiden'.
Args:
initial_alloc (int): amount of initial tokens.
name (str): human readable token name.
symbol (str): token shorthand symbol.
decimals (int): decimal places.
timeout (int): timeout in seconds for creation.
auto_register (boolean): if True(default), automatically register
the token with raiden.
Returns:
token_address_hex: the hex encoded address of the new token/token.
"""
with gevent.Timeout(timeout):
token_address = deploy_contract_web3(
CONTRACT_HUMAN_STANDARD_TOKEN,
self._chain.client,
contract_manager=self._raiden.contract_manager,
constructor_arguments=(initial_alloc, name, decimals, symbol),
)
token_address_hex = to_checksum_address(token_address)
if auto_register:
self.register_token(registry_address, token_address_hex)
print("Successfully created {}the token '{}'.".format(
'and registered ' if auto_register else ' ',
name,
))
return token_address_hex | [
"def",
"create_token",
"(",
"self",
",",
"registry_address",
",",
"initial_alloc",
"=",
"10",
"**",
"6",
",",
"name",
"=",
"'raidentester'",
",",
"symbol",
"=",
"'RDT'",
",",
"decimals",
"=",
"2",
",",
"timeout",
"=",
"60",
",",
"auto_register",
"=",
"True",
",",
")",
":",
"with",
"gevent",
".",
"Timeout",
"(",
"timeout",
")",
":",
"token_address",
"=",
"deploy_contract_web3",
"(",
"CONTRACT_HUMAN_STANDARD_TOKEN",
",",
"self",
".",
"_chain",
".",
"client",
",",
"contract_manager",
"=",
"self",
".",
"_raiden",
".",
"contract_manager",
",",
"constructor_arguments",
"=",
"(",
"initial_alloc",
",",
"name",
",",
"decimals",
",",
"symbol",
")",
",",
")",
"token_address_hex",
"=",
"to_checksum_address",
"(",
"token_address",
")",
"if",
"auto_register",
":",
"self",
".",
"register_token",
"(",
"registry_address",
",",
"token_address_hex",
")",
"print",
"(",
"\"Successfully created {}the token '{}'.\"",
".",
"format",
"(",
"'and registered '",
"if",
"auto_register",
"else",
"' '",
",",
"name",
",",
")",
")",
"return",
"token_address_hex"
]
| 36.833333 | 18.190476 |
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
sr = geom.spatialReference
wkid = None
wkt = None
if sr is None:
if self._wkid is None and self._wkt is not None:
wkt = self._wkt
else:
wkid = self._wkid
else:
wkid = sr.factoryCode
g = json.loads(geom.JSON)
top = []
for gring in g['rings']:
ring = []
for g in gring:
ring.append(Point(coord=g, wkid=wkid, wkt=wkt, z=None, m=None))
top.append(ring)
return top | [
"def",
"__geomToPointList",
"(",
"self",
",",
"geom",
")",
":",
"sr",
"=",
"geom",
".",
"spatialReference",
"wkid",
"=",
"None",
"wkt",
"=",
"None",
"if",
"sr",
"is",
"None",
":",
"if",
"self",
".",
"_wkid",
"is",
"None",
"and",
"self",
".",
"_wkt",
"is",
"not",
"None",
":",
"wkt",
"=",
"self",
".",
"_wkt",
"else",
":",
"wkid",
"=",
"self",
".",
"_wkid",
"else",
":",
"wkid",
"=",
"sr",
".",
"factoryCode",
"g",
"=",
"json",
".",
"loads",
"(",
"geom",
".",
"JSON",
")",
"top",
"=",
"[",
"]",
"for",
"gring",
"in",
"g",
"[",
"'rings'",
"]",
":",
"ring",
"=",
"[",
"]",
"for",
"g",
"in",
"gring",
":",
"ring",
".",
"append",
"(",
"Point",
"(",
"coord",
"=",
"g",
",",
"wkid",
"=",
"wkid",
",",
"wkt",
"=",
"wkt",
",",
"z",
"=",
"None",
",",
"m",
"=",
"None",
")",
")",
"top",
".",
"append",
"(",
"ring",
")",
"return",
"top"
]
| 31.9 | 15.5 |
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection) | [
"def",
"draw_collection",
"(",
"self",
",",
"ax",
",",
"collection",
",",
"force_pathtrans",
"=",
"None",
",",
"force_offsettrans",
"=",
"None",
")",
":",
"(",
"transform",
",",
"transOffset",
",",
"offsets",
",",
"paths",
")",
"=",
"collection",
".",
"_prepare_points",
"(",
")",
"offset_coords",
",",
"offsets",
"=",
"self",
".",
"process_transform",
"(",
"transOffset",
",",
"ax",
",",
"offsets",
",",
"force_trans",
"=",
"force_offsettrans",
")",
"path_coords",
"=",
"self",
".",
"process_transform",
"(",
"transform",
",",
"ax",
",",
"force_trans",
"=",
"force_pathtrans",
")",
"processed_paths",
"=",
"[",
"utils",
".",
"SVG_path",
"(",
"path",
")",
"for",
"path",
"in",
"paths",
"]",
"processed_paths",
"=",
"[",
"(",
"self",
".",
"process_transform",
"(",
"transform",
",",
"ax",
",",
"path",
"[",
"0",
"]",
",",
"force_trans",
"=",
"force_pathtrans",
")",
"[",
"1",
"]",
",",
"path",
"[",
"1",
"]",
")",
"for",
"path",
"in",
"processed_paths",
"]",
"path_transforms",
"=",
"collection",
".",
"get_transforms",
"(",
")",
"try",
":",
"# matplotlib 1.3: path_transforms are transform objects.",
"# Convert them to numpy arrays.",
"path_transforms",
"=",
"[",
"t",
".",
"get_matrix",
"(",
")",
"for",
"t",
"in",
"path_transforms",
"]",
"except",
"AttributeError",
":",
"# matplotlib 1.4: path transforms are already numpy arrays.",
"pass",
"styles",
"=",
"{",
"'linewidth'",
":",
"collection",
".",
"get_linewidths",
"(",
")",
",",
"'facecolor'",
":",
"collection",
".",
"get_facecolors",
"(",
")",
",",
"'edgecolor'",
":",
"collection",
".",
"get_edgecolors",
"(",
")",
",",
"'alpha'",
":",
"collection",
".",
"_alpha",
",",
"'zorder'",
":",
"collection",
".",
"get_zorder",
"(",
")",
"}",
"offset_dict",
"=",
"{",
"\"data\"",
":",
"\"before\"",
",",
"\"screen\"",
":",
"\"after\"",
"}",
"offset_order",
"=",
"offset_dict",
"[",
"collection",
".",
"get_offset_position",
"(",
")",
"]",
"self",
".",
"renderer",
".",
"draw_path_collection",
"(",
"paths",
"=",
"processed_paths",
",",
"path_coordinates",
"=",
"path_coords",
",",
"path_transforms",
"=",
"path_transforms",
",",
"offsets",
"=",
"offsets",
",",
"offset_coordinates",
"=",
"offset_coords",
",",
"offset_order",
"=",
"offset_order",
",",
"styles",
"=",
"styles",
",",
"mplobj",
"=",
"collection",
")"
]
| 47.25 | 20.454545 |
def all(cls, client, symbols):
""""
fetch data for multiple stocks
"""
params = {"symbol": ",".join(symbols)}
request_url = "https://api.robinhood.com/instruments/"
data = client.get(request_url, params=params)
results = data["results"]
while data["next"]:
data = client.get(data["next"])
results.extend(data["results"])
return results | [
"def",
"all",
"(",
"cls",
",",
"client",
",",
"symbols",
")",
":",
"params",
"=",
"{",
"\"symbol\"",
":",
"\",\"",
".",
"join",
"(",
"symbols",
")",
"}",
"request_url",
"=",
"\"https://api.robinhood.com/instruments/\"",
"data",
"=",
"client",
".",
"get",
"(",
"request_url",
",",
"params",
"=",
"params",
")",
"results",
"=",
"data",
"[",
"\"results\"",
"]",
"while",
"data",
"[",
"\"next\"",
"]",
":",
"data",
"=",
"client",
".",
"get",
"(",
"data",
"[",
"\"next\"",
"]",
")",
"results",
".",
"extend",
"(",
"data",
"[",
"\"results\"",
"]",
")",
"return",
"results"
]
| 30 | 12.642857 |
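
A self-contained sketch of the follow-the-next-link pagination loop used
above, with a stub client and made-up page names standing in for the real API:

class StubClient:
    """Serves two canned pages so the pagination loop can be exercised."""
    _pages = {
        'page1': {'results': [1, 2], 'next': 'page2'},
        'page2': {'results': [3], 'next': None},
    }

    def get(self, url, params=None):
        return self._pages[url]

client = StubClient()
data = client.get('page1', params={'symbol': 'AAPL,MSFT'})
results = data['results']
while data['next']:
    data = client.get(data['next'])
    results.extend(data['results'])

print(results)  # [1, 2, 3]
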
def _get_entity_by_class(self, entity_cls):
"""Fetch Entity record with Entity class details"""
entity_qualname = fully_qualified_name(entity_cls)
if entity_qualname in self._registry:
return self._registry[entity_qualname]
else:
return self._find_entity_in_records_by_class_name(entity_cls.__name__) | [
"def",
"_get_entity_by_class",
"(",
"self",
",",
"entity_cls",
")",
":",
"entity_qualname",
"=",
"fully_qualified_name",
"(",
"entity_cls",
")",
"if",
"entity_qualname",
"in",
"self",
".",
"_registry",
":",
"return",
"self",
".",
"_registry",
"[",
"entity_qualname",
"]",
"else",
":",
"return",
"self",
".",
"_find_entity_in_records_by_class_name",
"(",
"entity_cls",
".",
"__name__",
")"
]
| 50 | 15 |
def get_intrinsic_rewards(self, curr_info, next_info):
"""
Generates intrinsic reward used for Curiosity-based training.
:BrainInfo curr_info: Current BrainInfo.
:BrainInfo next_info: Next BrainInfo.
:return: Intrinsic rewards for all agents.
"""
if self.use_curiosity:
if len(curr_info.agents) == 0:
return []
feed_dict = {self.model.batch_size: len(next_info.vector_observations),
self.model.sequence_length: 1}
if self.use_continuous_act:
feed_dict[self.model.selected_actions] = next_info.previous_vector_actions
else:
feed_dict[self.model.action_holder] = next_info.previous_vector_actions
for i in range(self.model.vis_obs_size):
feed_dict[self.model.visual_in[i]] = curr_info.visual_observations[i]
feed_dict[self.model.next_visual_in[i]] = next_info.visual_observations[i]
if self.use_vec_obs:
feed_dict[self.model.vector_in] = curr_info.vector_observations
feed_dict[self.model.next_vector_in] = next_info.vector_observations
if self.use_recurrent:
if curr_info.memories.shape[1] == 0:
curr_info.memories = self.make_empty_memory(len(curr_info.agents))
feed_dict[self.model.memory_in] = curr_info.memories
intrinsic_rewards = self.sess.run(self.model.intrinsic_reward,
feed_dict=feed_dict) * float(self.has_updated)
return intrinsic_rewards
else:
return None | [
"def",
"get_intrinsic_rewards",
"(",
"self",
",",
"curr_info",
",",
"next_info",
")",
":",
"if",
"self",
".",
"use_curiosity",
":",
"if",
"len",
"(",
"curr_info",
".",
"agents",
")",
"==",
"0",
":",
"return",
"[",
"]",
"feed_dict",
"=",
"{",
"self",
".",
"model",
".",
"batch_size",
":",
"len",
"(",
"next_info",
".",
"vector_observations",
")",
",",
"self",
".",
"model",
".",
"sequence_length",
":",
"1",
"}",
"if",
"self",
".",
"use_continuous_act",
":",
"feed_dict",
"[",
"self",
".",
"model",
".",
"selected_actions",
"]",
"=",
"next_info",
".",
"previous_vector_actions",
"else",
":",
"feed_dict",
"[",
"self",
".",
"model",
".",
"action_holder",
"]",
"=",
"next_info",
".",
"previous_vector_actions",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"model",
".",
"vis_obs_size",
")",
":",
"feed_dict",
"[",
"self",
".",
"model",
".",
"visual_in",
"[",
"i",
"]",
"]",
"=",
"curr_info",
".",
"visual_observations",
"[",
"i",
"]",
"feed_dict",
"[",
"self",
".",
"model",
".",
"next_visual_in",
"[",
"i",
"]",
"]",
"=",
"next_info",
".",
"visual_observations",
"[",
"i",
"]",
"if",
"self",
".",
"use_vec_obs",
":",
"feed_dict",
"[",
"self",
".",
"model",
".",
"vector_in",
"]",
"=",
"curr_info",
".",
"vector_observations",
"feed_dict",
"[",
"self",
".",
"model",
".",
"next_vector_in",
"]",
"=",
"next_info",
".",
"vector_observations",
"if",
"self",
".",
"use_recurrent",
":",
"if",
"curr_info",
".",
"memories",
".",
"shape",
"[",
"1",
"]",
"==",
"0",
":",
"curr_info",
".",
"memories",
"=",
"self",
".",
"make_empty_memory",
"(",
"len",
"(",
"curr_info",
".",
"agents",
")",
")",
"feed_dict",
"[",
"self",
".",
"model",
".",
"memory_in",
"]",
"=",
"curr_info",
".",
"memories",
"intrinsic_rewards",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"self",
".",
"model",
".",
"intrinsic_reward",
",",
"feed_dict",
"=",
"feed_dict",
")",
"*",
"float",
"(",
"self",
".",
"has_updated",
")",
"return",
"intrinsic_rewards",
"else",
":",
"return",
"None"
]
| 51.75 | 23 |
def custom_decode(encoding):
"""Overrides encoding when charset declaration
or charset determination is a subset of a larger
charset. Created because of issues with Chinese websites"""
encoding = encoding.lower()
alternates = {
'big5': 'big5hkscs',
'gb2312': 'gb18030',
'ascii': 'utf-8',
        'maccyrillic': 'cp1251',
}
if encoding in alternates:
return alternates[encoding]
else:
return encoding | [
"def",
"custom_decode",
"(",
"encoding",
")",
":",
"encoding",
"=",
"encoding",
".",
"lower",
"(",
")",
"alternates",
"=",
"{",
"'big5'",
":",
"'big5hkscs'",
",",
"'gb2312'",
":",
"'gb18030'",
",",
"'ascii'",
":",
"'utf-8'",
",",
"'MacCyrillic'",
":",
"'cp1251'",
",",
"}",
"if",
"encoding",
"in",
"alternates",
":",
"return",
"alternates",
"[",
"encoding",
"]",
"else",
":",
"return",
"encoding"
]
| 30.933333 | 13.533333 |
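
Sample calls for custom_decode above; input case does not matter, since the
string is lowercased before the lookup:

print(custom_decode('GB2312'))   # 'gb18030', a superset of gb2312
print(custom_decode('Big5'))     # 'big5hkscs'
print(custom_decode('latin-1'))  # 'latin-1', unchanged: no alternate listed
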
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio):
"""Return the string message associated with TensorBoard purges."""
return ('Detected out of order event.step likely caused by '
'a TensorFlow restart. Purging expired events from Tensorboard'
' display between the previous step: {} (timestamp: {}) and '
'current step: {} (timestamp: {}). Removing {} scalars, {} '
'histograms, {} compressed histograms, {} images, '
'and {} audio.').format(most_recent_step, most_recent_wall_time,
event_step, event_wall_time,
num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio) | [
"def",
"_GetPurgeMessage",
"(",
"most_recent_step",
",",
"most_recent_wall_time",
",",
"event_step",
",",
"event_wall_time",
",",
"num_expired_scalars",
",",
"num_expired_histos",
",",
"num_expired_comp_histos",
",",
"num_expired_images",
",",
"num_expired_audio",
")",
":",
"return",
"(",
"'Detected out of order event.step likely caused by '",
"'a TensorFlow restart. Purging expired events from Tensorboard'",
"' display between the previous step: {} (timestamp: {}) and '",
"'current step: {} (timestamp: {}). Removing {} scalars, {} '",
"'histograms, {} compressed histograms, {} images, '",
"'and {} audio.'",
")",
".",
"format",
"(",
"most_recent_step",
",",
"most_recent_wall_time",
",",
"event_step",
",",
"event_wall_time",
",",
"num_expired_scalars",
",",
"num_expired_histos",
",",
"num_expired_comp_histos",
",",
"num_expired_images",
",",
"num_expired_audio",
")"
]
| 66.8 | 24.866667 |
def load_simple_endpoint(category, name):
    '''fetch the entry point for a plugin by name and return the loaded
    object'''
for ep in pkg_resources.iter_entry_points(category):
if ep.name == name:
return ep.load()
raise KeyError(name) | [
"def",
"load_simple_endpoint",
"(",
"category",
",",
"name",
")",
":",
"for",
"ep",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"category",
")",
":",
"if",
"ep",
".",
"name",
"==",
"name",
":",
"return",
"ep",
".",
"load",
"(",
")",
"raise",
"KeyError",
"(",
"name",
")"
]
| 37.428571 | 16.285714 |
def global_get(self, key):
"""Return the value for the given key in the ``globals`` table."""
key = self.pack(key)
r = self.sql('global_get', key).fetchone()
if r is None:
raise KeyError("Not set")
return self.unpack(r[0]) | [
"def",
"global_get",
"(",
"self",
",",
"key",
")",
":",
"key",
"=",
"self",
".",
"pack",
"(",
"key",
")",
"r",
"=",
"self",
".",
"sql",
"(",
"'global_get'",
",",
"key",
")",
".",
"fetchone",
"(",
")",
"if",
"r",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"Not set\"",
")",
"return",
"self",
".",
"unpack",
"(",
"r",
"[",
"0",
"]",
")"
]
| 38.285714 | 9.428571 |
def check_dataset(self, dataset_id, project_id=None):
"""Check to see if a dataset exists.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
bool
            True if dataset at `dataset_id` exists, else False
"""
dataset = self.get_dataset(dataset_id, project_id)
return bool(dataset) | [
"def",
"check_dataset",
"(",
"self",
",",
"dataset_id",
",",
"project_id",
"=",
"None",
")",
":",
"dataset",
"=",
"self",
".",
"get_dataset",
"(",
"dataset_id",
",",
"project_id",
")",
"return",
"bool",
"(",
"dataset",
")"
]
| 27.529412 | 17.764706 |
def get_indexes(self, db_name, tbl_name, max_indexes):
"""
Parameters:
- db_name
- tbl_name
- max_indexes
"""
self.send_get_indexes(db_name, tbl_name, max_indexes)
return self.recv_get_indexes() | [
"def",
"get_indexes",
"(",
"self",
",",
"db_name",
",",
"tbl_name",
",",
"max_indexes",
")",
":",
"self",
".",
"send_get_indexes",
"(",
"db_name",
",",
"tbl_name",
",",
"max_indexes",
")",
"return",
"self",
".",
"recv_get_indexes",
"(",
")"
]
| 24.555556 | 15 |
def encode(cls, value):
"""
take a list and turn it into a utf-8 encoded byte-string for redis.
:param value: list
:return: bytes
"""
try:
coerced = list(value)
if coerced == value:
return json.dumps(coerced).encode(cls._encoding)
except TypeError:
pass
raise InvalidValue('not a list') | [
"def",
"encode",
"(",
"cls",
",",
"value",
")",
":",
"try",
":",
"coerced",
"=",
"list",
"(",
"value",
")",
"if",
"coerced",
"==",
"value",
":",
"return",
"json",
".",
"dumps",
"(",
"coerced",
")",
".",
"encode",
"(",
"cls",
".",
"_encoding",
")",
"except",
"TypeError",
":",
"pass",
"raise",
"InvalidValue",
"(",
"'not a list'",
")"
]
| 26 | 18 |
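
A minimal harness for the encode classmethod above; ListField and
InvalidValue are hypothetical stand-ins for whatever container class and
exception the method actually belongs to:

import json

class InvalidValue(Exception):
    pass

class ListField:
    _encoding = 'utf-8'

    @classmethod
    def encode(cls, value):
        # same body as the record above
        try:
            coerced = list(value)
            if coerced == value:
                return json.dumps(coerced).encode(cls._encoding)
        except TypeError:
            pass
        raise InvalidValue('not a list')

print(ListField.encode([1, 'a']))  # b'[1, "a"]'
# ListField.encode('abc') raises InvalidValue: list('abc') != 'abc'
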
def get(self, key):
""" Fetch entry from database. """
c = self.conn.cursor()
for row in c.execute("SELECT key, nonce, key_handle, aead, oath_C, oath_T FROM oath WHERE key = ?", (key,)):
return ValOathEntry(row)
raise Exception("OATH token for '%s' not found in database (%s)" % (key, self.filename)) | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"c",
"=",
"self",
".",
"conn",
".",
"cursor",
"(",
")",
"for",
"row",
"in",
"c",
".",
"execute",
"(",
"\"SELECT key, nonce, key_handle, aead, oath_C, oath_T FROM oath WHERE key = ?\"",
",",
"(",
"key",
",",
")",
")",
":",
"return",
"ValOathEntry",
"(",
"row",
")",
"raise",
"Exception",
"(",
"\"OATH token for '%s' not found in database (%s)\"",
"%",
"(",
"key",
",",
"self",
".",
"filename",
")",
")"
]
| 56.5 | 27.833333 |
def run_multiple(self, eventLoops):
"""run the event loops in the background.
Args:
eventLoops (list): a list of event loops to run
"""
self.nruns += len(eventLoops)
return self.communicationChannel.put_multiple(eventLoops) | [
"def",
"run_multiple",
"(",
"self",
",",
"eventLoops",
")",
":",
"self",
".",
"nruns",
"+=",
"len",
"(",
"eventLoops",
")",
"return",
"self",
".",
"communicationChannel",
".",
"put_multiple",
"(",
"eventLoops",
")"
]
| 26.9 | 19.9 |
def dataset(
node_parser,
include=lambda x: True,
input_transform=None,
target_transform=None):
"""Convert immediate children of a GroupNode into a torch.data.Dataset
Keyword arguments
* node_parser=callable that converts a DataNode to a Dataset item
* include=lambda x: True
lambda(quilt.nodes.GroupNode) => {True, False}
intended to filter nodes based on metadata
* input_transform=None; optional callable that takes the item as its argument
    * target_transform=None; optional callable that takes the item as its argument;
implementation may make its own copy of item to avoid side effects
Dataset.__getitem__ returns the following tuple
item = node_parser(node)
    (input_transform(item), target_transform(item))
Or, if no _transform functions are provided:
(item, item)
"""
def _dataset(node, paths): # pylint: disable=unused-argument
return DatasetFromGroupNode(
node,
node_parser=node_parser,
include=include,
input_transform=input_transform,
target_transform=target_transform)
return _dataset | [
"def",
"dataset",
"(",
"node_parser",
",",
"include",
"=",
"lambda",
"x",
":",
"True",
",",
"input_transform",
"=",
"None",
",",
"target_transform",
"=",
"None",
")",
":",
"def",
"_dataset",
"(",
"node",
",",
"paths",
")",
":",
"# pylint: disable=unused-argument",
"return",
"DatasetFromGroupNode",
"(",
"node",
",",
"node_parser",
"=",
"node_parser",
",",
"include",
"=",
"include",
",",
"input_transform",
"=",
"input_transform",
",",
"target_transform",
"=",
"target_transform",
")",
"return",
"_dataset"
]
| 38.4 | 17.3 |
def wake_lock_size(self):
"""Get the size of the current wake lock."""
output = self.adb_shell(WAKE_LOCK_SIZE_CMD)
if not output:
return None
return int(output.split("=")[1].strip()) | [
"def",
"wake_lock_size",
"(",
"self",
")",
":",
"output",
"=",
"self",
".",
"adb_shell",
"(",
"WAKE_LOCK_SIZE_CMD",
")",
"if",
"not",
"output",
":",
"return",
"None",
"return",
"int",
"(",
"output",
".",
"split",
"(",
"\"=\"",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")"
]
| 36.833333 | 11.5 |
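
The parse in wake_lock_size assumes shell output shaped roughly like
'Wake Locks: size=N'; the sample string below is an assumption, not captured
adb output:

output = 'Wake Locks: size=2'
size = int(output.split('=')[1].strip())
print(size)  # 2
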
def extractDates(self, inp):
"""Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
"""
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None)) | [
"def",
"extractDates",
"(",
"self",
",",
"inp",
")",
":",
"def",
"merge",
"(",
"param",
")",
":",
"day",
",",
"time",
"=",
"param",
"if",
"not",
"(",
"day",
"or",
"time",
")",
":",
"return",
"None",
"if",
"not",
"day",
":",
"return",
"time",
"if",
"not",
"time",
":",
"return",
"day",
"return",
"datetime",
".",
"datetime",
"(",
"day",
".",
"year",
",",
"day",
".",
"month",
",",
"day",
".",
"day",
",",
"time",
".",
"hour",
",",
"time",
".",
"minute",
")",
"days",
"=",
"self",
".",
"extractDays",
"(",
"inp",
")",
"times",
"=",
"self",
".",
"extractTimes",
"(",
"inp",
")",
"return",
"map",
"(",
"merge",
",",
"zip_longest",
"(",
"days",
",",
"times",
",",
"fillvalue",
"=",
"None",
")",
")"
]
| 31.433333 | 19.6 |
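
A standalone sketch of the merge step from extractDates, feeding hand-built
day and time lists in place of extractDays/extractTimes:

import datetime
from itertools import zip_longest

def merge(param):
    day, time = param
    if not (day or time):
        return None
    if not day:
        return time
    if not time:
        return day
    return datetime.datetime(day.year, day.month, day.day,
                             time.hour, time.minute)

days = [datetime.date(2019, 5, 1), datetime.date(2019, 5, 2)]
times = [datetime.time(9, 30)]  # shorter list: the second day has no time

print(list(map(merge, zip_longest(days, times, fillvalue=None))))
# [datetime.datetime(2019, 5, 1, 9, 30), datetime.date(2019, 5, 2)]
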
def debug(self, s, level=2):
"""Write a debug message."""
self.write(s, level=level, color='white') | [
"def",
"debug",
"(",
"self",
",",
"s",
",",
"level",
"=",
"2",
")",
":",
"self",
".",
"write",
"(",
"s",
",",
"level",
"=",
"level",
",",
"color",
"=",
"'white'",
")"
]
| 37.666667 | 7 |
def pingback_url(self, server_name, target_url):
"""
Do a pingback call for the target URL.
"""
try:
server = ServerProxy(server_name)
reply = server.pingback.ping(self.entry_url, target_url)
except (Error, socket.error):
reply = '%s cannot be pinged.' % target_url
return reply | [
"def",
"pingback_url",
"(",
"self",
",",
"server_name",
",",
"target_url",
")",
":",
"try",
":",
"server",
"=",
"ServerProxy",
"(",
"server_name",
")",
"reply",
"=",
"server",
".",
"pingback",
".",
"ping",
"(",
"self",
".",
"entry_url",
",",
"target_url",
")",
"except",
"(",
"Error",
",",
"socket",
".",
"error",
")",
":",
"reply",
"=",
"'%s cannot be pinged.'",
"%",
"target_url",
"return",
"reply"
]
| 35.3 | 11.3 |
def cancel_pending_requests(self):
'''Cancel all pending requests.'''
exception = CancelledError()
for _request, event in self._requests.values():
event.result = exception
event.set()
self._requests.clear() | [
"def",
"cancel_pending_requests",
"(",
"self",
")",
":",
"exception",
"=",
"CancelledError",
"(",
")",
"for",
"_request",
",",
"event",
"in",
"self",
".",
"_requests",
".",
"values",
"(",
")",
":",
"event",
".",
"result",
"=",
"exception",
"event",
".",
"set",
"(",
")",
"self",
".",
"_requests",
".",
"clear",
"(",
")"
]
| 36.571429 | 8.285714 |
def get_sorted_nts_omit_section(self, hdrgo_prt, hdrgo_sort):
"""Return a flat list of sections (wo/section names) with GO terms grouped and sorted."""
nts_flat = []
# print("SSSS SorterNts:get_sorted_nts_omit_section(hdrgo_prt={}, hdrgo_sort={})".format(
# hdrgo_prt, hdrgo_sort))
hdrgos_seen = set()
hdrgos_actual = self.sortgos.grprobj.get_hdrgos()
for _, section_hdrgos_all in self.sections:
#section_hdrgos_act = set(section_hdrgos_all).intersection(hdrgos_actual)
section_hdrgos_act = [h for h in section_hdrgos_all if h in hdrgos_actual]
hdrgos_seen |= set(section_hdrgos_act)
self.sortgos.get_sorted_hdrgo2usrgos(
section_hdrgos_act, nts_flat, hdrgo_prt, hdrgo_sort)
remaining_hdrgos = set(self.sortgos.grprobj.get_hdrgos()).difference(hdrgos_seen)
self.sortgos.get_sorted_hdrgo2usrgos(remaining_hdrgos, nts_flat, hdrgo_prt, hdrgo_sort)
return nts_flat | [
"def",
"get_sorted_nts_omit_section",
"(",
"self",
",",
"hdrgo_prt",
",",
"hdrgo_sort",
")",
":",
"nts_flat",
"=",
"[",
"]",
"# print(\"SSSS SorterNts:get_sorted_nts_omit_section(hdrgo_prt={}, hdrgo_sort={})\".format(",
"# hdrgo_prt, hdrgo_sort))",
"hdrgos_seen",
"=",
"set",
"(",
")",
"hdrgos_actual",
"=",
"self",
".",
"sortgos",
".",
"grprobj",
".",
"get_hdrgos",
"(",
")",
"for",
"_",
",",
"section_hdrgos_all",
"in",
"self",
".",
"sections",
":",
"#section_hdrgos_act = set(section_hdrgos_all).intersection(hdrgos_actual)",
"section_hdrgos_act",
"=",
"[",
"h",
"for",
"h",
"in",
"section_hdrgos_all",
"if",
"h",
"in",
"hdrgos_actual",
"]",
"hdrgos_seen",
"|=",
"set",
"(",
"section_hdrgos_act",
")",
"self",
".",
"sortgos",
".",
"get_sorted_hdrgo2usrgos",
"(",
"section_hdrgos_act",
",",
"nts_flat",
",",
"hdrgo_prt",
",",
"hdrgo_sort",
")",
"remaining_hdrgos",
"=",
"set",
"(",
"self",
".",
"sortgos",
".",
"grprobj",
".",
"get_hdrgos",
"(",
")",
")",
".",
"difference",
"(",
"hdrgos_seen",
")",
"self",
".",
"sortgos",
".",
"get_sorted_hdrgo2usrgos",
"(",
"remaining_hdrgos",
",",
"nts_flat",
",",
"hdrgo_prt",
",",
"hdrgo_sort",
")",
"return",
"nts_flat"
]
| 62.0625 | 25 |
def FromMicroseconds(self, micros):
"""Converts microseconds to Duration."""
self._NormalizeDuration(
micros // _MICROS_PER_SECOND,
(micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND) | [
"def",
"FromMicroseconds",
"(",
"self",
",",
"micros",
")",
":",
"self",
".",
"_NormalizeDuration",
"(",
"micros",
"//",
"_MICROS_PER_SECOND",
",",
"(",
"micros",
"%",
"_MICROS_PER_SECOND",
")",
"*",
"_NANOS_PER_MICROSECOND",
")"
]
| 41.4 | 8.6 |
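
The normalization split in FromMicroseconds, worked through by hand; the two
constants are the usual 10**6 and 10**3 scale factors:

_MICROS_PER_SECOND = 10 ** 6
_NANOS_PER_MICROSECOND = 10 ** 3

micros = 1500000
seconds = micros // _MICROS_PER_SECOND                           # 1
nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND   # 500000000
# _NormalizeDuration(1, 500000000) represents 1.5 seconds
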
def run_with_werkzeug(self, **options):
"""Run with werkzeug simple wsgi container."""
threaded = self.threads is not None and (self.threads > 0)
self.app.run(
host=self.host,
port=self.port,
debug=self.debug,
threaded=threaded,
**options
) | [
"def",
"run_with_werkzeug",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"threaded",
"=",
"self",
".",
"threads",
"is",
"not",
"None",
"and",
"(",
"self",
".",
"threads",
">",
"0",
")",
"self",
".",
"app",
".",
"run",
"(",
"host",
"=",
"self",
".",
"host",
",",
"port",
"=",
"self",
".",
"port",
",",
"debug",
"=",
"self",
".",
"debug",
",",
"threaded",
"=",
"threaded",
",",
"*",
"*",
"options",
")"
]
| 32.3 | 14.3 |
def set_attributes(trace):
"""Automatically set attributes for Google Cloud environment."""
spans = trace.get('spans')
for span in spans:
if span.get('attributes') is None:
span['attributes'] = {}
if is_gae_environment():
set_gae_attributes(span)
set_common_attributes(span)
set_monitored_resource_attributes(span) | [
"def",
"set_attributes",
"(",
"trace",
")",
":",
"spans",
"=",
"trace",
".",
"get",
"(",
"'spans'",
")",
"for",
"span",
"in",
"spans",
":",
"if",
"span",
".",
"get",
"(",
"'attributes'",
")",
"is",
"None",
":",
"span",
"[",
"'attributes'",
"]",
"=",
"{",
"}",
"if",
"is_gae_environment",
"(",
")",
":",
"set_gae_attributes",
"(",
"span",
")",
"set_common_attributes",
"(",
"span",
")",
"set_monitored_resource_attributes",
"(",
"span",
")"
]
| 28.692308 | 14.846154 |
def get_member_details(self, member_id, **kwargs): # noqa: E501
"""Get member details # noqa: E501
Get your member details (quota and role) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_member_details(member_id, async=True)
>>> result = thread.get()
:param async bool
:param str member_id: (required)
:return: MemberDetail
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_member_details_with_http_info(member_id, **kwargs) # noqa: E501
else:
(data) = self.get_member_details_with_http_info(member_id, **kwargs) # noqa: E501
return data | [
"def",
"get_member_details",
"(",
"self",
",",
"member_id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"self",
".",
"get_member_details_with_http_info",
"(",
"member_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_member_details_with_http_info",
"(",
"member_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
]
| 43.095238 | 20.142857 |
def get_enrollments_for_regid(self, regid, params={},
include_courses=True):
"""
Return a list of enrollments for the passed user regid.
https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.index
"""
sis_user_id = self._sis_id(regid, sis_field="user")
url = USERS_API.format(sis_user_id) + "/enrollments"
courses = Courses() if include_courses else None
enrollments = []
for datum in self._get_paged_resource(url, params=params):
enrollment = CanvasEnrollment(data=datum)
if include_courses:
course_id = datum["course_id"]
course = courses.get_course(course_id)
if course.sis_course_id is not None:
enrollment.course = course
# the following 3 lines are not removed
# to be backward compatible.
enrollment.course_url = course.course_url
enrollment.course_name = course.name
enrollment.sis_course_id = course.sis_course_id
else:
enrollment.course_url = re.sub(
r'/users/\d+$', '', enrollment.html_url)
enrollments.append(enrollment)
return enrollments | [
"def",
"get_enrollments_for_regid",
"(",
"self",
",",
"regid",
",",
"params",
"=",
"{",
"}",
",",
"include_courses",
"=",
"True",
")",
":",
"sis_user_id",
"=",
"self",
".",
"_sis_id",
"(",
"regid",
",",
"sis_field",
"=",
"\"user\"",
")",
"url",
"=",
"USERS_API",
".",
"format",
"(",
"sis_user_id",
")",
"+",
"\"/enrollments\"",
"courses",
"=",
"Courses",
"(",
")",
"if",
"include_courses",
"else",
"None",
"enrollments",
"=",
"[",
"]",
"for",
"datum",
"in",
"self",
".",
"_get_paged_resource",
"(",
"url",
",",
"params",
"=",
"params",
")",
":",
"enrollment",
"=",
"CanvasEnrollment",
"(",
"data",
"=",
"datum",
")",
"if",
"include_courses",
":",
"course_id",
"=",
"datum",
"[",
"\"course_id\"",
"]",
"course",
"=",
"courses",
".",
"get_course",
"(",
"course_id",
")",
"if",
"course",
".",
"sis_course_id",
"is",
"not",
"None",
":",
"enrollment",
".",
"course",
"=",
"course",
"# the following 3 lines are not removed",
"# to be backward compatible.",
"enrollment",
".",
"course_url",
"=",
"course",
".",
"course_url",
"enrollment",
".",
"course_name",
"=",
"course",
".",
"name",
"enrollment",
".",
"sis_course_id",
"=",
"course",
".",
"sis_course_id",
"else",
":",
"enrollment",
".",
"course_url",
"=",
"re",
".",
"sub",
"(",
"r'/users/\\d+$'",
",",
"''",
",",
"enrollment",
".",
"html_url",
")",
"enrollments",
".",
"append",
"(",
"enrollment",
")",
"return",
"enrollments"
]
| 41.125 | 19.3125 |
def action_rename(self):
"""
Rename a shortcut
"""
# get old and new name from args
old = self.args['<old>']
new = self.args['<new>']
# select the old shortcut
self.db_query('''
SELECT id FROM shortcuts WHERE name=?
''', (old,))
r = self.db_fetch_one()
# error if old doesn't exist
        if r is None:
print_err('Shortcut "%s" does not exist!' % old)
return
# error if new exists
if self.shortcut_exists(new):
print_err('Shortcut "%s" already exists!' % new)
return
id = r[0]
# rename in DB
self.db_exec('''
UPDATE shortcuts SET name=? WHERE id=?
''', (new, id))
# show OK message
print_msg('Shortcut "%s" renamed to "%s".' % (old, new)) | [
"def",
"action_rename",
"(",
"self",
")",
":",
"# get old and new name from args",
"old",
"=",
"self",
".",
"args",
"[",
"'<old>'",
"]",
"new",
"=",
"self",
".",
"args",
"[",
"'<new>'",
"]",
"# select the old shortcut",
"self",
".",
"db_query",
"(",
"'''\n SELECT id FROM shortcuts WHERE name=?\n '''",
",",
"(",
"old",
",",
")",
")",
"r",
"=",
"self",
".",
"db_fetch_one",
"(",
")",
"# error if old doesn't exist",
"if",
"r",
"==",
"None",
":",
"print_err",
"(",
"'Shortcut \"%s\" does not exist!'",
"%",
"old",
")",
"return",
"# error if new exists",
"if",
"self",
".",
"shortcut_exists",
"(",
"new",
")",
":",
"print_err",
"(",
"'Shortcut \"%s\" already exists!'",
"%",
"new",
")",
"return",
"id",
"=",
"r",
"[",
"0",
"]",
"# rename in DB",
"self",
".",
"db_exec",
"(",
"'''\n UPDATE shortcuts SET name=? WHERE id=?\n '''",
",",
"(",
"new",
",",
"id",
")",
")",
"# show OK message",
"print_msg",
"(",
"'Shortcut \"%s\" renamed to \"%s\".'",
"%",
"(",
"old",
",",
"new",
")",
")"
]
| 24.852941 | 18.323529 |
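
The same SELECT-then-UPDATE flow as action_rename above, run against an
in-memory sqlite3 database without the class's db_* wrappers:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE shortcuts (id INTEGER PRIMARY KEY, name TEXT)')
conn.execute('INSERT INTO shortcuts (name) VALUES (?)', ('gh',))

old, new = 'gh', 'hub'
row = conn.execute('SELECT id FROM shortcuts WHERE name=?', (old,)).fetchone()
if row is None:
    print('Shortcut "%s" does not exist!' % old)
elif conn.execute('SELECT id FROM shortcuts WHERE name=?', (new,)).fetchone():
    print('Shortcut "%s" already exists!' % new)
else:
    conn.execute('UPDATE shortcuts SET name=? WHERE id=?', (new, row[0]))
    print('Shortcut "%s" renamed to "%s".' % (old, new))
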
def _set_protocol_vrrp(self, v, load=False):
"""
Setter method for protocol_vrrp, mapped from YANG variable /protocol_vrrp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_protocol_vrrp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_protocol_vrrp() directly.
YANG Description: An intermediary node that separates the protocol vrrp from other protocols.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=protocol_vrrp.protocol_vrrp, is_container='container', presence=False, yang_name="protocol-vrrp", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """protocol_vrrp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=protocol_vrrp.protocol_vrrp, is_container='container', presence=False, yang_name="protocol-vrrp", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
})
self.__protocol_vrrp = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_protocol_vrrp",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"protocol_vrrp",
".",
"protocol_vrrp",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"protocol-vrrp\"",
",",
"rest_name",
"=",
"\"\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'cli-drop-node-name'",
":",
"None",
",",
"u'sort-priority'",
":",
"u'RUNNCFG_LEVEL_ROUTER_GLOBAL'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-vrrp'",
",",
"defining_module",
"=",
"'brocade-vrrp'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"protocol_vrrp must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=protocol_vrrp.protocol_vrrp, is_container='container', presence=False, yang_name=\"protocol-vrrp\", rest_name=\"\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__protocol_vrrp",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
]
| 73.833333 | 36.041667 |
def getSpec(cls):
"""
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
The parameters collection is constructed based on the parameters specified
by the various components (spatialSpec, temporalSpec and otherSpec)
"""
spec = cls.getBaseSpec()
t, o = _getAdditionalSpecs(temporalImp=gDefaultTemporalImp)
spec['parameters'].update(t)
spec['parameters'].update(o)
return spec | [
"def",
"getSpec",
"(",
"cls",
")",
":",
"spec",
"=",
"cls",
".",
"getBaseSpec",
"(",
")",
"t",
",",
"o",
"=",
"_getAdditionalSpecs",
"(",
"temporalImp",
"=",
"gDefaultTemporalImp",
")",
"spec",
"[",
"'parameters'",
"]",
".",
"update",
"(",
"t",
")",
"spec",
"[",
"'parameters'",
"]",
".",
"update",
"(",
"o",
")",
"return",
"spec"
]
| 32.461538 | 21.538462 |
def _check_configure_args(configure_args: Dict[str, Any]) -> Dict[str, Any]:
""" Check the arguments passed to configure.
Raises an exception on failure. On success, returns a dict of
configure_args with any necessary mutations.
"""
# SSID must always be present
if not configure_args.get('ssid')\
or not isinstance(configure_args['ssid'], str):
raise ConfigureArgsError("SSID must be specified")
# If specified, hidden must be a bool
if not configure_args.get('hidden'):
configure_args['hidden'] = False
elif not isinstance(configure_args['hidden'], bool):
raise ConfigureArgsError('If specified, hidden must be a bool')
configure_args['securityType'] = _deduce_security(configure_args)
# If we have wpa2-personal, we need a psk
if configure_args['securityType'] == nmcli.SECURITY_TYPES.WPA_PSK:
if not configure_args.get('psk'):
raise ConfigureArgsError(
'If securityType is wpa-psk, psk must be specified')
return configure_args
# If we have wpa2-enterprise, we need eap config, and we need to check
# it
if configure_args['securityType'] == nmcli.SECURITY_TYPES.WPA_EAP:
if not configure_args.get('eapConfig'):
raise ConfigureArgsError(
'If securityType is wpa-eap, eapConfig must be specified')
configure_args['eapConfig']\
= _eap_check_config(configure_args['eapConfig'])
return configure_args
# If we’re still here we have no security and we’re done
return configure_args | [
"def",
"_check_configure_args",
"(",
"configure_args",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"# SSID must always be present",
"if",
"not",
"configure_args",
".",
"get",
"(",
"'ssid'",
")",
"or",
"not",
"isinstance",
"(",
"configure_args",
"[",
"'ssid'",
"]",
",",
"str",
")",
":",
"raise",
"ConfigureArgsError",
"(",
"\"SSID must be specified\"",
")",
"# If specified, hidden must be a bool",
"if",
"not",
"configure_args",
".",
"get",
"(",
"'hidden'",
")",
":",
"configure_args",
"[",
"'hidden'",
"]",
"=",
"False",
"elif",
"not",
"isinstance",
"(",
"configure_args",
"[",
"'hidden'",
"]",
",",
"bool",
")",
":",
"raise",
"ConfigureArgsError",
"(",
"'If specified, hidden must be a bool'",
")",
"configure_args",
"[",
"'securityType'",
"]",
"=",
"_deduce_security",
"(",
"configure_args",
")",
"# If we have wpa2-personal, we need a psk",
"if",
"configure_args",
"[",
"'securityType'",
"]",
"==",
"nmcli",
".",
"SECURITY_TYPES",
".",
"WPA_PSK",
":",
"if",
"not",
"configure_args",
".",
"get",
"(",
"'psk'",
")",
":",
"raise",
"ConfigureArgsError",
"(",
"'If securityType is wpa-psk, psk must be specified'",
")",
"return",
"configure_args",
"# If we have wpa2-enterprise, we need eap config, and we need to check",
"# it",
"if",
"configure_args",
"[",
"'securityType'",
"]",
"==",
"nmcli",
".",
"SECURITY_TYPES",
".",
"WPA_EAP",
":",
"if",
"not",
"configure_args",
".",
"get",
"(",
"'eapConfig'",
")",
":",
"raise",
"ConfigureArgsError",
"(",
"'If securityType is wpa-eap, eapConfig must be specified'",
")",
"configure_args",
"[",
"'eapConfig'",
"]",
"=",
"_eap_check_config",
"(",
"configure_args",
"[",
"'eapConfig'",
"]",
")",
"return",
"configure_args",
"# If we’re still here we have no security and we’re done",
"return",
"configure_args"
]
| 42 | 18.243243 |
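
A hedged usage sketch of the mutation behaviour; the exact constant that _deduce_security returns is an assumption:

    # Hypothetical input: psk present, 'hidden' omitted.
    args = _check_configure_args({'ssid': 'lab-net', 'psk': 'hunter22'})
    # On return, args['hidden'] has been filled in as False and
    # args['securityType'] holds the deduced value (expected to be
    # the WPA-PSK constant, since a psk was supplied).
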
def build(cls, seqs: Iterable[int], uid: bool = False) -> 'SequenceSet':
"""Build a new sequence set that contains the given values using as
few groups as possible.
Args:
seqs: The sequence values to build.
uid: True if the sequences refer to message UIDs.
"""
seqs_list = sorted(set(seqs))
groups: List[Union[int, Tuple[int, int]]] = []
group: Union[int, Tuple[int, int]] = seqs_list[0]
for i in range(1, len(seqs_list)):
group_i = seqs_list[i]
if isinstance(group, int):
if group_i == group + 1:
group = (group, group_i)
else:
groups.append(group)
group = group_i
elif isinstance(group, tuple):
if group_i == group[1] + 1:
group = (group[0], group_i)
else:
groups.append(group)
group = group_i
groups.append(group)
return SequenceSet(groups, uid) | [
"def",
"build",
"(",
"cls",
",",
"seqs",
":",
"Iterable",
"[",
"int",
"]",
",",
"uid",
":",
"bool",
"=",
"False",
")",
"->",
"'SequenceSet'",
":",
"seqs_list",
"=",
"sorted",
"(",
"set",
"(",
"seqs",
")",
")",
"groups",
":",
"List",
"[",
"Union",
"[",
"int",
",",
"Tuple",
"[",
"int",
",",
"int",
"]",
"]",
"]",
"=",
"[",
"]",
"group",
":",
"Union",
"[",
"int",
",",
"Tuple",
"[",
"int",
",",
"int",
"]",
"]",
"=",
"seqs_list",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"seqs_list",
")",
")",
":",
"group_i",
"=",
"seqs_list",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"group",
",",
"int",
")",
":",
"if",
"group_i",
"==",
"group",
"+",
"1",
":",
"group",
"=",
"(",
"group",
",",
"group_i",
")",
"else",
":",
"groups",
".",
"append",
"(",
"group",
")",
"group",
"=",
"group_i",
"elif",
"isinstance",
"(",
"group",
",",
"tuple",
")",
":",
"if",
"group_i",
"==",
"group",
"[",
"1",
"]",
"+",
"1",
":",
"group",
"=",
"(",
"group",
"[",
"0",
"]",
",",
"group_i",
")",
"else",
":",
"groups",
".",
"append",
"(",
"group",
")",
"group",
"=",
"group_i",
"groups",
".",
"append",
"(",
"group",
")",
"return",
"SequenceSet",
"(",
"groups",
",",
"uid",
")"
]
| 37.392857 | 10.607143 |
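
A short usage sketch of the grouping behaviour, following the logic above:

    # Consecutive runs collapse into (start, end) tuples; isolated
    # values stay as plain ints.
    seq_set = SequenceSet.build([1, 2, 3, 7, 9, 10])
    # equivalent to SequenceSet([(1, 3), 7, (9, 10)], False)
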
def add_args():
"""Adds commandline arguments and formatted Help"""
parser = argparse.ArgumentParser()
parser.add_argument('-host', action='store', dest='host', default='127.0.0.1', help='DEFAULT "127.0.0.1"')
parser.add_argument('-port', action='store', dest='port', default='2947', help='DEFAULT 2947', type=int)
parser.add_argument('-json', dest='gpsd_protocol', const='json', action='store_const', default='json', help='DEFAULT JSON objects */')
parser.add_argument('-device', dest='devicepath', action='store', help='alternate devicepath e.g.,"-device /dev/ttyUSB4"')
# Infrequently used options
parser.add_argument('-nmea', dest='gpsd_protocol', const='nmea', action='store_const', help='*/ output in NMEA */')
# parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */')
# parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */')
# parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */')
# parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */')
# parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */')
# parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */')
parser.add_argument('-v', '--version', action='version', version='Version: {}'.format(__version__))
cli_args = parser.parse_args()
return cli_args | [
"def",
"add_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'-host'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'host'",
",",
"default",
"=",
"'127.0.0.1'",
",",
"help",
"=",
"'DEFAULT \"127.0.0.1\"'",
")",
"parser",
".",
"add_argument",
"(",
"'-port'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'port'",
",",
"default",
"=",
"'2947'",
",",
"help",
"=",
"'DEFAULT 2947'",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'-json'",
",",
"dest",
"=",
"'gpsd_protocol'",
",",
"const",
"=",
"'json'",
",",
"action",
"=",
"'store_const'",
",",
"default",
"=",
"'json'",
",",
"help",
"=",
"'DEFAULT JSON objects */'",
")",
"parser",
".",
"add_argument",
"(",
"'-device'",
",",
"dest",
"=",
"'devicepath'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'alternate devicepath e.g.,\"-device /dev/ttyUSB4\"'",
")",
"# Infrequently used options",
"parser",
".",
"add_argument",
"(",
"'-nmea'",
",",
"dest",
"=",
"'gpsd_protocol'",
",",
"const",
"=",
"'nmea'",
",",
"action",
"=",
"'store_const'",
",",
"help",
"=",
"'*/ output in NMEA */'",
")",
"# parser.add_argument('-rare', dest='gpsd_protocol', const='rare', action='store_const', help='*/ output of packets in hex */')",
"# parser.add_argument('-raw', dest='gpsd_protocol', const='raw', action='store_const', help='*/ output of raw packets */')",
"# parser.add_argument('-scaled', dest='gpsd_protocol', const='scaled', action='store_const', help='*/ scale output to floats */')",
"# parser.add_argument('-timing', dest='gpsd_protocol', const='timing', action='store_const', help='*/ timing information */')",
"# parser.add_argument('-split24', dest='gpsd_protocol', const='split24', action='store_const', help='*/ split AIS Type 24s */')",
"# parser.add_argument('-pps', dest='gpsd_protocol', const='pps', action='store_const', help='*/ enable PPS JSON */')",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'Version: {}'",
".",
"format",
"(",
"__version__",
")",
")",
"cli_args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"cli_args"
]
| 87.684211 | 57.736842 |
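
A hedged example of exercising the parser; the script name is arbitrary, and add_args() reads sys.argv itself through parser.parse_args():

    import sys

    sys.argv = ['gps_client.py', '-host', '10.0.0.5', '-port', '2948', '-nmea']
    cli_args = add_args()
    # cli_args.host == '10.0.0.5', cli_args.port == 2948 (type=int),
    # cli_args.gpsd_protocol == 'nmea'
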
def send(self, request_id, payload):
"""
Send a request to Kafka
Arguments::
request_id (int): can be any int (used only for debug logging...)
payload: an encoded kafka packet (see KafkaProtocol)
"""
log.debug("About to send %d bytes to Kafka, request %d" % (len(payload), request_id))
# Make sure we have a connection
if not self._sock:
self.reinit()
try:
self._sock.sendall(payload)
except socket.error:
log.exception('Unable to send payload to Kafka')
self._raise_connection_error() | [
"def",
"send",
"(",
"self",
",",
"request_id",
",",
"payload",
")",
":",
"log",
".",
"debug",
"(",
"\"About to send %d bytes to Kafka, request %d\"",
"%",
"(",
"len",
"(",
"payload",
")",
",",
"request_id",
")",
")",
"# Make sure we have a connection",
"if",
"not",
"self",
".",
"_sock",
":",
"self",
".",
"reinit",
"(",
")",
"try",
":",
"self",
".",
"_sock",
".",
"sendall",
"(",
"payload",
")",
"except",
"socket",
".",
"error",
":",
"log",
".",
"exception",
"(",
"'Unable to send payload to Kafka'",
")",
"self",
".",
"_raise_connection_error",
"(",
")"
]
| 30.7 | 20 |
def process_document(self, doc):
"""
Add your code for processing the document
"""
raw = doc.select_segments("$.raw_content")[0]
extractions = doc.extract(self.inferlink_extractor, raw)
doc.store(extractions, "inferlink_extraction")
return list() | [
"def",
"process_document",
"(",
"self",
",",
"doc",
")",
":",
"raw",
"=",
"doc",
".",
"select_segments",
"(",
"\"$.raw_content\"",
")",
"[",
"0",
"]",
"extractions",
"=",
"doc",
".",
"extract",
"(",
"self",
".",
"inferlink_extractor",
",",
"raw",
")",
"doc",
".",
"store",
"(",
"extractions",
",",
"\"inferlink_extraction\"",
")",
"return",
"list",
"(",
")"
]
| 32.777778 | 14.111111 |
def stop(self, force=False):
"""
Send a terminate request.
		:param bool force: ignore the remote device's response
"""
if self._initialized:
self.send(C1218TerminateRequest())
data = self.recv()
if data == b'\x00' or force:
self._initialized = False
self._toggle_bit = False
return True
return False | [
"def",
"stop",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"self",
".",
"_initialized",
":",
"self",
".",
"send",
"(",
"C1218TerminateRequest",
"(",
")",
")",
"data",
"=",
"self",
".",
"recv",
"(",
")",
"if",
"data",
"==",
"b'\\x00'",
"or",
"force",
":",
"self",
".",
"_initialized",
"=",
"False",
"self",
".",
"_toggle_bit",
"=",
"False",
"return",
"True",
"return",
"False"
]
| 22.714286 | 14.428571 |
def add_node(self, node, attrs = None):
"""
Add given node to the graph.
@attention: While nodes can be of any type, it's strongly recommended to use only
numbers and single-line strings as node identifiers if you intend to use write().
@type node: node
@param node: Node identifier.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value) tuples.
"""
if attrs is None:
attrs = []
if (node not in self.node_neighbors):
self.node_neighbors[node] = []
self.node_incidence[node] = []
self.node_attr[node] = attrs
else:
raise AdditionError("Node %s already in digraph" % node) | [
"def",
"add_node",
"(",
"self",
",",
"node",
",",
"attrs",
"=",
"None",
")",
":",
"if",
"attrs",
"is",
"None",
":",
"attrs",
"=",
"[",
"]",
"if",
"(",
"node",
"not",
"in",
"self",
".",
"node_neighbors",
")",
":",
"self",
".",
"node_neighbors",
"[",
"node",
"]",
"=",
"[",
"]",
"self",
".",
"node_incidence",
"[",
"node",
"]",
"=",
"[",
"]",
"self",
".",
"node_attr",
"[",
"node",
"]",
"=",
"attrs",
"else",
":",
"raise",
"AdditionError",
"(",
"\"Node %s already in digraph\"",
"%",
"node",
")"
]
| 36.238095 | 18.142857 |
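
A small usage sketch of the three bookkeeping dicts; the class name is assumed from the method's context:

    g = digraph()  # assumed: the class this method belongs to
    g.add_node('a', attrs=[('color', 'red')])
    # g.node_neighbors == {'a': []}
    # g.node_incidence == {'a': []}
    # g.node_attr      == {'a': [('color', 'red')]}
    g.add_node('a')  # raises AdditionError("Node a already in digraph")
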
def set_data(self, data=None, **kwargs):
"""Set the line data
Parameters
----------
data : array-like
The data.
**kwargs : dict
            Keyword arguments to pass to MarkerVisual and LineVisual.
"""
if data is None:
pos = None
else:
if isinstance(data, tuple):
pos = np.array(data).T.astype(np.float32)
else:
pos = np.atleast_1d(data).astype(np.float32)
if pos.ndim == 1:
pos = pos[:, np.newaxis]
elif pos.ndim > 2:
raise ValueError('data must have at most two dimensions')
if pos.size == 0:
pos = self._line.pos
# if both args and keywords are zero, then there is no
# point in calling this function.
if len(kwargs) == 0:
raise TypeError("neither line points nor line properties"
"are provided")
elif pos.shape[1] == 1:
x = np.arange(pos.shape[0], dtype=np.float32)[:, np.newaxis]
pos = np.concatenate((x, pos), axis=1)
# if args are empty, don't modify position
elif pos.shape[1] > 3:
raise TypeError("Too many coordinates given (%s; max is 3)."
% pos.shape[1])
# todo: have both sub-visuals share the same buffers.
line_kwargs = {}
for k in self._line_kwargs:
if k in kwargs:
k_ = self._kw_trans[k] if k in self._kw_trans else k
line_kwargs[k] = kwargs.pop(k_)
if pos is not None or len(line_kwargs) > 0:
self._line.set_data(pos=pos, **line_kwargs)
marker_kwargs = {}
for k in self._marker_kwargs:
if k in kwargs:
k_ = self._kw_trans[k] if k in self._kw_trans else k
marker_kwargs[k_] = kwargs.pop(k)
if pos is not None or len(marker_kwargs) > 0:
self._markers.set_data(pos=pos, **marker_kwargs)
if len(kwargs) > 0:
raise TypeError("Invalid keyword arguments: %s" % kwargs.keys()) | [
"def",
"set_data",
"(",
"self",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"data",
"is",
"None",
":",
"pos",
"=",
"None",
"else",
":",
"if",
"isinstance",
"(",
"data",
",",
"tuple",
")",
":",
"pos",
"=",
"np",
".",
"array",
"(",
"data",
")",
".",
"T",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"else",
":",
"pos",
"=",
"np",
".",
"atleast_1d",
"(",
"data",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"if",
"pos",
".",
"ndim",
"==",
"1",
":",
"pos",
"=",
"pos",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"elif",
"pos",
".",
"ndim",
">",
"2",
":",
"raise",
"ValueError",
"(",
"'data must have at most two dimensions'",
")",
"if",
"pos",
".",
"size",
"==",
"0",
":",
"pos",
"=",
"self",
".",
"_line",
".",
"pos",
"# if both args and keywords are zero, then there is no",
"# point in calling this function.",
"if",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"raise",
"TypeError",
"(",
"\"neither line points nor line properties\"",
"\"are provided\"",
")",
"elif",
"pos",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"pos",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"pos",
"=",
"np",
".",
"concatenate",
"(",
"(",
"x",
",",
"pos",
")",
",",
"axis",
"=",
"1",
")",
"# if args are empty, don't modify position",
"elif",
"pos",
".",
"shape",
"[",
"1",
"]",
">",
"3",
":",
"raise",
"TypeError",
"(",
"\"Too many coordinates given (%s; max is 3).\"",
"%",
"pos",
".",
"shape",
"[",
"1",
"]",
")",
"# todo: have both sub-visuals share the same buffers.",
"line_kwargs",
"=",
"{",
"}",
"for",
"k",
"in",
"self",
".",
"_line_kwargs",
":",
"if",
"k",
"in",
"kwargs",
":",
"k_",
"=",
"self",
".",
"_kw_trans",
"[",
"k",
"]",
"if",
"k",
"in",
"self",
".",
"_kw_trans",
"else",
"k",
"line_kwargs",
"[",
"k",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"k_",
")",
"if",
"pos",
"is",
"not",
"None",
"or",
"len",
"(",
"line_kwargs",
")",
">",
"0",
":",
"self",
".",
"_line",
".",
"set_data",
"(",
"pos",
"=",
"pos",
",",
"*",
"*",
"line_kwargs",
")",
"marker_kwargs",
"=",
"{",
"}",
"for",
"k",
"in",
"self",
".",
"_marker_kwargs",
":",
"if",
"k",
"in",
"kwargs",
":",
"k_",
"=",
"self",
".",
"_kw_trans",
"[",
"k",
"]",
"if",
"k",
"in",
"self",
".",
"_kw_trans",
"else",
"k",
"marker_kwargs",
"[",
"k_",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"k",
")",
"if",
"pos",
"is",
"not",
"None",
"or",
"len",
"(",
"marker_kwargs",
")",
">",
"0",
":",
"self",
".",
"_markers",
".",
"set_data",
"(",
"pos",
"=",
"pos",
",",
"*",
"*",
"marker_kwargs",
")",
"if",
"len",
"(",
"kwargs",
")",
">",
"0",
":",
"raise",
"TypeError",
"(",
"\"Invalid keyword arguments: %s\"",
"%",
"kwargs",
".",
"keys",
"(",
")",
")"
]
| 38.105263 | 18.052632 |
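
A hedged usage sketch of the one-dimensional fast path; the instance name is an assumption:

    import numpy as np

    y = np.array([0.0, 1.5, 0.7], dtype=np.float32)
    # With a single data column, an x column of 0, 1, 2 is synthesised
    # via np.arange before the positions reach the line and marker
    # sub-visuals.
    plot.set_data(y)  # plot assumed to be an instance of this visual
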
def subtract_and_intersect_circle(self, center, radius):
'''
Circle subtraction / intersection only supported by 2-gon regions, otherwise a VennRegionException is thrown.
In addition, such an exception will be thrown if the circle to be subtracted is completely within the region and forms a "hole".
The result may be either a VennArcgonRegion or a VennMultipieceRegion (the latter happens when the circle "splits" a crescent in two).
'''
if len(self.arcs) != 2:
raise VennRegionException("Circle subtraction and intersection with poly-arc regions is currently only supported for 2-arc-gons.")
# In the following we consider the 2-arc-gon case.
# Before we do anything, we check for a special case, where the circle of interest is one of the two circles forming the arcs.
# In this case we can determine the answer quite easily.
matching_arcs = [a for a in self.arcs if a.lies_on_circle(center, radius)]
if len(matching_arcs) != 0:
# If the circle matches a positive arc, the result is [empty, self], otherwise [self, empty]
return [VennEmptyRegion(), self] if matching_arcs[0].direction else [self, VennEmptyRegion()]
# Consider the intersection points of the circle with the arcs.
# If any of the intersection points corresponds exactly to any of the arc's endpoints, we will end up with
# a lot of messy special cases (as if the usual situation is not messy enough, eh).
# To avoid that, we cheat by slightly increasing the circle's radius until this is not the case any more.
center = np.asarray(center)
illegal_intersections = [a.start_point() for a in self.arcs]
while True:
valid = True
intersections = [a.intersect_circle(center, radius) for a in self.arcs]
for ints in intersections:
for pt in ints:
for illegal_pt in illegal_intersections:
if np.all(abs(pt - illegal_pt) < tol):
valid = False
if valid:
break
else:
radius += tol
# There must be an even number of those points in total.
# (If this is not the case, then we have an unfortunate case with weird numeric errors [TODO: find examples and deal with it?]).
# There are three possibilities with the following subcases:
# I. No intersection points
# a) The polyarc is completely within the circle.
# result = [ empty, self ]
# b) The polyarc is completely outside the circle.
# result = [ self, empty ]
# II. Four intersection points, two for each arc. Points x1, x2 for arc X and y1, y2 for arc Y, ordered along the arc.
# a) The polyarc endpoints are both outside the circle.
# result_subtraction = a combination of two 3-arc polyarcs:
# 1: {X - start to x1,
        #               x1 to y2 along circle (negative direction),
        #               Y - y2 to end}
        #           2: {Y start to y1,
        #               y1 to x2 along circle (negative direction),
# X - x2 to end}
# b) The polyarc endpoints are both inside the circle
# same as above, but the "along circle" arc directions are flipped and subtraction/intersection parts are exchanged
# III. Two intersection points
# a) One arc, X, has two intersection points i & j, another arc, Y, has no intersection points
# a.1) Polyarc endpoints are outside the circle
# result_subtraction = {X from start to i, circle i to j (direction = negative), X j to end, Y}
        #              result_intersection = {X i to j, circle j to i (direction = positive)}
# a.2) Polyarc endpoints are inside the circle
# result_subtraction = {X i to j, circle j to i negative}
# result_intersection = {X 0 to i, circle i to j positive, X j to end, Y}
# b) Both arcs, X and Y, have one intersection point each. In this case one of the arc endpoints must be inside circle, another outside.
# call the arc that starts with the outside point X, the other arc Y.
# result_subtraction = {X start to intersection, intersection to intersection along circle (negative direction), Y from intersection to end}
        #      result_intersection = {X intersection to end, Y start to intersection, intersection to intersection along circle (positive)}
center = np.asarray(center)
intersections = [a.intersect_circle(center, radius) for a in self.arcs]
if len(intersections[0]) == 0 and len(intersections[1]) == 0:
# Case I
if point_in_circle(self.arcs[0].start_point(), center, radius):
# Case I.a)
return [VennEmptyRegion(), self]
else:
# Case I.b)
return [self, VennEmptyRegion()]
elif len(intersections[0]) == 2 and len(intersections[1]) == 2:
# Case II. a) or b)
case_II_a = not point_in_circle(self.arcs[0].start_point(), center, radius)
a1 = self.arcs[0].subarc_between_points(None, intersections[0][0])
a2 = Arc(center, radius,
vector_angle_in_degrees(intersections[0][0] - center),
vector_angle_in_degrees(intersections[1][1] - center),
not case_II_a)
a2.fix_360_to_0()
a3 = self.arcs[1].subarc_between_points(intersections[1][1], None)
piece1 = VennArcgonRegion([a1, a2, a3])
b1 = self.arcs[1].subarc_between_points(None, intersections[1][0])
b2 = Arc(center, radius,
vector_angle_in_degrees(intersections[1][0] - center),
vector_angle_in_degrees(intersections[0][1] - center),
not case_II_a)
b2.fix_360_to_0()
b3 = self.arcs[0].subarc_between_points(intersections[0][1], None)
piece2 = VennArcgonRegion([b1, b2, b3])
subtraction = VennMultipieceRegion([piece1, piece2])
c1 = self.arcs[0].subarc(a1.to_angle, b3.from_angle)
c2 = b2.reversed()
c3 = self.arcs[1].subarc(b1.to_angle, a3.from_angle)
c4 = a2.reversed()
intersection = VennArcgonRegion([c1, c2, c3, c4])
return [subtraction, intersection] if case_II_a else [intersection, subtraction]
else:
# Case III. Yuck.
if len(intersections[0]) == 0 or len(intersections[1]) == 0:
# Case III.a)
x = 0 if len(intersections[0]) != 0 else 1
y = 1 - x
if len(intersections[x]) != 2:
warnings.warn("Numeric precision error during polyarc intersection, case IIIa. Expect wrong results.")
intersections[x] = [intersections[x][0], intersections[x][0]] # This way we'll at least produce some result, although it will probably be wrong
if not point_in_circle(self.arcs[0].start_point(), center, radius):
# Case III.a.1)
# result_subtraction = {X from start to i, circle i to j (direction = negative), X j to end, Y}
a1 = self.arcs[x].subarc_between_points(None, intersections[x][0])
a2 = Arc(center, radius,
vector_angle_in_degrees(intersections[x][0] - center),
vector_angle_in_degrees(intersections[x][1] - center),
False)
a3 = self.arcs[x].subarc_between_points(intersections[x][1], None)
a4 = self.arcs[y]
subtraction = VennArcgonRegion([a1, a2, a3, a4])
# result_intersection = {X i to j, circle j to i (direction = positive)}
b1 = self.arcs[x].subarc(a1.to_angle, a3.from_angle)
b2 = a2.reversed()
intersection = VennArcgonRegion([b1, b2])
return [subtraction, intersection]
else:
# Case III.a.2)
# result_subtraction = {X i to j, circle j to i negative}
a1 = self.arcs[x].subarc_between_points(intersections[x][0], intersections[x][1])
a2 = Arc(center, radius,
vector_angle_in_degrees(intersections[x][1] - center),
vector_angle_in_degrees(intersections[x][0] - center),
False)
subtraction = VennArcgonRegion([a1, a2])
# result_intersection = {X 0 to i, circle i to j positive, X j to end, Y}
b1 = self.arcs[x].subarc(None, a1.from_angle)
b2 = a2.reversed()
b3 = self.arcs[x].subarc(a1.to_angle, None)
b4 = self.arcs[y]
intersection = VennArcgonRegion([b1, b2, b3, b4])
return [subtraction, intersection]
else:
# Case III.b)
if len(intersections[0]) == 2 or len(intersections[1]) == 2:
warnings.warn("Numeric precision error during polyarc intersection, case IIIb. Expect wrong results.")
# One of the arcs must start outside the circle, call it x
x = 0 if not point_in_circle(self.arcs[0].start_point(), center, radius) else 1
y = 1 - x
a1 = self.arcs[x].subarc_between_points(None, intersections[x][0])
a2 = Arc(center, radius,
vector_angle_in_degrees(intersections[x][0] - center),
vector_angle_in_degrees(intersections[y][0] - center), False)
a3 = self.arcs[y].subarc_between_points(intersections[y][0], None)
subtraction = VennArcgonRegion([a1, a2, a3])
b1 = self.arcs[x].subarc(a1.to_angle, None)
b2 = self.arcs[y].subarc(None, a3.from_angle)
b3 = a2.reversed()
intersection = VennArcgonRegion([b1, b2, b3])
return [subtraction, intersection] | [
"def",
"subtract_and_intersect_circle",
"(",
"self",
",",
"center",
",",
"radius",
")",
":",
"if",
"len",
"(",
"self",
".",
"arcs",
")",
"!=",
"2",
":",
"raise",
"VennRegionException",
"(",
"\"Circle subtraction and intersection with poly-arc regions is currently only supported for 2-arc-gons.\"",
")",
"# In the following we consider the 2-arc-gon case.",
"# Before we do anything, we check for a special case, where the circle of interest is one of the two circles forming the arcs.",
"# In this case we can determine the answer quite easily.",
"matching_arcs",
"=",
"[",
"a",
"for",
"a",
"in",
"self",
".",
"arcs",
"if",
"a",
".",
"lies_on_circle",
"(",
"center",
",",
"radius",
")",
"]",
"if",
"len",
"(",
"matching_arcs",
")",
"!=",
"0",
":",
"# If the circle matches a positive arc, the result is [empty, self], otherwise [self, empty]",
"return",
"[",
"VennEmptyRegion",
"(",
")",
",",
"self",
"]",
"if",
"matching_arcs",
"[",
"0",
"]",
".",
"direction",
"else",
"[",
"self",
",",
"VennEmptyRegion",
"(",
")",
"]",
"# Consider the intersection points of the circle with the arcs.",
"# If any of the intersection points corresponds exactly to any of the arc's endpoints, we will end up with",
"# a lot of messy special cases (as if the usual situation is not messy enough, eh).",
"# To avoid that, we cheat by slightly increasing the circle's radius until this is not the case any more.",
"center",
"=",
"np",
".",
"asarray",
"(",
"center",
")",
"illegal_intersections",
"=",
"[",
"a",
".",
"start_point",
"(",
")",
"for",
"a",
"in",
"self",
".",
"arcs",
"]",
"while",
"True",
":",
"valid",
"=",
"True",
"intersections",
"=",
"[",
"a",
".",
"intersect_circle",
"(",
"center",
",",
"radius",
")",
"for",
"a",
"in",
"self",
".",
"arcs",
"]",
"for",
"ints",
"in",
"intersections",
":",
"for",
"pt",
"in",
"ints",
":",
"for",
"illegal_pt",
"in",
"illegal_intersections",
":",
"if",
"np",
".",
"all",
"(",
"abs",
"(",
"pt",
"-",
"illegal_pt",
")",
"<",
"tol",
")",
":",
"valid",
"=",
"False",
"if",
"valid",
":",
"break",
"else",
":",
"radius",
"+=",
"tol",
"# There must be an even number of those points in total.",
"# (If this is not the case, then we have an unfortunate case with weird numeric errors [TODO: find examples and deal with it?]).",
"# There are three possibilities with the following subcases:",
"# I. No intersection points",
"# a) The polyarc is completely within the circle.",
"# result = [ empty, self ]",
"# b) The polyarc is completely outside the circle.",
"# result = [ self, empty ]",
"# II. Four intersection points, two for each arc. Points x1, x2 for arc X and y1, y2 for arc Y, ordered along the arc.",
"# a) The polyarc endpoints are both outside the circle.",
"# result_subtraction = a combination of two 3-arc polyarcs:",
"# 1: {X - start to x1,",
"# x1 to y2 along circle (negative direction)),",
"# Y - y2 to end}",
"# 2: {Y start to y1,",
"# y1 to x2 along circle (negative direction)),",
"# X - x2 to end}",
"# b) The polyarc endpoints are both inside the circle",
"# same as above, but the \"along circle\" arc directions are flipped and subtraction/intersection parts are exchanged",
"# III. Two intersection points",
"# a) One arc, X, has two intersection points i & j, another arc, Y, has no intersection points",
"# a.1) Polyarc endpoints are outside the circle",
"# result_subtraction = {X from start to i, circle i to j (direction = negative), X j to end, Y}",
"# result_intersection = {X i to j, circle j to i (direction = positive}",
"# a.2) Polyarc endpoints are inside the circle",
"# result_subtraction = {X i to j, circle j to i negative}",
"# result_intersection = {X 0 to i, circle i to j positive, X j to end, Y}",
"# b) Both arcs, X and Y, have one intersection point each. In this case one of the arc endpoints must be inside circle, another outside.",
"# call the arc that starts with the outside point X, the other arc Y.",
"# result_subtraction = {X start to intersection, intersection to intersection along circle (negative direction), Y from intersection to end}",
"# result_intersection = {X intersection to end, Y start to intersecton, intersection to intersecion along circle (positive)}",
"center",
"=",
"np",
".",
"asarray",
"(",
"center",
")",
"intersections",
"=",
"[",
"a",
".",
"intersect_circle",
"(",
"center",
",",
"radius",
")",
"for",
"a",
"in",
"self",
".",
"arcs",
"]",
"if",
"len",
"(",
"intersections",
"[",
"0",
"]",
")",
"==",
"0",
"and",
"len",
"(",
"intersections",
"[",
"1",
"]",
")",
"==",
"0",
":",
"# Case I",
"if",
"point_in_circle",
"(",
"self",
".",
"arcs",
"[",
"0",
"]",
".",
"start_point",
"(",
")",
",",
"center",
",",
"radius",
")",
":",
"# Case I.a)",
"return",
"[",
"VennEmptyRegion",
"(",
")",
",",
"self",
"]",
"else",
":",
"# Case I.b)",
"return",
"[",
"self",
",",
"VennEmptyRegion",
"(",
")",
"]",
"elif",
"len",
"(",
"intersections",
"[",
"0",
"]",
")",
"==",
"2",
"and",
"len",
"(",
"intersections",
"[",
"1",
"]",
")",
"==",
"2",
":",
"# Case II. a) or b)",
"case_II_a",
"=",
"not",
"point_in_circle",
"(",
"self",
".",
"arcs",
"[",
"0",
"]",
".",
"start_point",
"(",
")",
",",
"center",
",",
"radius",
")",
"a1",
"=",
"self",
".",
"arcs",
"[",
"0",
"]",
".",
"subarc_between_points",
"(",
"None",
",",
"intersections",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"a2",
"=",
"Arc",
"(",
"center",
",",
"radius",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"0",
"]",
"[",
"0",
"]",
"-",
"center",
")",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"1",
"]",
"[",
"1",
"]",
"-",
"center",
")",
",",
"not",
"case_II_a",
")",
"a2",
".",
"fix_360_to_0",
"(",
")",
"a3",
"=",
"self",
".",
"arcs",
"[",
"1",
"]",
".",
"subarc_between_points",
"(",
"intersections",
"[",
"1",
"]",
"[",
"1",
"]",
",",
"None",
")",
"piece1",
"=",
"VennArcgonRegion",
"(",
"[",
"a1",
",",
"a2",
",",
"a3",
"]",
")",
"b1",
"=",
"self",
".",
"arcs",
"[",
"1",
"]",
".",
"subarc_between_points",
"(",
"None",
",",
"intersections",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"b2",
"=",
"Arc",
"(",
"center",
",",
"radius",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"1",
"]",
"[",
"0",
"]",
"-",
"center",
")",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"0",
"]",
"[",
"1",
"]",
"-",
"center",
")",
",",
"not",
"case_II_a",
")",
"b2",
".",
"fix_360_to_0",
"(",
")",
"b3",
"=",
"self",
".",
"arcs",
"[",
"0",
"]",
".",
"subarc_between_points",
"(",
"intersections",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"None",
")",
"piece2",
"=",
"VennArcgonRegion",
"(",
"[",
"b1",
",",
"b2",
",",
"b3",
"]",
")",
"subtraction",
"=",
"VennMultipieceRegion",
"(",
"[",
"piece1",
",",
"piece2",
"]",
")",
"c1",
"=",
"self",
".",
"arcs",
"[",
"0",
"]",
".",
"subarc",
"(",
"a1",
".",
"to_angle",
",",
"b3",
".",
"from_angle",
")",
"c2",
"=",
"b2",
".",
"reversed",
"(",
")",
"c3",
"=",
"self",
".",
"arcs",
"[",
"1",
"]",
".",
"subarc",
"(",
"b1",
".",
"to_angle",
",",
"a3",
".",
"from_angle",
")",
"c4",
"=",
"a2",
".",
"reversed",
"(",
")",
"intersection",
"=",
"VennArcgonRegion",
"(",
"[",
"c1",
",",
"c2",
",",
"c3",
",",
"c4",
"]",
")",
"return",
"[",
"subtraction",
",",
"intersection",
"]",
"if",
"case_II_a",
"else",
"[",
"intersection",
",",
"subtraction",
"]",
"else",
":",
"# Case III. Yuck.",
"if",
"len",
"(",
"intersections",
"[",
"0",
"]",
")",
"==",
"0",
"or",
"len",
"(",
"intersections",
"[",
"1",
"]",
")",
"==",
"0",
":",
"# Case III.a)",
"x",
"=",
"0",
"if",
"len",
"(",
"intersections",
"[",
"0",
"]",
")",
"!=",
"0",
"else",
"1",
"y",
"=",
"1",
"-",
"x",
"if",
"len",
"(",
"intersections",
"[",
"x",
"]",
")",
"!=",
"2",
":",
"warnings",
".",
"warn",
"(",
"\"Numeric precision error during polyarc intersection, case IIIa. Expect wrong results.\"",
")",
"intersections",
"[",
"x",
"]",
"=",
"[",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
",",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
"]",
"# This way we'll at least produce some result, although it will probably be wrong",
"if",
"not",
"point_in_circle",
"(",
"self",
".",
"arcs",
"[",
"0",
"]",
".",
"start_point",
"(",
")",
",",
"center",
",",
"radius",
")",
":",
"# Case III.a.1)",
"# result_subtraction = {X from start to i, circle i to j (direction = negative), X j to end, Y}",
"a1",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc_between_points",
"(",
"None",
",",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
")",
"a2",
"=",
"Arc",
"(",
"center",
",",
"radius",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
"-",
"center",
")",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"x",
"]",
"[",
"1",
"]",
"-",
"center",
")",
",",
"False",
")",
"a3",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc_between_points",
"(",
"intersections",
"[",
"x",
"]",
"[",
"1",
"]",
",",
"None",
")",
"a4",
"=",
"self",
".",
"arcs",
"[",
"y",
"]",
"subtraction",
"=",
"VennArcgonRegion",
"(",
"[",
"a1",
",",
"a2",
",",
"a3",
",",
"a4",
"]",
")",
"# result_intersection = {X i to j, circle j to i (direction = positive)}",
"b1",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc",
"(",
"a1",
".",
"to_angle",
",",
"a3",
".",
"from_angle",
")",
"b2",
"=",
"a2",
".",
"reversed",
"(",
")",
"intersection",
"=",
"VennArcgonRegion",
"(",
"[",
"b1",
",",
"b2",
"]",
")",
"return",
"[",
"subtraction",
",",
"intersection",
"]",
"else",
":",
"# Case III.a.2)",
"# result_subtraction = {X i to j, circle j to i negative}",
"a1",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc_between_points",
"(",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
",",
"intersections",
"[",
"x",
"]",
"[",
"1",
"]",
")",
"a2",
"=",
"Arc",
"(",
"center",
",",
"radius",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"x",
"]",
"[",
"1",
"]",
"-",
"center",
")",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
"-",
"center",
")",
",",
"False",
")",
"subtraction",
"=",
"VennArcgonRegion",
"(",
"[",
"a1",
",",
"a2",
"]",
")",
"# result_intersection = {X 0 to i, circle i to j positive, X j to end, Y}",
"b1",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc",
"(",
"None",
",",
"a1",
".",
"from_angle",
")",
"b2",
"=",
"a2",
".",
"reversed",
"(",
")",
"b3",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc",
"(",
"a1",
".",
"to_angle",
",",
"None",
")",
"b4",
"=",
"self",
".",
"arcs",
"[",
"y",
"]",
"intersection",
"=",
"VennArcgonRegion",
"(",
"[",
"b1",
",",
"b2",
",",
"b3",
",",
"b4",
"]",
")",
"return",
"[",
"subtraction",
",",
"intersection",
"]",
"else",
":",
"# Case III.b)",
"if",
"len",
"(",
"intersections",
"[",
"0",
"]",
")",
"==",
"2",
"or",
"len",
"(",
"intersections",
"[",
"1",
"]",
")",
"==",
"2",
":",
"warnings",
".",
"warn",
"(",
"\"Numeric precision error during polyarc intersection, case IIIb. Expect wrong results.\"",
")",
"# One of the arcs must start outside the circle, call it x",
"x",
"=",
"0",
"if",
"not",
"point_in_circle",
"(",
"self",
".",
"arcs",
"[",
"0",
"]",
".",
"start_point",
"(",
")",
",",
"center",
",",
"radius",
")",
"else",
"1",
"y",
"=",
"1",
"-",
"x",
"a1",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc_between_points",
"(",
"None",
",",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
")",
"a2",
"=",
"Arc",
"(",
"center",
",",
"radius",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"x",
"]",
"[",
"0",
"]",
"-",
"center",
")",
",",
"vector_angle_in_degrees",
"(",
"intersections",
"[",
"y",
"]",
"[",
"0",
"]",
"-",
"center",
")",
",",
"False",
")",
"a3",
"=",
"self",
".",
"arcs",
"[",
"y",
"]",
".",
"subarc_between_points",
"(",
"intersections",
"[",
"y",
"]",
"[",
"0",
"]",
",",
"None",
")",
"subtraction",
"=",
"VennArcgonRegion",
"(",
"[",
"a1",
",",
"a2",
",",
"a3",
"]",
")",
"b1",
"=",
"self",
".",
"arcs",
"[",
"x",
"]",
".",
"subarc",
"(",
"a1",
".",
"to_angle",
",",
"None",
")",
"b2",
"=",
"self",
".",
"arcs",
"[",
"y",
"]",
".",
"subarc",
"(",
"None",
",",
"a3",
".",
"from_angle",
")",
"b3",
"=",
"a2",
".",
"reversed",
"(",
")",
"intersection",
"=",
"VennArcgonRegion",
"(",
"[",
"b1",
",",
"b2",
",",
"b3",
"]",
")",
"return",
"[",
"subtraction",
",",
"intersection",
"]"
]
| 59.864407 | 31.062147 |
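
For reference, a minimal sketch of the point_in_circle predicate used throughout the case analysis above; the real helper lives elsewhere in the package, so this body is an assumption:

    import numpy as np

    def point_in_circle(pt, center, radius):
        # True if pt lies inside (or on the boundary of) the circle.
        return np.linalg.norm(np.asarray(pt) - np.asarray(center)) <= radius
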
def grouper(iterable, n, fillvalue=None):
"""Group iterable by n elements.
>>> for t in grouper('abcdefg', 3, fillvalue='x'):
... print(''.join(t))
abc
def
gxx
"""
return list(zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue)) | [
"def",
"grouper",
"(",
"iterable",
",",
"n",
",",
"fillvalue",
"=",
"None",
")",
":",
"return",
"list",
"(",
"zip_longest",
"(",
"*",
"[",
"iter",
"(",
"iterable",
")",
"]",
"*",
"n",
",",
"fillvalue",
"=",
"fillvalue",
")",
")"
]
| 26 | 19.7 |
def enrichment_from_msp(dfmsp, modification="Phospho (STY)"):
"""
Calculate relative enrichment of peptide modifications from modificationSpecificPeptides.txt.
Taking a modifiedsitepeptides ``DataFrame`` returns the relative enrichment of the specified
modification in the table.
The returned data columns are generated from the input data columns.
    :param dfmsp: Pandas ``DataFrame`` of modificationSpecificPeptides
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
"""
dfmsp['Modifications'] = np.array([modification in m for m in dfmsp['Modifications']])
dfmsp = dfmsp.set_index(['Modifications'])
dfmsp = dfmsp.filter(regex='Intensity ')
    dfmsp[dfmsp == 0] = np.nan
df_r = dfmsp.sum(axis=0, level=0)
modified = df_r.loc[True].values
total = df_r.sum(axis=0).values
enrichment = modified / total
return pd.DataFrame([enrichment], columns=dfmsp.columns, index=['% Enrichment']) | [
"def",
"enrichment_from_msp",
"(",
"dfmsp",
",",
"modification",
"=",
"\"Phospho (STY)\"",
")",
":",
"dfmsp",
"[",
"'Modifications'",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"modification",
"in",
"m",
"for",
"m",
"in",
"dfmsp",
"[",
"'Modifications'",
"]",
"]",
")",
"dfmsp",
"=",
"dfmsp",
".",
"set_index",
"(",
"[",
"'Modifications'",
"]",
")",
"dfmsp",
"=",
"dfmsp",
".",
"filter",
"(",
"regex",
"=",
"'Intensity '",
")",
"dfmsp",
"[",
"dfmsp",
"==",
"0",
"]",
"=",
"np",
".",
"nan",
"df_r",
"=",
"dfmsp",
".",
"sum",
"(",
"axis",
"=",
"0",
",",
"level",
"=",
"0",
")",
"modified",
"=",
"df_r",
".",
"loc",
"[",
"True",
"]",
".",
"values",
"total",
"=",
"df_r",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
".",
"values",
"enrichment",
"=",
"modified",
"/",
"total",
"return",
"pd",
".",
"DataFrame",
"(",
"[",
"enrichment",
"]",
",",
"columns",
"=",
"dfmsp",
".",
"columns",
",",
"index",
"=",
"[",
"'% Enrichment'",
"]",
")"
]
| 38.24 | 26.32 |
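
A usage sketch with a toy table; the column names follow the Intensity prefix that the filter above selects on:

    import pandas as pd

    df = pd.DataFrame({
        'Modifications': ['Phospho (STY)', 'Unmodified'],
        'Intensity A': [30.0, 10.0],
        'Intensity B': [5.0, 15.0],
    })
    result = enrichment_from_msp(df)
    # result holds one '% Enrichment' row: 0.75 under 'Intensity A'
    # (30 / 40) and 0.25 under 'Intensity B' (5 / 20).
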
def integer(description, **kwargs) -> typing.Type:
"""Create a :class:`~doctor.types.Integer` type.
:param description: A description of the type.
:param kwargs: Can include any attribute defined in
:class:`~doctor.types.Integer`
"""
kwargs['description'] = description
return type('Integer', (Integer,), kwargs) | [
"def",
"integer",
"(",
"description",
",",
"*",
"*",
"kwargs",
")",
"->",
"typing",
".",
"Type",
":",
"kwargs",
"[",
"'description'",
"]",
"=",
"description",
"return",
"type",
"(",
"'Integer'",
",",
"(",
"Integer",
",",
")",
",",
"kwargs",
")"
]
| 37.444444 | 9.333333 |
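
A usage sketch; the keyword names below are assumptions about attributes the referenced Integer class accepts, while description is always set:

    Age = integer('Age of the user in years', minimum=0, maximum=130)
    # Age is a dynamically created subclass of Integer whose class
    # attributes carry the description and the assumed bounds.
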
def raise_(type_, value=None, traceback=None): # pylint: disable=W0613
"""
Does the same as ordinary ``raise`` with arguments do in Python 2.
But works in Python 3 (>= 3.3) also!
    Please check out the README on https://github.com/9seconds/pep3134
    to get an idea about possible pitfalls. The short story is: please
    be careful with tracebacks. If it is possible, use sys.exc_info
instead. But in most cases it will work as you expect.
"""
if type_.__traceback__ is not traceback:
raise type_.with_traceback(traceback)
raise type_ | [
"def",
"raise_",
"(",
"type_",
",",
"value",
"=",
"None",
",",
"traceback",
"=",
"None",
")",
":",
"# pylint: disable=W0613",
"if",
"type_",
".",
"__traceback__",
"is",
"not",
"traceback",
":",
"raise",
"type_",
".",
"with_traceback",
"(",
"traceback",
")",
"raise",
"type_"
]
| 40.428571 | 20.142857 |
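
A hedged usage sketch of re-raising with a preserved traceback, the main Python-2-style pattern this helper enables:

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        exc_type, exc_value, exc_tb = sys.exc_info()
        # Pass the exception *instance*: raise_ calls with_traceback()
        # on its first argument, so a bare class would not work here.
        raise_(exc_value, None, exc_tb)
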
def is_denied(self, role, method, resource):
"""Check wherther role is denied to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked.
"""
return (role, method, resource) in self._denied | [
"def",
"is_denied",
"(",
"self",
",",
"role",
",",
"method",
",",
"resource",
")",
":",
"return",
"(",
"role",
",",
"method",
",",
"resource",
")",
"in",
"self",
".",
"_denied"
]
| 38.25 | 9.5 |
def main():
'''
main function.
'''
args = parse_args()
if args.multi_thread:
enable_multi_thread()
if args.advisor_class_name:
# advisor is enabled and starts to run
if args.multi_phase:
raise AssertionError('multi_phase has not been supported in advisor')
if args.advisor_class_name in AdvisorModuleName:
dispatcher = create_builtin_class_instance(
args.advisor_class_name,
args.advisor_args, True)
else:
dispatcher = create_customized_class_instance(
args.advisor_directory,
args.advisor_class_filename,
args.advisor_class_name,
args.advisor_args)
if dispatcher is None:
raise AssertionError('Failed to create Advisor instance')
try:
dispatcher.run()
except Exception as exception:
logger.exception(exception)
raise
else:
# tuner (and assessor) is enabled and starts to run
tuner = None
assessor = None
if args.tuner_class_name in ModuleName:
tuner = create_builtin_class_instance(
args.tuner_class_name,
args.tuner_args)
else:
tuner = create_customized_class_instance(
args.tuner_directory,
args.tuner_class_filename,
args.tuner_class_name,
args.tuner_args)
if tuner is None:
raise AssertionError('Failed to create Tuner instance')
if args.assessor_class_name:
if args.assessor_class_name in ModuleName:
assessor = create_builtin_class_instance(
args.assessor_class_name,
args.assessor_args)
else:
assessor = create_customized_class_instance(
args.assessor_directory,
args.assessor_class_filename,
args.assessor_class_name,
args.assessor_args)
if assessor is None:
raise AssertionError('Failed to create Assessor instance')
if args.multi_phase:
dispatcher = MultiPhaseMsgDispatcher(tuner, assessor)
else:
dispatcher = MsgDispatcher(tuner, assessor)
try:
dispatcher.run()
tuner._on_exit()
if assessor is not None:
assessor._on_exit()
except Exception as exception:
logger.exception(exception)
tuner._on_error()
if assessor is not None:
assessor._on_error()
raise | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_args",
"(",
")",
"if",
"args",
".",
"multi_thread",
":",
"enable_multi_thread",
"(",
")",
"if",
"args",
".",
"advisor_class_name",
":",
"# advisor is enabled and starts to run",
"if",
"args",
".",
"multi_phase",
":",
"raise",
"AssertionError",
"(",
"'multi_phase has not been supported in advisor'",
")",
"if",
"args",
".",
"advisor_class_name",
"in",
"AdvisorModuleName",
":",
"dispatcher",
"=",
"create_builtin_class_instance",
"(",
"args",
".",
"advisor_class_name",
",",
"args",
".",
"advisor_args",
",",
"True",
")",
"else",
":",
"dispatcher",
"=",
"create_customized_class_instance",
"(",
"args",
".",
"advisor_directory",
",",
"args",
".",
"advisor_class_filename",
",",
"args",
".",
"advisor_class_name",
",",
"args",
".",
"advisor_args",
")",
"if",
"dispatcher",
"is",
"None",
":",
"raise",
"AssertionError",
"(",
"'Failed to create Advisor instance'",
")",
"try",
":",
"dispatcher",
".",
"run",
"(",
")",
"except",
"Exception",
"as",
"exception",
":",
"logger",
".",
"exception",
"(",
"exception",
")",
"raise",
"else",
":",
"# tuner (and assessor) is enabled and starts to run",
"tuner",
"=",
"None",
"assessor",
"=",
"None",
"if",
"args",
".",
"tuner_class_name",
"in",
"ModuleName",
":",
"tuner",
"=",
"create_builtin_class_instance",
"(",
"args",
".",
"tuner_class_name",
",",
"args",
".",
"tuner_args",
")",
"else",
":",
"tuner",
"=",
"create_customized_class_instance",
"(",
"args",
".",
"tuner_directory",
",",
"args",
".",
"tuner_class_filename",
",",
"args",
".",
"tuner_class_name",
",",
"args",
".",
"tuner_args",
")",
"if",
"tuner",
"is",
"None",
":",
"raise",
"AssertionError",
"(",
"'Failed to create Tuner instance'",
")",
"if",
"args",
".",
"assessor_class_name",
":",
"if",
"args",
".",
"assessor_class_name",
"in",
"ModuleName",
":",
"assessor",
"=",
"create_builtin_class_instance",
"(",
"args",
".",
"assessor_class_name",
",",
"args",
".",
"assessor_args",
")",
"else",
":",
"assessor",
"=",
"create_customized_class_instance",
"(",
"args",
".",
"assessor_directory",
",",
"args",
".",
"assessor_class_filename",
",",
"args",
".",
"assessor_class_name",
",",
"args",
".",
"assessor_args",
")",
"if",
"assessor",
"is",
"None",
":",
"raise",
"AssertionError",
"(",
"'Failed to create Assessor instance'",
")",
"if",
"args",
".",
"multi_phase",
":",
"dispatcher",
"=",
"MultiPhaseMsgDispatcher",
"(",
"tuner",
",",
"assessor",
")",
"else",
":",
"dispatcher",
"=",
"MsgDispatcher",
"(",
"tuner",
",",
"assessor",
")",
"try",
":",
"dispatcher",
".",
"run",
"(",
")",
"tuner",
".",
"_on_exit",
"(",
")",
"if",
"assessor",
"is",
"not",
"None",
":",
"assessor",
".",
"_on_exit",
"(",
")",
"except",
"Exception",
"as",
"exception",
":",
"logger",
".",
"exception",
"(",
"exception",
")",
"tuner",
".",
"_on_error",
"(",
")",
"if",
"assessor",
"is",
"not",
"None",
":",
"assessor",
".",
"_on_error",
"(",
")",
"raise"
]
| 33.717949 | 15.384615 |
def index():
"""Basic test view."""
identity = g.identity
actions = {}
for action in access.actions.values():
actions[action.value] = DynamicPermission(action).allows(identity)
if current_user.is_anonymous:
return render_template("invenio_access/open.html",
actions=actions,
identity=identity)
else:
return render_template("invenio_access/limited.html",
message='',
actions=actions,
identity=identity) | [
"def",
"index",
"(",
")",
":",
"identity",
"=",
"g",
".",
"identity",
"actions",
"=",
"{",
"}",
"for",
"action",
"in",
"access",
".",
"actions",
".",
"values",
"(",
")",
":",
"actions",
"[",
"action",
".",
"value",
"]",
"=",
"DynamicPermission",
"(",
"action",
")",
".",
"allows",
"(",
"identity",
")",
"if",
"current_user",
".",
"is_anonymous",
":",
"return",
"render_template",
"(",
"\"invenio_access/open.html\"",
",",
"actions",
"=",
"actions",
",",
"identity",
"=",
"identity",
")",
"else",
":",
"return",
"render_template",
"(",
"\"invenio_access/limited.html\"",
",",
"message",
"=",
"''",
",",
"actions",
"=",
"actions",
",",
"identity",
"=",
"identity",
")"
]
| 36.875 | 15.875 |
def normalizeGlyphUnicodes(value):
"""
Normalizes glyph unicodes.
* **value** must be a ``list``.
* **value** items must normalize as glyph unicodes with
:func:`normalizeGlyphUnicode`.
* **value** must not repeat unicode values.
* Returned value will be a ``tuple`` of ints.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Glyph unicodes must be a list, not %s."
% type(value).__name__)
values = [normalizeGlyphUnicode(v) for v in value]
duplicates = [v for v, count in Counter(value).items() if count > 1]
if len(duplicates) != 0:
raise ValueError("Duplicate unicode values are not allowed.")
return tuple(values) | [
"def",
"normalizeGlyphUnicodes",
"(",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Glyph unicodes must be a list, not %s.\"",
"%",
"type",
"(",
"value",
")",
".",
"__name__",
")",
"values",
"=",
"[",
"normalizeGlyphUnicode",
"(",
"v",
")",
"for",
"v",
"in",
"value",
"]",
"duplicates",
"=",
"[",
"v",
"for",
"v",
",",
"count",
"in",
"Counter",
"(",
"value",
")",
".",
"items",
"(",
")",
"if",
"count",
">",
"1",
"]",
"if",
"len",
"(",
"duplicates",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Duplicate unicode values are not allowed.\"",
")",
"return",
"tuple",
"(",
"values",
")"
]
| 39.222222 | 13.222222 |
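
A short usage sketch, assuming normalizeGlyphUnicode returns valid ints unchanged:

    normalizeGlyphUnicodes([65, 66, 67])   # -> (65, 66, 67)
    normalizeGlyphUnicodes((0x0041,))      # tuples are accepted -> (65,)
    normalizeGlyphUnicodes([65, 65])       # raises ValueError (duplicate)
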
def set_extension(self, ext):
"""
RETURN NEW FILE WITH GIVEN EXTENSION
"""
path = self._filename.split("/")
parts = path[-1].split(".")
if len(parts) == 1:
parts.append(ext)
else:
parts[-1] = ext
path[-1] = ".".join(parts)
return File("/".join(path)) | [
"def",
"set_extension",
"(",
"self",
",",
"ext",
")",
":",
"path",
"=",
"self",
".",
"_filename",
".",
"split",
"(",
"\"/\"",
")",
"parts",
"=",
"path",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"\".\"",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"parts",
".",
"append",
"(",
"ext",
")",
"else",
":",
"parts",
"[",
"-",
"1",
"]",
"=",
"ext",
"path",
"[",
"-",
"1",
"]",
"=",
"\".\"",
".",
"join",
"(",
"parts",
")",
"return",
"File",
"(",
"\"/\"",
".",
"join",
"(",
"path",
")",
")"
]
| 25.769231 | 10.384615 |
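
A usage sketch of the extension swap, covering both branches above:

    File('/tmp/report.txt').set_extension('json')  # -> File('/tmp/report.json')
    File('/tmp/Makefile').set_extension('bak')     # -> File('/tmp/Makefile.bak')
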
def set_soup(self):
"""Sets soup and strips items"""
self.soup = BeautifulSoup(self.feed_content, "html.parser")
for item in self.soup.findAll('item'):
item.decompose()
for image in self.soup.findAll('image'):
image.decompose() | [
"def",
"set_soup",
"(",
"self",
")",
":",
"self",
".",
"soup",
"=",
"BeautifulSoup",
"(",
"self",
".",
"feed_content",
",",
"\"html.parser\"",
")",
"for",
"item",
"in",
"self",
".",
"soup",
".",
"findAll",
"(",
"'item'",
")",
":",
"item",
".",
"decompose",
"(",
")",
"for",
"image",
"in",
"self",
".",
"soup",
".",
"findAll",
"(",
"'image'",
")",
":",
"image",
".",
"decompose",
"(",
")"
]
| 39.571429 | 12.142857 |
def parse_workflow_call_body_declarations(self, i):
"""
        This has not been seen in use, so it is expected to return "[]".
:param i:
:return:
"""
declaration_array = []
if isinstance(i, wdl_parser.Terminal):
declaration_array = [i.source_string]
elif isinstance(i, wdl_parser.Ast):
raise NotImplementedError
elif isinstance(i, wdl_parser.AstList):
for ast in i:
declaration_array.append(self.parse_task_declaration(ast))
# have not seen this used so raise to check
if declaration_array:
raise NotImplementedError
return declaration_array | [
"def",
"parse_workflow_call_body_declarations",
"(",
"self",
",",
"i",
")",
":",
"declaration_array",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"i",
",",
"wdl_parser",
".",
"Terminal",
")",
":",
"declaration_array",
"=",
"[",
"i",
".",
"source_string",
"]",
"elif",
"isinstance",
"(",
"i",
",",
"wdl_parser",
".",
"Ast",
")",
":",
"raise",
"NotImplementedError",
"elif",
"isinstance",
"(",
"i",
",",
"wdl_parser",
".",
"AstList",
")",
":",
"for",
"ast",
"in",
"i",
":",
"declaration_array",
".",
"append",
"(",
"self",
".",
"parse_task_declaration",
"(",
"ast",
")",
")",
"# have not seen this used so raise to check",
"if",
"declaration_array",
":",
"raise",
"NotImplementedError",
"return",
"declaration_array"
]
| 31.666667 | 15.095238 |
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
"""
location = getattr(path_spec, 'location', None)
if (location is None or
not location.startswith(self.LOCATION_ROOT)):
return False
if len(location) == 1:
return True
try:
self._tar_file.getmember(location[1:])
return True
except KeyError:
pass
# Check if location could be a virtual directory.
for name in iter(self._tar_file.getnames()):
# The TAR info name does not have the leading path separator as
# the location string does.
if name.startswith(location[1:]):
return True
return False | [
"def",
"FileEntryExistsByPathSpec",
"(",
"self",
",",
"path_spec",
")",
":",
"location",
"=",
"getattr",
"(",
"path_spec",
",",
"'location'",
",",
"None",
")",
"if",
"(",
"location",
"is",
"None",
"or",
"not",
"location",
".",
"startswith",
"(",
"self",
".",
"LOCATION_ROOT",
")",
")",
":",
"return",
"False",
"if",
"len",
"(",
"location",
")",
"==",
"1",
":",
"return",
"True",
"try",
":",
"self",
".",
"_tar_file",
".",
"getmember",
"(",
"location",
"[",
"1",
":",
"]",
")",
"return",
"True",
"except",
"KeyError",
":",
"pass",
"# Check if location could be a virtual directory.",
"for",
"name",
"in",
"iter",
"(",
"self",
".",
"_tar_file",
".",
"getnames",
"(",
")",
")",
":",
"# The TAR info name does not have the leading path separator as",
"# the location string does.",
"if",
"name",
".",
"startswith",
"(",
"location",
"[",
"1",
":",
"]",
")",
":",
"return",
"True",
"return",
"False"
]
| 24.9375 | 20.71875 |
def sample(self, n):
"""Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will
incur cost.
Args:
      n: number of rows to sample. Note that the number of rows returned is approximate.
Returns:
A dataframe containing sampled data.
Raises:
Exception if n is larger than number of rows.
"""
total = bq.Query('select count(*) from %s' %
self._get_source()).execute().result()[0].values()[0]
if n > total:
raise ValueError('sample larger than population')
sampling = bq.Sampling.random(percent=n * 100.0 / float(total))
if self._query is not None:
source = self._query
else:
source = 'SELECT * FROM `%s`' % self._table
sample = bq.Query(source).execute(sampling=sampling).result()
df = sample.to_dataframe()
return df | [
"def",
"sample",
"(",
"self",
",",
"n",
")",
":",
"total",
"=",
"bq",
".",
"Query",
"(",
"'select count(*) from %s'",
"%",
"self",
".",
"_get_source",
"(",
")",
")",
".",
"execute",
"(",
")",
".",
"result",
"(",
")",
"[",
"0",
"]",
".",
"values",
"(",
")",
"[",
"0",
"]",
"if",
"n",
">",
"total",
":",
"raise",
"ValueError",
"(",
"'sample larger than population'",
")",
"sampling",
"=",
"bq",
".",
"Sampling",
".",
"random",
"(",
"percent",
"=",
"n",
"*",
"100.0",
"/",
"float",
"(",
"total",
")",
")",
"if",
"self",
".",
"_query",
"is",
"not",
"None",
":",
"source",
"=",
"self",
".",
"_query",
"else",
":",
"source",
"=",
"'SELECT * FROM `%s`'",
"%",
"self",
".",
"_table",
"sample",
"=",
"bq",
".",
"Query",
"(",
"source",
")",
".",
"execute",
"(",
"sampling",
"=",
"sampling",
")",
".",
"result",
"(",
")",
"df",
"=",
"sample",
".",
"to_dataframe",
"(",
")",
"return",
"df"
]
| 36 | 20.26087 |
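
A hedged usage sketch; the wrapper instance and table size are hypothetical:

    tbl = ...  # assumed: an instance wrapping a BigQuery table or query
    df = tbl.sample(1000)
    # Raises ValueError('sample larger than population') if fewer than
    # 1000 rows exist; otherwise returns roughly 1000 rows, since the
    # sampling percentage is approximate.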