repo (stringlengths 7-54) | path (stringlengths 4-192) | url (stringlengths 87-284) | code (stringlengths 78-104k) | code_tokens (sequence) | docstring (stringlengths 1-46.9k) | docstring_tokens (sequence) | language (stringclasses 1 value) | partition (stringclasses 3 values) |
---|---|---|---|---|---|---|---|---|
PmagPy/PmagPy | pmagpy/pmag.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L4364-L4374 | def vclose(L, V):
"""
gets the closest vector
"""
lam, X = 0, []
for k in range(3):
lam = lam + V[k] * L[k]
beta = np.sqrt(1. - lam**2)
for k in range(3):
X.append((old_div((V[k] - lam * L[k]), beta)))
return X | [
"def",
"vclose",
"(",
"L",
",",
"V",
")",
":",
"lam",
",",
"X",
"=",
"0",
",",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"3",
")",
":",
"lam",
"=",
"lam",
"+",
"V",
"[",
"k",
"]",
"*",
"L",
"[",
"k",
"]",
"beta",
"=",
"np",
".",
"sqrt",
"(",
"1.",
"-",
"lam",
"**",
"2",
")",
"for",
"k",
"in",
"range",
"(",
"3",
")",
":",
"X",
".",
"append",
"(",
"(",
"old_div",
"(",
"(",
"V",
"[",
"k",
"]",
"-",
"lam",
"*",
"L",
"[",
"k",
"]",
")",
",",
"beta",
")",
")",
")",
"return",
"X"
] | gets the closest vector | [
"gets",
"the",
"closest",
"vector"
] | python | train |
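A minimal sketch of what the `vclose` helper above computes: the unit-length component of V perpendicular to L, where `lam` is the dot product of the two unit vectors. The standalone reimplementation below is hypothetical; it assumes 3-element numpy arrays and treats `old_div` as ordinary float division, which holds here because `beta` is a float.

```python
import numpy as np

def vclose_sketch(L, V):
    # lam is the cosine of the angle between the two unit vectors
    lam = float(np.dot(V, L))
    # beta is the corresponding sine, used to renormalize the result
    beta = np.sqrt(1.0 - lam ** 2)
    # component of V perpendicular to L, rescaled to unit length
    return [(V[k] - lam * L[k]) / beta for k in range(3)]

L_vec = np.array([0.0, 0.0, 1.0])
V_vec = np.array([1.0, 0.0, 1.0]) / np.sqrt(2.0)
print(vclose_sketch(L_vec, V_vec))  # approximately [1.0, 0.0, 0.0]
```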
bokeh/bokeh | bokeh/server/util.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/server/util.py#L46-L73 | def bind_sockets(address, port):
''' Bind a socket to a port on an address.
Args:
address (str) :
An address to bind a port on, e.g. ``"localhost"``
port (int) :
A port number to bind.
Pass 0 to have the OS automatically choose a free port.
This function returns a 2-tuple with the new socket as the first element,
and the port that was bound as the second. (Useful when passing 0 as a port
number to bind any free port.)
Returns:
(socket, port)
'''
ss = netutil.bind_sockets(port=port or 0, address=address)
assert len(ss)
ports = {s.getsockname()[1] for s in ss}
assert len(ports) == 1, "Multiple ports assigned??"
actual_port = ports.pop()
if port:
assert actual_port == port
return ss, actual_port | [
"def",
"bind_sockets",
"(",
"address",
",",
"port",
")",
":",
"ss",
"=",
"netutil",
".",
"bind_sockets",
"(",
"port",
"=",
"port",
"or",
"0",
",",
"address",
"=",
"address",
")",
"assert",
"len",
"(",
"ss",
")",
"ports",
"=",
"{",
"s",
".",
"getsockname",
"(",
")",
"[",
"1",
"]",
"for",
"s",
"in",
"ss",
"}",
"assert",
"len",
"(",
"ports",
")",
"==",
"1",
",",
"\"Multiple ports assigned??\"",
"actual_port",
"=",
"ports",
".",
"pop",
"(",
")",
"if",
"port",
":",
"assert",
"actual_port",
"==",
"port",
"return",
"ss",
",",
"actual_port"
] | Bind a socket to a port on an address.
Args:
address (str) :
An address to bind a port on, e.g. ``"localhost"``
port (int) :
A port number to bind.
Pass 0 to have the OS automatically choose a free port.
This function returns a 2-tuple with the new socket as the first element,
and the port that was bound as the second. (Useful when passing 0 as a port
number to bind any free port.)
Returns:
(socket, port) | [
"Bind",
"a",
"socket",
"to",
"a",
"port",
"on",
"an",
"address",
"."
] | python | train |
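A small usage sketch of the same pattern called directly through Tornado's `netutil` (assumed installed): binding port 0 lets the OS pick a free port, and the assigned number is read back from the socket, just as `bind_sockets` above does.

```python
from tornado import netutil

# Ask the OS for any free port on localhost
sockets = netutil.bind_sockets(port=0, address="localhost")

# Each bound socket reports the port it ended up on
ports = {s.getsockname()[1] for s in sockets}
actual_port = ports.pop()
print("bound to port", actual_port)

for s in sockets:
    s.close()
```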
deepmipt/DeepPavlov | deeppavlov/utils/alexa/ssl_tools.py | https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/ssl_tools.py#L61-L73 | def extract_certs(certs_txt: str) -> List[crypto.X509]:
"""Extracts pycrypto X509 objects from SSL certificates chain string.
Args:
certs_txt: SSL certificates chain string.
Returns:
result: List of pycrypto X509 objects.
"""
pattern = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'
certs_txt = re.findall(pattern, certs_txt, flags=re.DOTALL)
certs = [crypto.load_certificate(crypto.FILETYPE_PEM, cert_txt) for cert_txt in certs_txt]
return certs | [
"def",
"extract_certs",
"(",
"certs_txt",
":",
"str",
")",
"->",
"List",
"[",
"crypto",
".",
"X509",
"]",
":",
"pattern",
"=",
"r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'",
"certs_txt",
"=",
"re",
".",
"findall",
"(",
"pattern",
",",
"certs_txt",
",",
"flags",
"=",
"re",
".",
"DOTALL",
")",
"certs",
"=",
"[",
"crypto",
".",
"load_certificate",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"cert_txt",
")",
"for",
"cert_txt",
"in",
"certs_txt",
"]",
"return",
"certs"
] | Extracts pycrypto X509 objects from SSL certificates chain string.
Args:
certs_txt: SSL certificates chain string.
Returns:
result: List of pycrypto X509 objects. | [
"Extracts",
"pycrypto",
"X509",
"objects",
"from",
"SSL",
"certificates",
"chain",
"string",
"."
] | python | test |
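A self-contained sketch of the regex split that `extract_certs` relies on. The chain string below is fabricated with placeholder payloads, so only the `re.findall` step is shown, not the pyOpenSSL parsing.

```python
import re

# Fabricated chain: two PEM blocks with placeholder payloads
chain_txt = (
    "-----BEGIN CERTIFICATE-----\nMIIB...leaf...\n-----END CERTIFICATE-----\n"
    "-----BEGIN CERTIFICATE-----\nMIIB...root...\n-----END CERTIFICATE-----\n"
)

pattern = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'
blocks = re.findall(pattern, chain_txt, flags=re.DOTALL)
print(len(blocks))  # 2 -- one string per certificate in the chain
```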
openxc/openxc-python | openxc/controllers/base.py | https://github.com/openxc/openxc-python/blob/4becb4a6310bd658c125195ef6ffea4deaf7d7e7/openxc/controllers/base.py#L56-L65 | def wait_for_responses(self):
"""Block the thread and wait for the response to the given request to
arrive from the VI. If no matching response is received in
COMMAND_RESPONSE_TIMEOUT_S seconds, returns anyway.
"""
self.thread.join(self.COMMAND_RESPONSE_TIMEOUT_S)
self.running = False
return self.responses | [
"def",
"wait_for_responses",
"(",
"self",
")",
":",
"self",
".",
"thread",
".",
"join",
"(",
"self",
".",
"COMMAND_RESPONSE_TIMEOUT_S",
")",
"self",
".",
"running",
"=",
"False",
"return",
"self",
".",
"responses"
] | Block the thread and wait for the response to the given request to
arrive from the VI. If no matching response is received in
COMMAND_RESPONSE_TIMEOUT_S seconds, returns anyway. | [
"Block",
"the",
"thread",
"and",
"wait",
"for",
"the",
"response",
"to",
"the",
"given",
"request",
"to",
"arrive",
"from",
"the",
"VI",
".",
"If",
"no",
"matching",
"response",
"is",
"received",
"in",
"COMMAND_RESPONSE_TIMEOUT_S",
"seconds",
"returns",
"anyway",
"."
] | python | train |
thiagopbueno/pyrddl | pyrddl/parser.py | https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/parser.py#L516-L522 | def p_pvar_expr(self, p):
'''pvar_expr : IDENT LPAREN term_list RPAREN
| IDENT'''
if len(p) == 2:
p[0] = ('pvar_expr', (p[1], None))
elif len(p) == 5:
p[0] = ('pvar_expr', (p[1], p[3])) | [
"def",
"p_pvar_expr",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"'pvar_expr'",
",",
"(",
"p",
"[",
"1",
"]",
",",
"None",
")",
")",
"elif",
"len",
"(",
"p",
")",
"==",
"5",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"'pvar_expr'",
",",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
")",
")"
] | pvar_expr : IDENT LPAREN term_list RPAREN
| IDENT | [
"pvar_expr",
":",
"IDENT",
"LPAREN",
"term_list",
"RPAREN",
"|",
"IDENT"
] | python | train |
wandb/client | wandb/vendor/prompt_toolkit/layout/containers.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/layout/containers.py#L1597-L1605 | def _scroll_down(self, cli):
" Scroll window down. "
info = self.render_info
if self.vertical_scroll < info.content_height - info.window_height:
if info.cursor_position.y <= info.configured_scroll_offsets.top:
self.content.move_cursor_down(cli)
self.vertical_scroll += 1 | [
"def",
"_scroll_down",
"(",
"self",
",",
"cli",
")",
":",
"info",
"=",
"self",
".",
"render_info",
"if",
"self",
".",
"vertical_scroll",
"<",
"info",
".",
"content_height",
"-",
"info",
".",
"window_height",
":",
"if",
"info",
".",
"cursor_position",
".",
"y",
"<=",
"info",
".",
"configured_scroll_offsets",
".",
"top",
":",
"self",
".",
"content",
".",
"move_cursor_down",
"(",
"cli",
")",
"self",
".",
"vertical_scroll",
"+=",
"1"
] | Scroll window down. | [
"Scroll",
"window",
"down",
"."
] | python | train |
aerogear/digger-build-cli | digger/base/build.py | https://github.com/aerogear/digger-build-cli/blob/8b88a31063526ec7222dbea6a87309686ad21320/digger/base/build.py#L63-L81 | def from_zip(cls, src='/tmp/app.zip', dest='/app'):
"""
Unzips a zipped app project file and instantiates it.
:param src: zipfile path
:param dest: destination folder to extract the zipfile content
Returns
A project instance.
"""
try:
zf = zipfile.ZipFile(src, 'r')
except FileNotFoundError:
raise errors.InvalidPathError(src)
except zipfile.BadZipFile:
raise errors.InvalidZipFileError(src)
[zf.extract(file, dest) for file in zf.namelist()]
zf.close()
return cls.from_path(dest) | [
"def",
"from_zip",
"(",
"cls",
",",
"src",
"=",
"'/tmp/app.zip'",
",",
"dest",
"=",
"'/app'",
")",
":",
"try",
":",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"src",
",",
"'r'",
")",
"except",
"FileNotFoundError",
":",
"raise",
"errors",
".",
"InvalidPathError",
"(",
"src",
")",
"except",
"zipfile",
".",
"BadZipFile",
":",
"raise",
"errors",
".",
"InvalidZipFileError",
"(",
"src",
")",
"[",
"zf",
".",
"extract",
"(",
"file",
",",
"dest",
")",
"for",
"file",
"in",
"zf",
".",
"namelist",
"(",
")",
"]",
"zf",
".",
"close",
"(",
")",
"return",
"cls",
".",
"from_path",
"(",
"dest",
")"
] | Unzips a zipped app project file and instantiates it.
:param src: zipfile path
:param dest: destination folder to extract the zipfile content
Returns
A project instance. | [
"Unzips",
"a",
"zipped",
"app",
"project",
"file",
"and",
"instantiates",
"it",
"."
] | python | train |
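A runnable sketch of the extract-then-load pattern that `from_zip` implements, built entirely from the standard library; the temporary archive and destination below stand in for the `/tmp/app.zip` and `/app` defaults above.

```python
import tempfile
import zipfile
from pathlib import Path

workdir = Path(tempfile.mkdtemp())
src = workdir / "app.zip"
dest = workdir / "app"

# Build a tiny archive to stand in for an uploaded app project
with zipfile.ZipFile(src, "w") as zf:
    zf.writestr("hello.txt", "hello from the project\n")

# Mirror of from_zip: open the archive, extract every member, close it
zf = zipfile.ZipFile(src, "r")
for member in zf.namelist():
    zf.extract(member, dest)
zf.close()

print(sorted(p.name for p in dest.iterdir()))  # ['hello.txt']
```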
saltstack/salt | salt/cloud/clouds/gce.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/gce.py#L728-L807 | def create_subnetwork(kwargs=None, call=None):
'''
... versionadded:: 2017.7.0
Create a GCE Subnetwork. Must specify name, cidr, network, and region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_subnetwork gce name=mysubnet network=mynet1 region=us-west1 cidr=10.0.0.0/24 description=optional
'''
if call != 'function':
raise SaltCloudSystemExit(
'The create_subnetwork function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'Must specify name of subnet.'
)
return False
if 'network' not in kwargs:
log.error(
'Must specify name of network to create subnet under.'
)
return False
if 'cidr' not in kwargs:
log.error(
'A network CIDR range must be specified when creating a subnet.'
)
return False
if 'region' not in kwargs:
log.error(
'A region must be specified when creating a subnetwork.'
)
return False
name = kwargs['name']
cidr = kwargs['cidr']
network = kwargs['network']
region = kwargs['region']
desc = kwargs.get('description', None)
conn = get_conn()
__utils__['cloud.fire_event'](
'event',
'create subnetwork',
'salt/cloud/subnet/creating',
args={
'name': name,
'network': network,
'cidr': cidr,
'region': region,
'description': desc
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
subnet = conn.ex_create_subnetwork(name, cidr, network, region, desc)
__utils__['cloud.fire_event'](
'event',
'created subnetwork',
'salt/cloud/subnet/created',
args={
'name': name,
'network': network,
'cidr': cidr,
'region': region,
'description': desc
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return _expand_item(subnet) | [
"def",
"create_subnetwork",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The create_subnetwork function must be called with -f or --function.'",
")",
"if",
"not",
"kwargs",
"or",
"'name'",
"not",
"in",
"kwargs",
":",
"log",
".",
"error",
"(",
"'Must specify name of subnet.'",
")",
"return",
"False",
"if",
"'network'",
"not",
"in",
"kwargs",
":",
"log",
".",
"errror",
"(",
"'Must specify name of network to create subnet under.'",
")",
"return",
"False",
"if",
"'cidr'",
"not",
"in",
"kwargs",
":",
"log",
".",
"errror",
"(",
"'A network CIDR range must be specified when creating a subnet.'",
")",
"return",
"False",
"if",
"'region'",
"not",
"in",
"kwargs",
":",
"log",
".",
"error",
"(",
"'A region must be specified when creating a subnetwork.'",
")",
"return",
"False",
"name",
"=",
"kwargs",
"[",
"'name'",
"]",
"cidr",
"=",
"kwargs",
"[",
"'cidr'",
"]",
"network",
"=",
"kwargs",
"[",
"'network'",
"]",
"region",
"=",
"kwargs",
"[",
"'region'",
"]",
"desc",
"=",
"kwargs",
".",
"get",
"(",
"'description'",
",",
"None",
")",
"conn",
"=",
"get_conn",
"(",
")",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'create subnetwork'",
",",
"'salt/cloud/subnet/creating'",
",",
"args",
"=",
"{",
"'name'",
":",
"name",
",",
"'network'",
":",
"network",
",",
"'cidr'",
":",
"cidr",
",",
"'region'",
":",
"region",
",",
"'description'",
":",
"desc",
"}",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"subnet",
"=",
"conn",
".",
"ex_create_subnetwork",
"(",
"name",
",",
"cidr",
",",
"network",
",",
"region",
",",
"desc",
")",
"__utils__",
"[",
"'cloud.fire_event'",
"]",
"(",
"'event'",
",",
"'created subnetwork'",
",",
"'salt/cloud/subnet/created'",
",",
"args",
"=",
"{",
"'name'",
":",
"name",
",",
"'network'",
":",
"network",
",",
"'cidr'",
":",
"cidr",
",",
"'region'",
":",
"region",
",",
"'description'",
":",
"desc",
"}",
",",
"sock_dir",
"=",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"transport",
"=",
"__opts__",
"[",
"'transport'",
"]",
")",
"return",
"_expand_item",
"(",
"subnet",
")"
] | ... versionadded:: 2017.7.0
Create a GCE Subnetwork. Must specify name, cidr, network, and region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_subnetwork gce name=mysubnet network=mynet1 region=us-west1 cidr=10.0.0.0/24 description=optional | [
"...",
"versionadded",
"::",
"2017",
".",
"7",
".",
"0",
"Create",
"a",
"GCE",
"Subnetwork",
".",
"Must",
"specify",
"name",
"cidr",
"network",
"and",
"region",
"."
] | python | train |
quantumlib/Cirq | cirq/protocols/pow.py | https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/protocols/pow.py#L66-L103 | def pow(val: Any,
exponent: Any,
default: Any = RaiseTypeErrorIfNotProvided) -> Any:
"""Returns `val**factor` of the given value, if defined.
Values define an extrapolation by defining a __pow__(self, exponent) method.
Note that the method may return NotImplemented to indicate a particular
extrapolation can't be done.
Args:
val: The value or iterable of values to invert.
exponent: The extrapolation factor. For example, if this is 0.5 and val
is a gate then the caller is asking for a square root of the gate.
default: Determines the fallback behavior when `val` doesn't have
an extrapolation defined. If `default` is not set and that occurs,
a TypeError is raised instead.
Returns:
If `val` has a __pow__ method that returns something besides
NotImplemented, that result is returned. Otherwise, if a default value
was specified, the default value is returned.
Raises:
TypeError: `val` doesn't have a __pow__ method (or that method returned
NotImplemented) and no `default` value was specified.
"""
raiser = getattr(val, '__pow__', None)
result = NotImplemented if raiser is None else raiser(exponent)
if result is not NotImplemented:
return result
if default is not RaiseTypeErrorIfNotProvided:
return default
if raiser is None:
raise TypeError("object of type '{}' "
"has no __pow__ method.".format(type(val)))
raise TypeError("object of type '{}' does have a __pow__ method, "
"but it returned NotImplemented.".format(type(val))) | [
"def",
"pow",
"(",
"val",
":",
"Any",
",",
"exponent",
":",
"Any",
",",
"default",
":",
"Any",
"=",
"RaiseTypeErrorIfNotProvided",
")",
"->",
"Any",
":",
"raiser",
"=",
"getattr",
"(",
"val",
",",
"'__pow__'",
",",
"None",
")",
"result",
"=",
"NotImplemented",
"if",
"raiser",
"is",
"None",
"else",
"raiser",
"(",
"exponent",
")",
"if",
"result",
"is",
"not",
"NotImplemented",
":",
"return",
"result",
"if",
"default",
"is",
"not",
"RaiseTypeErrorIfNotProvided",
":",
"return",
"default",
"if",
"raiser",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"object of type '{}' \"",
"\"has no __pow__ method.\"",
".",
"format",
"(",
"type",
"(",
"val",
")",
")",
")",
"raise",
"TypeError",
"(",
"\"object of type '{}' does have a __pow__ method, \"",
"\"but it returned NotImplemented.\"",
".",
"format",
"(",
"type",
"(",
"val",
")",
")",
")"
] | Returns `val**factor` of the given value, if defined.
Values define an extrapolation by defining a __pow__(self, exponent) method.
Note that the method may return NotImplemented to indicate a particular
extrapolation can't be done.
Args:
val: The value or iterable of values to invert.
exponent: The extrapolation factor. For example, if this is 0.5 and val
is a gate then the caller is asking for a square root of the gate.
default: Determines the fallback behavior when `val` doesn't have
an extrapolation defined. If `default` is not set and that occurs,
a TypeError is raised instead.
Returns:
If `val` has a __pow__ method that returns something besides
NotImplemented, that result is returned. Otherwise, if a default value
was specified, the default value is returned.
Raises:
TypeError: `val` doesn't have a __pow__ method (or that method returned
NotImplemented) and no `default` value was specified. | [
"Returns",
"val",
"**",
"factor",
"of",
"the",
"given",
"value",
"if",
"defined",
"."
] | python | train |
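A self-contained sketch of the dispatch idea behind the `pow` protocol above: look up `__pow__` on the value and fall back to a default when the method is missing or returns `NotImplemented`. The `HalfTurn` class is a toy stand-in rather than a real Cirq gate.

```python
class HalfTurn:
    """Toy value that knows how to raise itself to a power."""
    def __init__(self, turns):
        self.turns = turns
    def __pow__(self, exponent):
        return HalfTurn(self.turns * exponent)

def pow_or_default(val, exponent, default=None):
    # Same shape as the protocol above, minus the error handling
    raiser = getattr(val, '__pow__', None)
    result = NotImplemented if raiser is None else raiser(exponent)
    if result is not NotImplemented:
        return result
    return default

print(pow_or_default(HalfTurn(1.0), 0.5).turns)            # 0.5
print(pow_or_default(object(), 2, default="no __pow__"))   # no __pow__
```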
gem/oq-engine | openquake/hazardlib/gsim/chiou_youngs_2014.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/chiou_youngs_2014.py#L238-L252 | def _get_centered_ztor(self, rup, Frv):
"""
Get ztor centered on the M- dependent average ztor(km)
by different fault types.
"""
if Frv == 1:
mean_ztor = max(2.704 - 1.226 * max(rup.mag - 5.849, 0.0), 0.) ** 2
centered_ztor = rup.ztor - mean_ztor
else:
mean_ztor = max(2.673 - 1.136 * max(rup.mag - 4.970, 0.0), 0.) ** 2
centered_ztor = rup.ztor - mean_ztor
return centered_ztor | [
"def",
"_get_centered_ztor",
"(",
"self",
",",
"rup",
",",
"Frv",
")",
":",
"if",
"Frv",
"==",
"1",
":",
"mean_ztor",
"=",
"max",
"(",
"2.704",
"-",
"1.226",
"*",
"max",
"(",
"rup",
".",
"mag",
"-",
"5.849",
",",
"0.0",
")",
",",
"0.",
")",
"**",
"2",
"centered_ztor",
"=",
"rup",
".",
"ztor",
"-",
"mean_ztor",
"else",
":",
"mean_ztor",
"=",
"max",
"(",
"2.673",
"-",
"1.136",
"*",
"max",
"(",
"rup",
".",
"mag",
"-",
"4.970",
",",
"0.0",
")",
",",
"0.",
")",
"**",
"2",
"centered_ztor",
"=",
"rup",
".",
"ztor",
"-",
"mean_ztor",
"return",
"centered_ztor"
] | Get ztor centered on the M- dependent average ztor(km)
by different fault types. | [
"Get",
"ztor",
"centered",
"on",
"the",
"M",
"-",
"dependent",
"avarage",
"ztor",
"(",
"km",
")",
"by",
"different",
"fault",
"types",
"."
] | python | train |
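A small numeric sketch of the centering formula above for the reverse-faulting branch (Frv == 1); the magnitude and top-of-rupture depth are illustrative values only.

```python
mag = 6.5    # illustrative moment magnitude
ztor = 4.0   # illustrative depth to top of rupture, km

# Magnitude-dependent average ztor for reverse faults, as in the method above
mean_ztor = max(2.704 - 1.226 * max(mag - 5.849, 0.0), 0.0) ** 2
centered_ztor = ztor - mean_ztor
print(round(mean_ztor, 3), round(centered_ztor, 3))
```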
mattjj/pyhsmm | pyhsmm/models.py | https://github.com/mattjj/pyhsmm/blob/a9a39c2bfd539048e35877cb13283552eadc24e2/pyhsmm/models.py#L664-L678 | def BIC(self,data=None):
'''
BIC on the passed data. If passed data is None (default), calculates BIC
on the model's assigned data
'''
# NOTE: in principle this method computes the BIC only after finding the
# maximum likelihood parameters (or, of course, an EM fixed-point as an
# approximation!)
assert data is None and len(self.states_list) > 0, 'Must have data to get BIC'
if data is None:
return -2*sum(self.log_likelihood(s.data).sum() for s in self.states_list) + \
self.num_parameters() * np.log(
sum(s.data.shape[0] for s in self.states_list))
else:
return -2*self.log_likelihood(data) + self.num_parameters() * np.log(data.shape[0]) | [
"def",
"BIC",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"# NOTE: in principle this method computes the BIC only after finding the",
"# maximum likelihood parameters (or, of course, an EM fixed-point as an",
"# approximation!)",
"assert",
"data",
"is",
"None",
"and",
"len",
"(",
"self",
".",
"states_list",
")",
">",
"0",
",",
"'Must have data to get BIC'",
"if",
"data",
"is",
"None",
":",
"return",
"-",
"2",
"*",
"sum",
"(",
"self",
".",
"log_likelihood",
"(",
"s",
".",
"data",
")",
".",
"sum",
"(",
")",
"for",
"s",
"in",
"self",
".",
"states_list",
")",
"+",
"self",
".",
"num_parameters",
"(",
")",
"*",
"np",
".",
"log",
"(",
"sum",
"(",
"s",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"for",
"s",
"in",
"self",
".",
"states_list",
")",
")",
"else",
":",
"return",
"-",
"2",
"*",
"self",
".",
"log_likelihood",
"(",
"data",
")",
"+",
"self",
".",
"num_parameters",
"(",
")",
"*",
"np",
".",
"log",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")"
] | BIC on the passed data. If passed data is None (default), calculates BIC
on the model's assigned data | [
"BIC",
"on",
"the",
"passed",
"data",
".",
"If",
"passed",
"data",
"is",
"None",
"(",
"default",
")",
"calculates",
"BIC",
"on",
"the",
"model",
"s",
"assigned",
"data"
] | python | train |
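The quantity computed above is the standard Bayesian information criterion. A minimal standalone sketch with made-up numbers, where `log_likelihood` is the summed data log-likelihood, `k` the number of free parameters, and `n` the number of observations:

```python
import numpy as np

def bic(log_likelihood, k, n):
    # Lower is better: -2 * logL penalized by k * ln(n)
    return -2.0 * log_likelihood + k * np.log(n)

print(bic(log_likelihood=-1234.5, k=20, n=500))
```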
seleniumbase/SeleniumBase | seleniumbase/fixtures/base_case.py | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L1028-L1088 | def add_tour_step(self, message, selector=None, name=None,
title=None, theme=None, alignment=None, duration=None):
""" Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
theme - (NON-Bootstrap Tours ONLY) The styling of the tour step.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("arrows" is used if None is selected.)
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
before automatically advancing to the next tour step.
"""
if not selector:
selector = "html"
if page_utils.is_xpath_selector(selector):
selector = self.convert_to_css_selector(selector, By.XPATH)
selector = self.__escape_quotes_if_needed(selector)
if not name:
name = "default"
if name not in self._tour_steps:
# By default, will create an IntroJS tour if no tours exist
self.create_tour(name=name, theme="introjs")
if not title:
title = ""
title = self.__escape_quotes_if_needed(title)
if message:
message = self.__escape_quotes_if_needed(message)
else:
message = ""
if not alignment or (
alignment not in ["top", "bottom", "left", "right"]):
if "Hopscotch" not in self._tour_steps[name][0]:
alignment = "top"
else:
alignment = "bottom"
if "Bootstrap" in self._tour_steps[name][0]:
self.__add_bootstrap_tour_step(
message, selector=selector, name=name, title=title,
alignment=alignment, duration=duration)
elif "Hopscotch" in self._tour_steps[name][0]:
self.__add_hopscotch_tour_step(
message, selector=selector, name=name, title=title,
alignment=alignment)
elif "IntroJS" in self._tour_steps[name][0]:
self.__add_introjs_tour_step(
message, selector=selector, name=name, title=title,
alignment=alignment)
else:
self.__add_shepherd_tour_step(
message, selector=selector, name=name, title=title,
theme=theme, alignment=alignment) | [
"def",
"add_tour_step",
"(",
"self",
",",
"message",
",",
"selector",
"=",
"None",
",",
"name",
"=",
"None",
",",
"title",
"=",
"None",
",",
"theme",
"=",
"None",
",",
"alignment",
"=",
"None",
",",
"duration",
"=",
"None",
")",
":",
"if",
"not",
"selector",
":",
"selector",
"=",
"\"html\"",
"if",
"page_utils",
".",
"is_xpath_selector",
"(",
"selector",
")",
":",
"selector",
"=",
"self",
".",
"convert_to_css_selector",
"(",
"selector",
",",
"By",
".",
"XPATH",
")",
"selector",
"=",
"self",
".",
"__escape_quotes_if_needed",
"(",
"selector",
")",
"if",
"not",
"name",
":",
"name",
"=",
"\"default\"",
"if",
"name",
"not",
"in",
"self",
".",
"_tour_steps",
":",
"# By default, will create an IntroJS tour if no tours exist",
"self",
".",
"create_tour",
"(",
"name",
"=",
"name",
",",
"theme",
"=",
"\"introjs\"",
")",
"if",
"not",
"title",
":",
"title",
"=",
"\"\"",
"title",
"=",
"self",
".",
"__escape_quotes_if_needed",
"(",
"title",
")",
"if",
"message",
":",
"message",
"=",
"self",
".",
"__escape_quotes_if_needed",
"(",
"message",
")",
"else",
":",
"message",
"=",
"\"\"",
"if",
"not",
"alignment",
"or",
"(",
"alignment",
"not",
"in",
"[",
"\"top\"",
",",
"\"bottom\"",
",",
"\"left\"",
",",
"\"right\"",
"]",
")",
":",
"if",
"\"Hopscotch\"",
"not",
"in",
"self",
".",
"_tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"alignment",
"=",
"\"top\"",
"else",
":",
"alignment",
"=",
"\"bottom\"",
"if",
"\"Bootstrap\"",
"in",
"self",
".",
"_tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"self",
".",
"__add_bootstrap_tour_step",
"(",
"message",
",",
"selector",
"=",
"selector",
",",
"name",
"=",
"name",
",",
"title",
"=",
"title",
",",
"alignment",
"=",
"alignment",
",",
"duration",
"=",
"duration",
")",
"elif",
"\"Hopscotch\"",
"in",
"self",
".",
"_tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"self",
".",
"__add_hopscotch_tour_step",
"(",
"message",
",",
"selector",
"=",
"selector",
",",
"name",
"=",
"name",
",",
"title",
"=",
"title",
",",
"alignment",
"=",
"alignment",
")",
"elif",
"\"IntroJS\"",
"in",
"self",
".",
"_tour_steps",
"[",
"name",
"]",
"[",
"0",
"]",
":",
"self",
".",
"__add_introjs_tour_step",
"(",
"message",
",",
"selector",
"=",
"selector",
",",
"name",
"=",
"name",
",",
"title",
"=",
"title",
",",
"alignment",
"=",
"alignment",
")",
"else",
":",
"self",
".",
"__add_shepherd_tour_step",
"(",
"message",
",",
"selector",
"=",
"selector",
",",
"name",
"=",
"name",
",",
"title",
"=",
"title",
",",
"theme",
"=",
"theme",
",",
"alignment",
"=",
"alignment",
")"
] | Allows the user to add tour steps for a website.
@Params
message - The message to display.
selector - The CSS Selector of the Element to attach to.
name - If creating multiple tours at the same time,
use this to select the tour you wish to add steps to.
title - Additional header text that appears above the message.
theme - (NON-Bootstrap Tours ONLY) The styling of the tour step.
Choose from "light"/"arrows", "dark", "default", "square",
and "square-dark". ("arrows" is used if None is selected.)
alignment - Choose from "top", "bottom", "left", and "right".
("top" is the default alignment).
duration - (Bootstrap Tours ONLY) The amount of time, in seconds,
before automatically advancing to the next tour step. | [
"Allows",
"the",
"user",
"to",
"add",
"tour",
"steps",
"for",
"a",
"website",
"."
] | python | train |
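A hedged usage sketch of building a short tour inside a SeleniumBase test. The page and selectors are placeholders, and `play_tour` is assumed to be the companion method that runs the queued steps.

```python
from seleniumbase import BaseCase

class TourExampleTest(BaseCase):
    def test_homepage_tour(self):
        self.open("https://example.com/")   # placeholder page
        self.create_tour(theme="dark")      # non-Bootstrap theme from the list above
        self.add_tour_step("Welcome to the demo!", title="Start")
        self.add_tour_step("This is the main heading.", "h1",
                           alignment="bottom")
        self.play_tour()                    # assumed companion call
```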
mbr/simplekv | simplekv/__init__.py | https://github.com/mbr/simplekv/blob/fc46ee0b8ca9b071d6699f3f0f18a8e599a5a2d6/simplekv/__init__.py#L400-L416 | def put_file(self, key, file, ttl_secs=None):
"""Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid.
"""
if ttl_secs is None:
ttl_secs = self.default_ttl_secs
self._check_valid_key(key)
if isinstance(file, str):
return self._put_filename(key, file, self._valid_ttl(ttl_secs))
else:
return self._put_file(key, file, self._valid_ttl(ttl_secs)) | [
"def",
"put_file",
"(",
"self",
",",
"key",
",",
"file",
",",
"ttl_secs",
"=",
"None",
")",
":",
"if",
"ttl_secs",
"is",
"None",
":",
"ttl_secs",
"=",
"self",
".",
"default_ttl_secs",
"self",
".",
"_check_valid_key",
"(",
"key",
")",
"if",
"isinstance",
"(",
"file",
",",
"str",
")",
":",
"return",
"self",
".",
"_put_filename",
"(",
"key",
",",
"file",
",",
"self",
".",
"_valid_ttl",
"(",
"ttl_secs",
")",
")",
"else",
":",
"return",
"self",
".",
"_put_file",
"(",
"key",
",",
"file",
",",
"self",
".",
"_valid_ttl",
"(",
"ttl_secs",
")",
")"
] | Like :meth:`~simplekv.KeyValueStore.put_file`, but with an
additional parameter:
:param ttl_secs: Number of seconds until the key expires. See above
for valid values.
:raises exceptions.ValueError: If ``ttl_secs`` is invalid. | [
"Like",
":",
"meth",
":",
"~simplekv",
".",
"KeyValueStore",
".",
"put_file",
"but",
"with",
"an",
"additional",
"parameter",
":"
] | python | train |
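A brief usage sketch; `store` is assumed to be an already-constructed simplekv store with TTL support, and the key names are placeholders.

```python
import tempfile

# A throwaway local file to upload
with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tmp:
    tmp.write(b"%PDF-1.4 placeholder")
    path = tmp.name

# Filename form: the store reads the file itself
store.put_file("latest-report", path, ttl_secs=3600)

# File-object form works the same way
with open(path, "rb") as handle:
    store.put_file("latest-report-copy", handle, ttl_secs=3600)
```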
Clinical-Genomics/trailblazer | trailblazer/mip/files.py | https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/mip/files.py#L184-L192 | def parse_chanjo_sexcheck(handle: TextIO):
"""Parse Chanjo sex-check output."""
samples = csv.DictReader(handle, delimiter='\t')
for sample in samples:
return {
'predicted_sex': sample['sex'],
'x_coverage': float(sample['#X_coverage']),
'y_coverage': float(sample['Y_coverage']),
} | [
"def",
"parse_chanjo_sexcheck",
"(",
"handle",
":",
"TextIO",
")",
":",
"samples",
"=",
"csv",
".",
"DictReader",
"(",
"handle",
",",
"delimiter",
"=",
"'\\t'",
")",
"for",
"sample",
"in",
"samples",
":",
"return",
"{",
"'predicted_sex'",
":",
"sample",
"[",
"'sex'",
"]",
",",
"'x_coverage'",
":",
"float",
"(",
"sample",
"[",
"'#X_coverage'",
"]",
")",
",",
"'y_coverage'",
":",
"float",
"(",
"sample",
"[",
"'Y_coverage'",
"]",
")",
",",
"}"
] | Parse Chanjo sex-check output. | [
"Parse",
"Chanjo",
"sex",
"-",
"check",
"output",
"."
] | python | train |
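A self-contained sketch of feeding the parser a tiny in-memory sex-check table; it assumes the `parse_chanjo_sexcheck` function from the record above is in scope.

```python
import io

# Minimal tab-separated output with the three columns the parser reads
sexcheck_tsv = io.StringIO(
    "#X_coverage\tY_coverage\tsex\n"
    "28.50\t0.01\tfemale\n"
)

result = parse_chanjo_sexcheck(sexcheck_tsv)
print(result)
# {'predicted_sex': 'female', 'x_coverage': 28.5, 'y_coverage': 0.01}
```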
jtmoulia/switchboard-python | aplus/__init__.py | https://github.com/jtmoulia/switchboard-python/blob/c9b0cb74cb12a64385465091be633e78d39f08b1/aplus/__init__.py#L121-L248 | def then(self, success=None, failure=None):
"""
This method takes two optional arguments. The first argument
is used if the "self promise" is fulfilled and the other is
used if the "self promise" is rejected. In either case, this
method returns another promise that effectively represents
the result of either the first or the second argument (in the
case that the "self promise" is fulfilled or rejected,
respectively).
Each argument can be either:
* None - Meaning no action is taken
* A function - which will be called with either the value
of the "self promise" or the reason for rejection of
the "self promise". The function may return:
* A value - which will be used to fulfill the promise
returned by this method.
* A promise - which, when fulfilled or rejected, will
cascade its value or reason to the promise returned
by this method.
* A value - which will be assigned as either the value
or the reason for the promise returned by this method
when the "self promise" is either fulfilled or rejected,
respectively.
"""
ret = Promise()
def callAndFulfill(v):
"""
A callback to be invoked if the "self promise"
is fulfilled.
"""
try:
# From 3.2.1, don't call non-functions values
if _isFunction(success):
newvalue = success(v)
if _isPromise(newvalue):
newvalue.then(lambda v: ret.fulfill(v),
lambda r: ret.reject(r))
else:
ret.fulfill(newvalue)
elif success!=None:
# From 3.2.6.4
ret.fulfill(v)
else:
pass
except Exception as e:
ret.reject(e)
def callAndReject(r):
"""
A callback to be invoked if the "self promise"
is rejected.
"""
try:
if _isFunction(failure):
newvalue = failure(r)
if _isPromise(newvalue):
newvalue.then(lambda v: ret.fulfill(v),
lambda r: ret.reject(r))
else:
ret.fulfill(newvalue)
elif failure!=None:
# From 3.2.6.5
ret.reject(r)
else:
pass
except Exception as e:
ret.reject(e)
if self._state==self.PENDING:
"""
If this is still pending, then add callbacks to the
existing promise that call either the success or
rejected functions supplied and then fulfill the
promise being returned by this method
"""
if success!=None:
self._callbacks.append(callAndFulfill)
if failure!=None:
self._errbacks.append(callAndReject)
elif self._state==self.FULFILLED:
"""
If this promise was already fulfilled, then
we need to use the first argument to this method
to determine the value to use in fulfilling the
promise that we return from this method.
"""
try:
if _isFunction(success):
newvalue = success(self.value)
if _isPromise(newvalue):
newvalue.then(lambda v: ret.fulfill(v),
lambda r: ret.reject(r))
else:
ret.fulfill(newvalue)
elif success!=None:
# From 3.2.6.4
ret.fulfill(self.value)
else:
pass
except Exception as e:
ret.reject(e)
elif self._state==self.REJECTED:
"""
If this promise was already rejected, then
we need to use the second argument to this method
to determine the value to use in fulfilling the
promise that we return from this method.
"""
try:
if _isFunction(failure):
newvalue = failure(self.reason)
if _isPromise(newvalue):
newvalue.then(lambda v: ret.fulfill(v),
lambda r: ret.reject(r))
else:
ret.fulfill(newvalue)
elif failure!=None:
# From 3.2.6.5
ret.reject(self.reason)
else:
pass
except Exception as e:
ret.reject(e)
return ret | [
"def",
"then",
"(",
"self",
",",
"success",
"=",
"None",
",",
"failure",
"=",
"None",
")",
":",
"ret",
"=",
"Promise",
"(",
")",
"def",
"callAndFulfill",
"(",
"v",
")",
":",
"\"\"\"\n A callback to be invoked if the \"self promise\"\n is fulfilled.\n \"\"\"",
"try",
":",
"# From 3.2.1, don't call non-functions values",
"if",
"_isFunction",
"(",
"success",
")",
":",
"newvalue",
"=",
"success",
"(",
"v",
")",
"if",
"_isPromise",
"(",
"newvalue",
")",
":",
"newvalue",
".",
"then",
"(",
"lambda",
"v",
":",
"ret",
".",
"fulfill",
"(",
"v",
")",
",",
"lambda",
"r",
":",
"ret",
".",
"reject",
"(",
"r",
")",
")",
"else",
":",
"ret",
".",
"fulfill",
"(",
"newvalue",
")",
"elif",
"success",
"!=",
"None",
":",
"# From 3.2.6.4",
"ret",
".",
"fulfill",
"(",
"v",
")",
"else",
":",
"pass",
"except",
"Exception",
"as",
"e",
":",
"ret",
".",
"reject",
"(",
"e",
")",
"def",
"callAndReject",
"(",
"r",
")",
":",
"\"\"\"\n A callback to be invoked if the \"self promise\"\n is rejected.\n \"\"\"",
"try",
":",
"if",
"_isFunction",
"(",
"failure",
")",
":",
"newvalue",
"=",
"failure",
"(",
"r",
")",
"if",
"_isPromise",
"(",
"newvalue",
")",
":",
"newvalue",
".",
"then",
"(",
"lambda",
"v",
":",
"ret",
".",
"fulfill",
"(",
"v",
")",
",",
"lambda",
"r",
":",
"ret",
".",
"reject",
"(",
"r",
")",
")",
"else",
":",
"ret",
".",
"fulfill",
"(",
"newvalue",
")",
"elif",
"failure",
"!=",
"None",
":",
"# From 3.2.6.5",
"ret",
".",
"reject",
"(",
"r",
")",
"else",
":",
"pass",
"except",
"Exception",
"as",
"e",
":",
"ret",
".",
"reject",
"(",
"e",
")",
"if",
"self",
".",
"_state",
"==",
"self",
".",
"PENDING",
":",
"\"\"\"\n If this is still pending, then add callbacks to the\n existing promise that call either the success or\n rejected functions supplied and then fulfill the\n promise being returned by this method\n \"\"\"",
"if",
"success",
"!=",
"None",
":",
"self",
".",
"_callbacks",
".",
"append",
"(",
"callAndFulfill",
")",
"if",
"failure",
"!=",
"None",
":",
"self",
".",
"_errbacks",
".",
"append",
"(",
"callAndReject",
")",
"elif",
"self",
".",
"_state",
"==",
"self",
".",
"FULFILLED",
":",
"\"\"\"\n If this promise was already fulfilled, then\n we need to use the first argument to this method\n to determine the value to use in fulfilling the\n promise that we return from this method.\n \"\"\"",
"try",
":",
"if",
"_isFunction",
"(",
"success",
")",
":",
"newvalue",
"=",
"success",
"(",
"self",
".",
"value",
")",
"if",
"_isPromise",
"(",
"newvalue",
")",
":",
"newvalue",
".",
"then",
"(",
"lambda",
"v",
":",
"ret",
".",
"fulfill",
"(",
"v",
")",
",",
"lambda",
"r",
":",
"ret",
".",
"reject",
"(",
"r",
")",
")",
"else",
":",
"ret",
".",
"fulfill",
"(",
"newvalue",
")",
"elif",
"success",
"!=",
"None",
":",
"# From 3.2.6.4",
"ret",
".",
"fulfill",
"(",
"self",
".",
"value",
")",
"else",
":",
"pass",
"except",
"Exception",
"as",
"e",
":",
"ret",
".",
"reject",
"(",
"e",
")",
"elif",
"self",
".",
"_state",
"==",
"self",
".",
"REJECTED",
":",
"\"\"\"\n If this promise was already rejected, then\n we need to use the second argument to this method\n to determine the value to use in fulfilling the\n promise that we return from this method.\n \"\"\"",
"try",
":",
"if",
"_isFunction",
"(",
"failure",
")",
":",
"newvalue",
"=",
"failure",
"(",
"self",
".",
"reason",
")",
"if",
"_isPromise",
"(",
"newvalue",
")",
":",
"newvalue",
".",
"then",
"(",
"lambda",
"v",
":",
"ret",
".",
"fulfill",
"(",
"v",
")",
",",
"lambda",
"r",
":",
"ret",
".",
"reject",
"(",
"r",
")",
")",
"else",
":",
"ret",
".",
"fulfill",
"(",
"newvalue",
")",
"elif",
"failure",
"!=",
"None",
":",
"# From 3.2.6.5",
"ret",
".",
"reject",
"(",
"self",
".",
"reason",
")",
"else",
":",
"pass",
"except",
"Exception",
"as",
"e",
":",
"ret",
".",
"reject",
"(",
"e",
")",
"return",
"ret"
] | This method takes two optional arguments. The first argument
is used if the "self promise" is fulfilled and the other is
used if the "self promise" is rejected. In either case, this
method returns another promise that effectively represents
the result of either the first or the second argument (in the
case that the "self promise" is fulfilled or rejected,
respectively).
Each argument can be either:
* None - Meaning no action is taken
* A function - which will be called with either the value
of the "self promise" or the reason for rejection of
the "self promise". The function may return:
* A value - which will be used to fulfill the promise
returned by this method.
* A promise - which, when fulfilled or rejected, will
cascade its value or reason to the promise returned
by this method.
* A value - which will be assigned as either the value
or the reason for the promise returned by this method
when the "self promise" is either fulfilled or rejected,
respectively. | [
"This",
"method",
"takes",
"two",
"optional",
"arguments",
".",
"The",
"first",
"argument",
"is",
"used",
"if",
"the",
"self",
"promise",
"is",
"fulfilled",
"and",
"the",
"other",
"is",
"used",
"if",
"the",
"self",
"promise",
"is",
"rejected",
".",
"In",
"either",
"case",
"this",
"method",
"returns",
"another",
"promise",
"that",
"effectively",
"represents",
"the",
"result",
"of",
"either",
"the",
"first",
"of",
"the",
"second",
"argument",
"(",
"in",
"the",
"case",
"that",
"the",
"self",
"promise",
"is",
"fulfilled",
"or",
"rejected",
"respectively",
")",
"."
] | python | train |
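A short usage sketch of chaining with `then`, assuming the surrounding `Promise` class is imported from this module and that its `fulfill` method (not shown in this record) invokes the registered callbacks with the value.

```python
p = Promise()

# Register a success handler and an error handler before resolution
chained = p.then(lambda value: value * 2,
                 lambda reason: print("failed:", reason))

# Fulfilling the original promise runs the success handler, and its
# return value becomes the value of the chained promise
p.fulfill(21)
print(chained.value)  # 42
```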
google/tangent | tangent/create.py | https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/create.py#L100-L119 | def create_temp(node, namer):
"""Create a temporary variable.
Args:
node: Create a temporary variable to store this variable in.
namer: A naming object that guarantees the names are unique.
Returns:
node: See `create_grad`. Returns a temporary variable, which is always a
simple variable annotated with `temp_var`.
"""
if isinstance(node, gast.Name):
name = node.id
elif isinstance(node, (gast.Attribute, gast.Subscript)):
name = node.value.id
else:
raise TypeError
temp_node = gast.Name(id=namer.temp(name), annotation=None, ctx=None)
anno.setanno(temp_node, 'temp_var', node)
return temp_node | [
"def",
"create_temp",
"(",
"node",
",",
"namer",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"gast",
".",
"Name",
")",
":",
"name",
"=",
"node",
".",
"id",
"elif",
"isinstance",
"(",
"node",
",",
"(",
"gast",
".",
"Attribute",
",",
"gast",
".",
"Subscript",
")",
")",
":",
"name",
"=",
"node",
".",
"value",
".",
"id",
"else",
":",
"raise",
"TypeError",
"temp_node",
"=",
"gast",
".",
"Name",
"(",
"id",
"=",
"namer",
".",
"temp",
"(",
"name",
")",
",",
"annotation",
"=",
"None",
",",
"ctx",
"=",
"None",
")",
"anno",
".",
"setanno",
"(",
"temp_node",
",",
"'temp_var'",
",",
"node",
")",
"return",
"temp_node"
] | Create a temporary variable.
Args:
node: Create a temporary variable to store this variable in.
namer: A naming object that guarantees the names are unique.
Returns:
node: See `create_grad`. Returns a temporary variable, which is always a
simple variable annotated with `temp_var`. | [
"Create",
"a",
"temporary",
"variable",
"."
] | python | train |
Nic30/hwt | hwt/hdl/types/bitsCast.py | https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/types/bitsCast.py#L96-L113 | def reinterpretBits(self, sigOrVal, toType):
"""
Cast object of same bit size between to other type
(f.e. bits to struct, union or array)
"""
if isinstance(sigOrVal, Value):
return reinterpretBits__val(self, sigOrVal, toType)
elif isinstance(toType, Bits):
return fitTo_t(sigOrVal, toType)
elif sigOrVal._dtype.bit_length() == toType.bit_length():
if isinstance(toType, HStruct):
raise reinterpret_bits_to_hstruct(sigOrVal, toType)
elif isinstance(toType, HUnion):
raise NotImplementedError()
elif isinstance(toType, HArray):
reinterpret_bits_to_harray(sigOrVal, toType)
return default_auto_cast_fn(self, sigOrVal, toType) | [
"def",
"reinterpretBits",
"(",
"self",
",",
"sigOrVal",
",",
"toType",
")",
":",
"if",
"isinstance",
"(",
"sigOrVal",
",",
"Value",
")",
":",
"return",
"reinterpretBits__val",
"(",
"self",
",",
"sigOrVal",
",",
"toType",
")",
"elif",
"isinstance",
"(",
"toType",
",",
"Bits",
")",
":",
"return",
"fitTo_t",
"(",
"sigOrVal",
",",
"toType",
")",
"elif",
"sigOrVal",
".",
"_dtype",
".",
"bit_length",
"(",
")",
"==",
"toType",
".",
"bit_length",
"(",
")",
":",
"if",
"isinstance",
"(",
"toType",
",",
"HStruct",
")",
":",
"raise",
"reinterpret_bits_to_hstruct",
"(",
"sigOrVal",
",",
"toType",
")",
"elif",
"isinstance",
"(",
"toType",
",",
"HUnion",
")",
":",
"raise",
"NotImplementedError",
"(",
")",
"elif",
"isinstance",
"(",
"toType",
",",
"HArray",
")",
":",
"reinterpret_bits_to_harray",
"(",
"sigOrVal",
",",
"toType",
")",
"return",
"default_auto_cast_fn",
"(",
"self",
",",
"sigOrVal",
",",
"toType",
")"
] | Cast object of same bit size between to other type
(f.e. bits to struct, union or array) | [
"Cast",
"object",
"of",
"same",
"bit",
"size",
"between",
"to",
"other",
"type",
"(",
"f",
".",
"e",
".",
"bits",
"to",
"struct",
"union",
"or",
"array",
")"
] | python | test |
DLR-RM/RAFCON | source/rafcon/gui/mygaphas/tools.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/tools.py#L579-L587 | def _set_motion_handle(self, event):
"""Sets motion handle to currently grabbed handle
"""
item = self.grabbed_item
handle = self.grabbed_handle
pos = event.x, event.y
self.motion_handle = HandleInMotion(item, handle, self.view)
self.motion_handle.GLUE_DISTANCE = self._parent_state_v.border_width
self.motion_handle.start_move(pos) | [
"def",
"_set_motion_handle",
"(",
"self",
",",
"event",
")",
":",
"item",
"=",
"self",
".",
"grabbed_item",
"handle",
"=",
"self",
".",
"grabbed_handle",
"pos",
"=",
"event",
".",
"x",
",",
"event",
".",
"y",
"self",
".",
"motion_handle",
"=",
"HandleInMotion",
"(",
"item",
",",
"handle",
",",
"self",
".",
"view",
")",
"self",
".",
"motion_handle",
".",
"GLUE_DISTANCE",
"=",
"self",
".",
"_parent_state_v",
".",
"border_width",
"self",
".",
"motion_handle",
".",
"start_move",
"(",
"pos",
")"
] | Sets motion handle to currently grabbed handle | [
"Sets",
"motion",
"handle",
"to",
"currently",
"grabbed",
"handle"
] | python | train |
Azure/azure-cli-extensions | src/storage-preview/azext_storage_preview/_format.py | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/storage-preview/azext_storage_preview/_format.py#L72-L90 | def transform_file_output(result):
""" Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories. """
from collections import OrderedDict
new_result = []
iterable = result if isinstance(result, list) else result.get('items', result)
for item in iterable:
new_entry = OrderedDict()
entity_type = item['type'] # type property is added by transform_file_directory_result
is_dir = entity_type == 'dir'
new_entry['Name'] = item['name'] + '/' if is_dir else item['name']
new_entry['Content Length'] = ' ' if is_dir else item['properties']['contentLength']
new_entry['Type'] = item['type']
new_entry['Last Modified'] = item['properties']['lastModified'] or ' '
new_result.append(new_entry)
return sorted(new_result, key=lambda k: k['Name']) | [
"def",
"transform_file_output",
"(",
"result",
")",
":",
"from",
"collections",
"import",
"OrderedDict",
"new_result",
"=",
"[",
"]",
"iterable",
"=",
"result",
"if",
"isinstance",
"(",
"result",
",",
"list",
")",
"else",
"result",
".",
"get",
"(",
"'items'",
",",
"result",
")",
"for",
"item",
"in",
"iterable",
":",
"new_entry",
"=",
"OrderedDict",
"(",
")",
"entity_type",
"=",
"item",
"[",
"'type'",
"]",
"# type property is added by transform_file_directory_result",
"is_dir",
"=",
"entity_type",
"==",
"'dir'",
"new_entry",
"[",
"'Name'",
"]",
"=",
"item",
"[",
"'name'",
"]",
"+",
"'/'",
"if",
"is_dir",
"else",
"item",
"[",
"'name'",
"]",
"new_entry",
"[",
"'Content Length'",
"]",
"=",
"' '",
"if",
"is_dir",
"else",
"item",
"[",
"'properties'",
"]",
"[",
"'contentLength'",
"]",
"new_entry",
"[",
"'Type'",
"]",
"=",
"item",
"[",
"'type'",
"]",
"new_entry",
"[",
"'Last Modified'",
"]",
"=",
"item",
"[",
"'properties'",
"]",
"[",
"'lastModified'",
"]",
"or",
"' '",
"new_result",
".",
"append",
"(",
"new_entry",
")",
"return",
"sorted",
"(",
"new_result",
",",
"key",
"=",
"lambda",
"k",
":",
"k",
"[",
"'Name'",
"]",
")"
] | Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories. | [
"Transform",
"to",
"convert",
"SDK",
"file",
"/",
"dir",
"list",
"output",
"to",
"something",
"that",
"more",
"clearly",
"distinguishes",
"between",
"files",
"and",
"directories",
"."
] | python | train |
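A self-contained sketch of the reshaping this transform performs, using a fabricated SDK-style listing; it assumes `transform_file_output` from the record above is in scope.

```python
listing = [
    {"name": "logs", "type": "dir",
     "properties": {"contentLength": None, "lastModified": None}},
    {"name": "report.csv", "type": "file",
     "properties": {"contentLength": 2048,
                    "lastModified": "2023-01-01T00:00:00+00:00"}},
]

for row in transform_file_output(listing):
    print(dict(row))
# {'Name': 'logs/', 'Content Length': ' ', 'Type': 'dir', 'Last Modified': ' '}
# {'Name': 'report.csv', 'Content Length': 2048, 'Type': 'file',
#  'Last Modified': '2023-01-01T00:00:00+00:00'}
```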
danielperna84/pyhomematic | pyhomematic/_hm.py | https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/_hm.py#L147-L196 | def createDeviceObjects(self, interface_id):
"""Transform the raw device descriptions into instances of devicetypes.generic.HMDevice or availabe subclass."""
global WORKING
WORKING = True
remote = interface_id.split('-')[-1]
LOG.debug(
"RPCFunctions.createDeviceObjects: iterating interface_id = %s" % (remote, ))
# First create parent object
for dev in self._devices_raw[remote]:
if not dev['PARENT']:
if dev['ADDRESS'] not in self.devices_all[remote]:
try:
if dev['TYPE'] in devicetypes.SUPPORTED:
deviceObject = devicetypes.SUPPORTED[dev['TYPE']](
dev, self._proxies[interface_id], self.resolveparamsets)
LOG.debug("RPCFunctions.createDeviceObjects: created %s as SUPPORTED device for %s" % (
dev['ADDRESS'], dev['TYPE']))
else:
deviceObject = devicetypes.UNSUPPORTED(
dev, self._proxies[interface_id], self.resolveparamsets)
LOG.debug("RPCFunctions.createDeviceObjects: created %s as UNSUPPORTED device for %s" % (
dev['ADDRESS'], dev['TYPE']))
LOG.debug(
"RPCFunctions.createDeviceObjects: adding to self.devices_all")
self.devices_all[remote][dev['ADDRESS']] = deviceObject
LOG.debug(
"RPCFunctions.createDeviceObjects: adding to self.devices")
self.devices[remote][dev['ADDRESS']] = deviceObject
except Exception as err:
LOG.critical(
"RPCFunctions.createDeviceObjects: Parent: %s", str(err))
# Then create all children for parent
for dev in self._devices_raw[remote]:
if dev['PARENT']:
try:
if dev['ADDRESS'] not in self.devices_all[remote]:
deviceObject = HMChannel(
dev, self._proxies[interface_id], self.resolveparamsets)
self.devices_all[remote][dev['ADDRESS']] = deviceObject
self.devices[remote][dev['PARENT']].CHANNELS[
dev['INDEX']] = deviceObject
except Exception as err:
LOG.critical(
"RPCFunctions.createDeviceObjects: Child: %s", str(err))
if self.devices_all[remote] and self.remotes[remote].get('resolvenames', False):
self.addDeviceNames(remote)
WORKING = False
if self.systemcallback:
self.systemcallback('createDeviceObjects')
return True | [
"def",
"createDeviceObjects",
"(",
"self",
",",
"interface_id",
")",
":",
"global",
"WORKING",
"WORKING",
"=",
"True",
"remote",
"=",
"interface_id",
".",
"split",
"(",
"'-'",
")",
"[",
"-",
"1",
"]",
"LOG",
".",
"debug",
"(",
"\"RPCFunctions.createDeviceObjects: iterating interface_id = %s\"",
"%",
"(",
"remote",
",",
")",
")",
"# First create parent object",
"for",
"dev",
"in",
"self",
".",
"_devices_raw",
"[",
"remote",
"]",
":",
"if",
"not",
"dev",
"[",
"'PARENT'",
"]",
":",
"if",
"dev",
"[",
"'ADDRESS'",
"]",
"not",
"in",
"self",
".",
"devices_all",
"[",
"remote",
"]",
":",
"try",
":",
"if",
"dev",
"[",
"'TYPE'",
"]",
"in",
"devicetypes",
".",
"SUPPORTED",
":",
"deviceObject",
"=",
"devicetypes",
".",
"SUPPORTED",
"[",
"dev",
"[",
"'TYPE'",
"]",
"]",
"(",
"dev",
",",
"self",
".",
"_proxies",
"[",
"interface_id",
"]",
",",
"self",
".",
"resolveparamsets",
")",
"LOG",
".",
"debug",
"(",
"\"RPCFunctions.createDeviceObjects: created %s as SUPPORTED device for %s\"",
"%",
"(",
"dev",
"[",
"'ADDRESS'",
"]",
",",
"dev",
"[",
"'TYPE'",
"]",
")",
")",
"else",
":",
"deviceObject",
"=",
"devicetypes",
".",
"UNSUPPORTED",
"(",
"dev",
",",
"self",
".",
"_proxies",
"[",
"interface_id",
"]",
",",
"self",
".",
"resolveparamsets",
")",
"LOG",
".",
"debug",
"(",
"\"RPCFunctions.createDeviceObjects: created %s as UNSUPPORTED device for %s\"",
"%",
"(",
"dev",
"[",
"'ADDRESS'",
"]",
",",
"dev",
"[",
"'TYPE'",
"]",
")",
")",
"LOG",
".",
"debug",
"(",
"\"RPCFunctions.createDeviceObjects: adding to self.devices_all\"",
")",
"self",
".",
"devices_all",
"[",
"remote",
"]",
"[",
"dev",
"[",
"'ADDRESS'",
"]",
"]",
"=",
"deviceObject",
"LOG",
".",
"debug",
"(",
"\"RPCFunctions.createDeviceObjects: adding to self.devices\"",
")",
"self",
".",
"devices",
"[",
"remote",
"]",
"[",
"dev",
"[",
"'ADDRESS'",
"]",
"]",
"=",
"deviceObject",
"except",
"Exception",
"as",
"err",
":",
"LOG",
".",
"critical",
"(",
"\"RPCFunctions.createDeviceObjects: Parent: %s\"",
",",
"str",
"(",
"err",
")",
")",
"# Then create all children for parent",
"for",
"dev",
"in",
"self",
".",
"_devices_raw",
"[",
"remote",
"]",
":",
"if",
"dev",
"[",
"'PARENT'",
"]",
":",
"try",
":",
"if",
"dev",
"[",
"'ADDRESS'",
"]",
"not",
"in",
"self",
".",
"devices_all",
"[",
"remote",
"]",
":",
"deviceObject",
"=",
"HMChannel",
"(",
"dev",
",",
"self",
".",
"_proxies",
"[",
"interface_id",
"]",
",",
"self",
".",
"resolveparamsets",
")",
"self",
".",
"devices_all",
"[",
"remote",
"]",
"[",
"dev",
"[",
"'ADDRESS'",
"]",
"]",
"=",
"deviceObject",
"self",
".",
"devices",
"[",
"remote",
"]",
"[",
"dev",
"[",
"'PARENT'",
"]",
"]",
".",
"CHANNELS",
"[",
"dev",
"[",
"'INDEX'",
"]",
"]",
"=",
"deviceObject",
"except",
"Exception",
"as",
"err",
":",
"LOG",
".",
"critical",
"(",
"\"RPCFunctions.createDeviceObjects: Child: %s\"",
",",
"str",
"(",
"err",
")",
")",
"if",
"self",
".",
"devices_all",
"[",
"remote",
"]",
"and",
"self",
".",
"remotes",
"[",
"remote",
"]",
".",
"get",
"(",
"'resolvenames'",
",",
"False",
")",
":",
"self",
".",
"addDeviceNames",
"(",
"remote",
")",
"WORKING",
"=",
"False",
"if",
"self",
".",
"systemcallback",
":",
"self",
".",
"systemcallback",
"(",
"'createDeviceObjects'",
")",
"return",
"True"
] | Transform the raw device descriptions into instances of devicetypes.generic.HMDevice or available subclass. | [
"Transform",
"the",
"raw",
"device",
"descriptions",
"into",
"instances",
"of",
"devicetypes",
".",
"generic",
".",
"HMDevice",
"or",
"availabe",
"subclass",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/variation/mutect2.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L48-L60 | def _add_region_params(region, out_file, items, gatk_type):
"""Add parameters for selecting by region to command line.
"""
params = []
variant_regions = bedutils.population_variant_regions(items)
region = subset_variant_regions(variant_regions, region, out_file, items)
if region:
if gatk_type == "gatk4":
params += ["-L", bamprep.region_to_gatk(region), "--interval-set-rule", "INTERSECTION"]
else:
params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"]
params += gatk.standard_cl_params(items)
return params | [
"def",
"_add_region_params",
"(",
"region",
",",
"out_file",
",",
"items",
",",
"gatk_type",
")",
":",
"params",
"=",
"[",
"]",
"variant_regions",
"=",
"bedutils",
".",
"population_variant_regions",
"(",
"items",
")",
"region",
"=",
"subset_variant_regions",
"(",
"variant_regions",
",",
"region",
",",
"out_file",
",",
"items",
")",
"if",
"region",
":",
"if",
"gatk_type",
"==",
"\"gatk4\"",
":",
"params",
"+=",
"[",
"\"-L\"",
",",
"bamprep",
".",
"region_to_gatk",
"(",
"region",
")",
",",
"\"--interval-set-rule\"",
",",
"\"INTERSECTION\"",
"]",
"else",
":",
"params",
"+=",
"[",
"\"-L\"",
",",
"bamprep",
".",
"region_to_gatk",
"(",
"region",
")",
",",
"\"--interval_set_rule\"",
",",
"\"INTERSECTION\"",
"]",
"params",
"+=",
"gatk",
".",
"standard_cl_params",
"(",
"items",
")",
"return",
"params"
] | Add parameters for selecting by region to command line. | [
"Add",
"parameters",
"for",
"selecting",
"by",
"region",
"to",
"command",
"line",
"."
] | python | train |
mrstephenneal/pdfconduit | sandbox/pdfrw_rotate.py | https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/sandbox/pdfrw_rotate.py#L12-L35 | def rotate(file_name, rotate, suffix='rotated', tempdir=None):
"""Rotate PDF by increments of 90 degrees."""
# Set output file name
if tempdir:
outfn = NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False).name
elif suffix:
outfn = os.path.join(os.path.dirname(file_name), add_suffix(file_name, suffix))
else:
outfn = NamedTemporaryFile(suffix='.pdf').name
trailer = PdfReader(file_name)
pages = trailer.pages
ranges = [[1, len(pages)]]
for onerange in ranges:
onerange = (onerange + onerange[-1:])[:2]
for pagenum in range(onerange[0] - 1, onerange[1]):
pages[pagenum].Rotate = (int(pages[pagenum].inheritable.Rotate or 0) + rotate) % 360
outdata = PdfWriter(outfn)
outdata.trailer = trailer
outdata.write()
return outfn | [
"def",
"rotate",
"(",
"file_name",
",",
"rotate",
",",
"suffix",
"=",
"'rotated'",
",",
"tempdir",
"=",
"None",
")",
":",
"# Set output file name",
"if",
"tempdir",
":",
"outfn",
"=",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.pdf'",
",",
"dir",
"=",
"tempdir",
",",
"delete",
"=",
"False",
")",
".",
"name",
"elif",
"suffix",
":",
"outfn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file_name",
")",
",",
"add_suffix",
"(",
"file_name",
",",
"suffix",
")",
")",
"else",
":",
"outfn",
"=",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"'.pdf'",
")",
".",
"name",
"trailer",
"=",
"PdfReader",
"(",
"file_name",
")",
"pages",
"=",
"trailer",
".",
"pages",
"ranges",
"=",
"[",
"[",
"1",
",",
"len",
"(",
"pages",
")",
"]",
"]",
"for",
"onerange",
"in",
"ranges",
":",
"onerange",
"=",
"(",
"onerange",
"+",
"onerange",
"[",
"-",
"1",
":",
"]",
")",
"[",
":",
"2",
"]",
"for",
"pagenum",
"in",
"range",
"(",
"onerange",
"[",
"0",
"]",
"-",
"1",
",",
"onerange",
"[",
"1",
"]",
")",
":",
"pages",
"[",
"pagenum",
"]",
".",
"Rotate",
"=",
"(",
"int",
"(",
"pages",
"[",
"pagenum",
"]",
".",
"inheritable",
".",
"Rotate",
"or",
"0",
")",
"+",
"rotate",
")",
"%",
"360",
"outdata",
"=",
"PdfWriter",
"(",
"outfn",
")",
"outdata",
".",
"trailer",
"=",
"trailer",
"outdata",
".",
"write",
"(",
")",
"return",
"outfn"
] | Rotate PDF by increments of 90 degrees. | [
"Rotate",
"PDF",
"by",
"increments",
"of",
"90",
"degrees",
"."
] | python | train |
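A brief usage sketch, assuming pdfrw is installed, the `rotate` function above is in scope, and an `input.pdf` sits next to the script; with no `tempdir` given, the rotated copy is written alongside the original with the suffix added to its name.

```python
# Rotate every page of a local PDF by 90 degrees
out_path = rotate("input.pdf", 90)
print("wrote", out_path)

# Rotations accumulate in steps of 90; 180 turns the pages upside down
rotate("input.pdf", 180, suffix="flipped")
```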
TrafficSenseMSD/SumoTools | traci/_vehicle.py | https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L803-L813 | def slowDown(self, vehID, speed, duration):
"""slowDown(string, double, int) -> None
Changes the speed smoothly to the given value over the given amount
of time in ms (can also be used to increase speed).
"""
self._connection._beginMessage(
tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_SLOWDOWN, vehID, 1 + 4 + 1 + 8 + 1 + 4)
self._connection._string += struct.pack(
"!BiBdBi", tc.TYPE_COMPOUND, 2, tc.TYPE_DOUBLE, speed, tc.TYPE_INTEGER, duration)
self._connection._sendExact() | [
"def",
"slowDown",
"(",
"self",
",",
"vehID",
",",
"speed",
",",
"duration",
")",
":",
"self",
".",
"_connection",
".",
"_beginMessage",
"(",
"tc",
".",
"CMD_SET_VEHICLE_VARIABLE",
",",
"tc",
".",
"CMD_SLOWDOWN",
",",
"vehID",
",",
"1",
"+",
"4",
"+",
"1",
"+",
"8",
"+",
"1",
"+",
"4",
")",
"self",
".",
"_connection",
".",
"_string",
"+=",
"struct",
".",
"pack",
"(",
"\"!BiBdBi\"",
",",
"tc",
".",
"TYPE_COMPOUND",
",",
"2",
",",
"tc",
".",
"TYPE_DOUBLE",
",",
"speed",
",",
"tc",
".",
"TYPE_INTEGER",
",",
"duration",
")",
"self",
".",
"_connection",
".",
"_sendExact",
"(",
")"
] | slowDown(string, double, int) -> None
Changes the speed smoothly to the given value over the given amount
of time in ms (can also be used to increase speed). | [
"slowDown",
"(",
"string",
"double",
"int",
")",
"-",
">",
"None"
] | python | train |
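A hedged usage sketch inside a running TraCI session; the SUMO configuration and vehicle id are placeholders, and in this API version the duration argument is an integer number of milliseconds.

```python
import traci

traci.start(["sumo", "-c", "scenario.sumocfg"])  # placeholder config

# Ease vehicle "veh0" down to 5 m/s over the next 3000 ms
traci.vehicle.slowDown("veh0", 5.0, 3000)

for _ in range(100):
    traci.simulationStep()
traci.close()
```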
synw/dataswim | dataswim/data/__init__.py | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/__init__.py#L107-L122 | def load_h5(self, filepath):
"""Load a Hdf5 file to the main dataframe
:param filepath: url of the Hdf5 file to load,
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type filepath: str
:example: ``ds.load_h5("./myfile.hdf5")``
"""
try:
self.start("Loading Hdf5 data...")
self.df = dd.io.load(filepath)
self.end("Finished loading Hdf5 data")
except Exception as e:
self.err(e, "Can not load Hdf5 file") | [
"def",
"load_h5",
"(",
"self",
",",
"filepath",
")",
":",
"try",
":",
"self",
".",
"start",
"(",
"\"Loading Hdf5 data...\"",
")",
"self",
".",
"df",
"=",
"dd",
".",
"io",
".",
"load",
"(",
"filepath",
")",
"self",
".",
"end",
"(",
"\"Finished loading Hdf5 data\"",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"err",
"(",
"e",
",",
"\"Can not load Hdf5 file\"",
")"
] | Load a Hdf5 file to the main dataframe
:param filepath: path of the Hdf5 file to load,
can be absolute if it starts with ``/``
or relative if it starts with ``./``
:type filepath: str
:example: ``ds.load_h5("./myfile.hdf5")`` | [
"Load",
"a",
"Hdf5",
"file",
"to",
"the",
"main",
"dataframe"
] | python | train |
apache/incubator-mxnet | example/cnn_text_classification/data_helpers.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/cnn_text_classification/data_helpers.py#L33-L50 | def clean_str(string):
"""Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", r" \( ", string)
string = re.sub(r"\)", r" \) ", string)
string = re.sub(r"\?", r" \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower() | [
"def",
"clean_str",
"(",
"string",
")",
":",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"[^A-Za-z0-9(),!?\\'\\`]\"",
",",
"\" \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'s\"",
",",
"\" \\'s\"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'ve\"",
",",
"\" \\'ve\"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"n\\'t\"",
",",
"\" n\\'t\"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'re\"",
",",
"\" \\'re\"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'d\"",
",",
"\" \\'d\"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\'ll\"",
",",
"\" \\'ll\"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\",\"",
",",
"\" , \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"!\"",
",",
"\" ! \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\(\"",
",",
"r\" \\( \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\)\"",
",",
"r\" \\) \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\?\"",
",",
"r\" \\? \"",
",",
"string",
")",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"\\s{2,}\"",
",",
"\" \"",
",",
"string",
")",
"return",
"string",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")"
] | Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py | [
"Tokenization",
"/",
"string",
"cleaning",
"for",
"all",
"datasets",
"except",
"for",
"SST",
".",
"Original",
"taken",
"from",
"https",
":",
"//",
"github",
".",
"com",
"/",
"yoonkim",
"/",
"CNN_sentence",
"/",
"blob",
"/",
"master",
"/",
"process_data",
".",
"py"
] | python | train |
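A quick illustration of what the clean_str function above does to a made-up sentence; it assumes clean_str (with its re import) from the record above is in scope.

sample = "Don't they know it's (almost) over?"
print(clean_str(sample))
# contractions are split ("do n't", "it 's"), "(", ")" and "?" are escaped and
# padded with spaces, and the whole string is lower-cased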
sampottinger/pycotracer | pycotracer/report_interpreters.py | https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/report_interpreters.py#L156-L209 | def interpret_expenditure_entry(entry):
"""Interpret data fields within a CO-TRACER expediture report.
Interpret the expenditure amount, expenditure date, filed date, amended,
and amendment fields of the provided entry. All dates (expenditure and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ExpenditureAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ExpenditureDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The expenditure report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry.
"""
try:
expenditure_amount = float(entry['ExpenditureAmount'])
entry['AmountsInterpreted'] = True
entry['ExpenditureAmount'] = expenditure_amount
except ValueError:
entry['AmountsInterpreted'] = False
try:
expenditure_date = parse_iso_str(entry['ExpenditureDate'])
filed_date = parse_iso_str(entry['FiledDate'])
entry['DatesInterpreted'] = True
entry['ExpenditureDate'] = expenditure_date
entry['FiledDate'] = filed_date
except ValueError:
entry['DatesInterpreted'] = False
try:
amended = parse_yes_no_str(entry['Amended'])
amendment = parse_yes_no_str(entry['Amendment'])
entry['BooleanFieldsInterpreted'] = True
entry['Amended'] = amended
entry['Amendment'] = amendment
except ValueError:
entry['BooleanFieldsInterpreted'] = False
return entry | [
"def",
"interpret_expenditure_entry",
"(",
"entry",
")",
":",
"try",
":",
"expenditure_amount",
"=",
"float",
"(",
"entry",
"[",
"'ExpenditureAmount'",
"]",
")",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'ExpenditureAmount'",
"]",
"=",
"expenditure_amount",
"except",
"ValueError",
":",
"entry",
"[",
"'AmountsInterpreted'",
"]",
"=",
"False",
"try",
":",
"expenditure_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'ExpenditureDate'",
"]",
")",
"filed_date",
"=",
"parse_iso_str",
"(",
"entry",
"[",
"'FiledDate'",
"]",
")",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'ExpenditureDate'",
"]",
"=",
"expenditure_date",
"entry",
"[",
"'FiledDate'",
"]",
"=",
"filed_date",
"except",
"ValueError",
":",
"entry",
"[",
"'DatesInterpreted'",
"]",
"=",
"False",
"try",
":",
"amended",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amended'",
"]",
")",
"amendment",
"=",
"parse_yes_no_str",
"(",
"entry",
"[",
"'Amendment'",
"]",
")",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"True",
"entry",
"[",
"'Amended'",
"]",
"=",
"amended",
"entry",
"[",
"'Amendment'",
"]",
"=",
"amendment",
"except",
"ValueError",
":",
"entry",
"[",
"'BooleanFieldsInterpreted'",
"]",
"=",
"False",
"return",
"entry"
] | Interpret data fields within a CO-TRACER expenditure report.
Interpret the expenditure amount, expenditure date, filed date, amended,
and amendment fields of the provided entry. All dates (expenditure and
filed) are interpreted together and, if any fails, all will retain their
original value. Likewise, amended and amendment are interpreted together and
if one is malformed, both will retain their original value. Entry may be
edited in place and side-effects are possible in coupled code. However,
client code should use the return value to guard against future changes.
A value with the key 'AmountsInterpreted' will be set to True or False in
the returned entry if floating point values are successfully interpreted
(ExpenditureAmount) or not respectively.
A value with the key 'DatesInterpreted' will be set to True or False in
the returned entry if ISO 8601 strings are successfully interpreted
(ExpenditureDate and FiledDate) or not respectively.
A value with the key 'BooleanFieldsInterpreted' will be set to True or
False in the returned entry if boolean strings are successfully interpreted
(Amended and Amendment) or not respectively.
@param entry: The expenditure report data to manipulate / interpret.
@type entry: dict
@return: The entry passed
@raise ValueError: Raised if any expected field cannot be found in entry. | [
"Interpret",
"data",
"fields",
"within",
"a",
"CO",
"-",
"TRACER",
"expediture",
"report",
"."
] | python | train |
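A rough sketch of pushing one record through interpret_expenditure_entry above; the field values are invented, and the exact date strings parse_iso_str accepts may differ, in which case DatesInterpreted simply comes back False.

entry = {
    'ExpenditureAmount': '250.00',
    'ExpenditureDate': '2013-05-01T00:00:00',   # illustrative ISO 8601 value
    'FiledDate': '2013-05-03T00:00:00',
    'Amended': 'N',
    'Amendment': 'N',
}
result = interpret_expenditure_entry(entry)
print(result['AmountsInterpreted'], result['ExpenditureAmount'])   # True 250.0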
TomAugspurger/engarde | engarde/checks.py | https://github.com/TomAugspurger/engarde/blob/e7ea040cf0d20aee7ca4375b8c27caa2d9e43945/engarde/checks.py#L244-L272 | def one_to_many(df, unitcol, manycol):
"""
Assert that a many-to-one relationship is preserved between two
columns. For example, a retail store will have distinct
departments, each with several employees. If each employee may
only work in a single department, then the relationship of the
department to the employees is one to many.
Parameters
==========
df : DataFrame
unitcol : str
The column that encapsulates the groups in ``manycol``.
manycol : str
The column that must remain unique in the distinct pairs
between ``manycol`` and ``unitcol``
Returns
=======
df : DataFrame
"""
subset = df[[manycol, unitcol]].drop_duplicates()
for many in subset[manycol].unique():
if subset[subset[manycol] == many].shape[0] > 1:
msg = "{} in {} has multiple values for {}".format(many, manycol, unitcol)
raise AssertionError(msg)
return df | [
"def",
"one_to_many",
"(",
"df",
",",
"unitcol",
",",
"manycol",
")",
":",
"subset",
"=",
"df",
"[",
"[",
"manycol",
",",
"unitcol",
"]",
"]",
".",
"drop_duplicates",
"(",
")",
"for",
"many",
"in",
"subset",
"[",
"manycol",
"]",
".",
"unique",
"(",
")",
":",
"if",
"subset",
"[",
"subset",
"[",
"manycol",
"]",
"==",
"many",
"]",
".",
"shape",
"[",
"0",
"]",
">",
"1",
":",
"msg",
"=",
"\"{} in {} has multiple values for {}\"",
".",
"format",
"(",
"many",
",",
"manycol",
",",
"unitcol",
")",
"raise",
"AssertionError",
"(",
"msg",
")",
"return",
"df"
] | Assert that a many-to-one relationship is preserved between two
columns. For example, a retail store will have distinct
departments, each with several employees. If each employee may
only work in a single department, then the relationship of the
department to the employees is one to many.
Parameters
==========
df : DataFrame
unitcol : str
The column that encapsulates the groups in ``manycol``.
manycol : str
The column that must remain unique in the distinct pairs
between ``manycol`` and ``unitcol``
Returns
=======
df : DataFrame | [
"Assert",
"that",
"a",
"many",
"-",
"to",
"-",
"one",
"relationship",
"is",
"preserved",
"between",
"two",
"columns",
".",
"For",
"example",
"a",
"retail",
"store",
"will",
"have",
"have",
"distinct",
"departments",
"each",
"with",
"several",
"employees",
".",
"If",
"each",
"employee",
"may",
"only",
"work",
"in",
"a",
"single",
"department",
"then",
"the",
"relationship",
"of",
"the",
"department",
"to",
"the",
"employees",
"is",
"one",
"to",
"many",
"."
] | python | train |
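A small self-contained check of the one_to_many guard above (column names invented): each employee may belong to only one department, so the first frame passes and the second raises.

import pandas as pd

ok = pd.DataFrame({'employee': ['ann', 'bob', 'cara'],
                   'department': ['toys', 'toys', 'garden']})
one_to_many(ok, 'department', 'employee')      # passes, returns the frame

bad = ok.copy()
bad.loc[3] = ['ann', 'garden']                 # "ann" now appears in two departments
try:
    one_to_many(bad, 'department', 'employee')
except AssertionError as exc:
    print(exc)                                 # ann in employee has multiple values for department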
thoughtworksarts/EmoPy | EmoPy/src/data_loader.py | https://github.com/thoughtworksarts/EmoPy/blob/a0ab97b3719ebe0a9de9bfc5adae5e46c9b77fd7/EmoPy/src/data_loader.py#L27-L39 | def _load_dataset(self, images, labels, emotion_index_map):
"""
Loads Dataset object with images, labels, and other data.
:param images: numpy array of image data
:param labels: numpy array of one-hot vector labels
:param emotion_index_map: map linking string/integer emotion class to integer index used in labels vectors
:return: Dataset object containing image and label data.
"""
train_images, test_images, train_labels, test_labels = train_test_split(images, labels, test_size=self.validation_split, random_state=42, stratify=labels)
dataset = Dataset(train_images, test_images, train_labels, test_labels, emotion_index_map, self.time_delay)
return dataset | [
"def",
"_load_dataset",
"(",
"self",
",",
"images",
",",
"labels",
",",
"emotion_index_map",
")",
":",
"train_images",
",",
"test_images",
",",
"train_labels",
",",
"test_labels",
"=",
"train_test_split",
"(",
"images",
",",
"labels",
",",
"test_size",
"=",
"self",
".",
"validation_split",
",",
"random_state",
"=",
"42",
",",
"stratify",
"=",
"labels",
")",
"dataset",
"=",
"Dataset",
"(",
"train_images",
",",
"test_images",
",",
"train_labels",
",",
"test_labels",
",",
"emotion_index_map",
",",
"self",
".",
"time_delay",
")",
"return",
"dataset"
] | Loads Dataset object with images, labels, and other data.
:param images: numpy array of image data
:param labels: numpy array of one-hot vector labels
:param emotion_index_map: map linking string/integer emotion class to integer index used in labels vectors
:return: Dataset object containing image and label data. | [
"Loads",
"Dataset",
"object",
"with",
"images",
"labels",
"and",
"other",
"data",
"."
] | python | train |
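The split inside _load_dataset above is a plain stratified train/validation split; a stand-alone sketch of the same idea with dummy arrays (shapes and class count are made up):

import numpy as np
from sklearn.model_selection import train_test_split

images = np.random.rand(100, 48, 48)            # dummy image stack
labels = np.eye(3)[np.arange(100) % 3]          # dummy one-hot labels, 3 classes
x_train, x_test, y_train, y_test = train_test_split(
    images, labels, test_size=0.2, random_state=42, stratify=labels)
print(x_train.shape, x_test.shape)              # (80, 48, 48) (20, 48, 48)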
openego/ding0 | ding0/core/__init__.py | https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L364-L469 | def build_lv_grid_district(self,
lv_load_area,
lv_grid_districts,
lv_stations):
"""Instantiates and associates lv_grid_district incl grid and station.
The instantiation creates more or less empty objects including relevant
data for transformer choice and grid creation
Parameters
----------
lv_load_area: :shapely:`Shapely Polygon object<polygons>`
load_area object
lv_grid_districts: :pandas:`pandas.DataFrame<dataframe>`
Table containing lv_grid_districts of the corresponding load_area
lv_stations : :pandas:`pandas.DataFrame<dataframe>`
Table containing lv_stations of the corresponding load_area
"""
# There's no LVGD for current LA
# -> TEMP WORKAROUND: Create single LVGD from LA, replace unknown values by zero
# TODO: Fix #155 (see also: data_processing #68)
if len(lv_grid_districts) == 0:
# raise ValueError(
# 'Load Area {} has no LVGD - please re-open #155'.format(
# repr(lv_load_area)))
geom = wkt_dumps(lv_load_area.geo_area)
lv_grid_districts = \
lv_grid_districts.append(
pd.DataFrame(
{'la_id': [lv_load_area.id_db],
'geom': [geom],
'population': [0],
'peak_load_residential': [lv_load_area.peak_load_residential],
'peak_load_retail': [lv_load_area.peak_load_retail],
'peak_load_industrial': [lv_load_area.peak_load_industrial],
'peak_load_agricultural': [lv_load_area.peak_load_agricultural],
'sector_count_residential': [0],
'sector_count_retail': [0],
'sector_count_industrial': [0],
'sector_count_agricultural': [0],
'sector_consumption_residential': [0],
'sector_consumption_retail': [0],
'sector_consumption_industrial': [0],
'sector_consumption_agricultural': [0]
},
index=[lv_load_area.id_db]
)
)
lv_nominal_voltage = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
# Associate lv_grid_district to load_area
for id, row in lv_grid_districts.iterrows():
lv_grid_district = LVGridDistrictDing0(
id_db=id,
lv_load_area=lv_load_area,
geo_data=wkt_loads(row['geom']),
population=0 if isnan(row['population']) else int(row['population']),
peak_load_residential=row['peak_load_residential'],
peak_load_retail=row['peak_load_retail'],
peak_load_industrial=row['peak_load_industrial'],
peak_load_agricultural=row['peak_load_agricultural'],
peak_load=(row['peak_load_residential'] +
row['peak_load_retail'] +
row['peak_load_industrial'] +
row['peak_load_agricultural']),
sector_count_residential=int(row['sector_count_residential']),
sector_count_retail=int(row['sector_count_retail']),
sector_count_industrial=int(row['sector_count_industrial']),
sector_count_agricultural=int(row['sector_count_agricultural']),
sector_consumption_residential=row[
'sector_consumption_residential'],
sector_consumption_retail=row['sector_consumption_retail'],
sector_consumption_industrial=row[
'sector_consumption_industrial'],
sector_consumption_agricultural=row[
'sector_consumption_agricultural'])
# be aware, lv_grid takes grid district's geom!
lv_grid = LVGridDing0(network=self,
grid_district=lv_grid_district,
id_db=id,
geo_data=wkt_loads(row['geom']),
v_level=lv_nominal_voltage)
# create LV station
lv_station = LVStationDing0(
id_db=id,
grid=lv_grid,
lv_load_area=lv_load_area,
geo_data=wkt_loads(lv_stations.loc[id, 'geom'])
if id in lv_stations.index.values
else lv_load_area.geo_centre,
peak_load=lv_grid_district.peak_load)
# assign created objects
# note: creation of LV grid is done separately,
# see NetworkDing0.build_lv_grids()
lv_grid.add_station(lv_station)
lv_grid_district.lv_grid = lv_grid
lv_load_area.add_lv_grid_district(lv_grid_district) | [
"def",
"build_lv_grid_district",
"(",
"self",
",",
"lv_load_area",
",",
"lv_grid_districts",
",",
"lv_stations",
")",
":",
"# There's no LVGD for current LA",
"# -> TEMP WORKAROUND: Create single LVGD from LA, replace unknown valuess by zero",
"# TODO: Fix #155 (see also: data_processing #68)",
"if",
"len",
"(",
"lv_grid_districts",
")",
"==",
"0",
":",
"# raise ValueError(",
"# 'Load Area {} has no LVGD - please re-open #155'.format(",
"# repr(lv_load_area)))",
"geom",
"=",
"wkt_dumps",
"(",
"lv_load_area",
".",
"geo_area",
")",
"lv_grid_districts",
"=",
"lv_grid_districts",
".",
"append",
"(",
"pd",
".",
"DataFrame",
"(",
"{",
"'la_id'",
":",
"[",
"lv_load_area",
".",
"id_db",
"]",
",",
"'geom'",
":",
"[",
"geom",
"]",
",",
"'population'",
":",
"[",
"0",
"]",
",",
"'peak_load_residential'",
":",
"[",
"lv_load_area",
".",
"peak_load_residential",
"]",
",",
"'peak_load_retail'",
":",
"[",
"lv_load_area",
".",
"peak_load_retail",
"]",
",",
"'peak_load_industrial'",
":",
"[",
"lv_load_area",
".",
"peak_load_industrial",
"]",
",",
"'peak_load_agricultural'",
":",
"[",
"lv_load_area",
".",
"peak_load_agricultural",
"]",
",",
"'sector_count_residential'",
":",
"[",
"0",
"]",
",",
"'sector_count_retail'",
":",
"[",
"0",
"]",
",",
"'sector_count_industrial'",
":",
"[",
"0",
"]",
",",
"'sector_count_agricultural'",
":",
"[",
"0",
"]",
",",
"'sector_consumption_residential'",
":",
"[",
"0",
"]",
",",
"'sector_consumption_retail'",
":",
"[",
"0",
"]",
",",
"'sector_consumption_industrial'",
":",
"[",
"0",
"]",
",",
"'sector_consumption_agricultural'",
":",
"[",
"0",
"]",
"}",
",",
"index",
"=",
"[",
"lv_load_area",
".",
"id_db",
"]",
")",
")",
"lv_nominal_voltage",
"=",
"cfg_ding0",
".",
"get",
"(",
"'assumptions'",
",",
"'lv_nominal_voltage'",
")",
"# Associate lv_grid_district to load_area",
"for",
"id",
",",
"row",
"in",
"lv_grid_districts",
".",
"iterrows",
"(",
")",
":",
"lv_grid_district",
"=",
"LVGridDistrictDing0",
"(",
"id_db",
"=",
"id",
",",
"lv_load_area",
"=",
"lv_load_area",
",",
"geo_data",
"=",
"wkt_loads",
"(",
"row",
"[",
"'geom'",
"]",
")",
",",
"population",
"=",
"0",
"if",
"isnan",
"(",
"row",
"[",
"'population'",
"]",
")",
"else",
"int",
"(",
"row",
"[",
"'population'",
"]",
")",
",",
"peak_load_residential",
"=",
"row",
"[",
"'peak_load_residential'",
"]",
",",
"peak_load_retail",
"=",
"row",
"[",
"'peak_load_retail'",
"]",
",",
"peak_load_industrial",
"=",
"row",
"[",
"'peak_load_industrial'",
"]",
",",
"peak_load_agricultural",
"=",
"row",
"[",
"'peak_load_agricultural'",
"]",
",",
"peak_load",
"=",
"(",
"row",
"[",
"'peak_load_residential'",
"]",
"+",
"row",
"[",
"'peak_load_retail'",
"]",
"+",
"row",
"[",
"'peak_load_industrial'",
"]",
"+",
"row",
"[",
"'peak_load_agricultural'",
"]",
")",
",",
"sector_count_residential",
"=",
"int",
"(",
"row",
"[",
"'sector_count_residential'",
"]",
")",
",",
"sector_count_retail",
"=",
"int",
"(",
"row",
"[",
"'sector_count_retail'",
"]",
")",
",",
"sector_count_industrial",
"=",
"int",
"(",
"row",
"[",
"'sector_count_industrial'",
"]",
")",
",",
"sector_count_agricultural",
"=",
"int",
"(",
"row",
"[",
"'sector_count_agricultural'",
"]",
")",
",",
"sector_consumption_residential",
"=",
"row",
"[",
"'sector_consumption_residential'",
"]",
",",
"sector_consumption_retail",
"=",
"row",
"[",
"'sector_consumption_retail'",
"]",
",",
"sector_consumption_industrial",
"=",
"row",
"[",
"'sector_consumption_industrial'",
"]",
",",
"sector_consumption_agricultural",
"=",
"row",
"[",
"'sector_consumption_agricultural'",
"]",
")",
"# be aware, lv_grid takes grid district's geom!",
"lv_grid",
"=",
"LVGridDing0",
"(",
"network",
"=",
"self",
",",
"grid_district",
"=",
"lv_grid_district",
",",
"id_db",
"=",
"id",
",",
"geo_data",
"=",
"wkt_loads",
"(",
"row",
"[",
"'geom'",
"]",
")",
",",
"v_level",
"=",
"lv_nominal_voltage",
")",
"# create LV station",
"lv_station",
"=",
"LVStationDing0",
"(",
"id_db",
"=",
"id",
",",
"grid",
"=",
"lv_grid",
",",
"lv_load_area",
"=",
"lv_load_area",
",",
"geo_data",
"=",
"wkt_loads",
"(",
"lv_stations",
".",
"loc",
"[",
"id",
",",
"'geom'",
"]",
")",
"if",
"id",
"in",
"lv_stations",
".",
"index",
".",
"values",
"else",
"lv_load_area",
".",
"geo_centre",
",",
"peak_load",
"=",
"lv_grid_district",
".",
"peak_load",
")",
"# assign created objects",
"# note: creation of LV grid is done separately,",
"# see NetworkDing0.build_lv_grids()",
"lv_grid",
".",
"add_station",
"(",
"lv_station",
")",
"lv_grid_district",
".",
"lv_grid",
"=",
"lv_grid",
"lv_load_area",
".",
"add_lv_grid_district",
"(",
"lv_grid_district",
")"
] | Instantiates and associates lv_grid_district incl grid and station.
The instantiation creates more or less empty objects including relevant
data for transformer choice and grid creation
Parameters
----------
lv_load_area: :shapely:`Shapely Polygon object<polygons>`
load_area object
lv_grid_districts: :pandas:`pandas.DataFrame<dataframe>`
Table containing lv_grid_districts of the corresponding load_area
lv_stations : :pandas:`pandas.DataFrame<dataframe>`
Table containing lv_stations of the corresponding load_area | [
"Instantiates",
"and",
"associates",
"lv_grid_district",
"incl",
"grid",
"and",
"station",
".",
"The",
"instantiation",
"creates",
"more",
"or",
"less",
"empty",
"objects",
"including",
"relevant",
"data",
"for",
"transformer",
"choice",
"and",
"grid",
"creation"
] | python | train |
noxdafox/vminspect | vminspect/comparator.py | https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/comparator.py#L67-L103 | def compare(self, concurrent=False, identify=False, size=False):
"""Compares the two disks according to flags.
Generates the following report:
::
{'created_files': [{'path': '/file/in/disk1/not/in/disk0',
'sha1': 'sha1_of_the_file'}],
'deleted_files': [{'path': '/file/in/disk0/not/in/disk1',
'original_sha1': 'sha1_of_the_file'}],
'modified_files': [{'path': '/file/both/disks/but/different',
'sha1': 'sha1_of_the_file_on_disk0',
'original_sha1': 'sha1_of_the_file_on_disk0'}]}
If concurrent is set to True, the logic will use multiple CPUs to
speed up the process.
The identify and size keywords will add respectively the type
and the size of the files to the results.
"""
self.logger.debug("Comparing FS contents.")
results = compare_filesystems(self.filesystems[0], self.filesystems[1],
concurrent=concurrent)
if identify:
self.logger.debug("Gatering file types.")
results = files_type(self.filesystems[0], self.filesystems[1],
results)
if size:
self.logger.debug("Gatering file sizes.")
results = files_size(self.filesystems[0], self.filesystems[1],
results)
return results | [
"def",
"compare",
"(",
"self",
",",
"concurrent",
"=",
"False",
",",
"identify",
"=",
"False",
",",
"size",
"=",
"False",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Comparing FS contents.\"",
")",
"results",
"=",
"compare_filesystems",
"(",
"self",
".",
"filesystems",
"[",
"0",
"]",
",",
"self",
".",
"filesystems",
"[",
"1",
"]",
",",
"concurrent",
"=",
"concurrent",
")",
"if",
"identify",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Gatering file types.\"",
")",
"results",
"=",
"files_type",
"(",
"self",
".",
"filesystems",
"[",
"0",
"]",
",",
"self",
".",
"filesystems",
"[",
"1",
"]",
",",
"results",
")",
"if",
"size",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Gatering file sizes.\"",
")",
"results",
"=",
"files_size",
"(",
"self",
".",
"filesystems",
"[",
"0",
"]",
",",
"self",
".",
"filesystems",
"[",
"1",
"]",
",",
"results",
")",
"return",
"results"
] | Compares the two disks according to flags.
Generates the following report:
::
{'created_files': [{'path': '/file/in/disk1/not/in/disk0',
'sha1': 'sha1_of_the_file'}],
'deleted_files': [{'path': '/file/in/disk0/not/in/disk1',
'original_sha1': 'sha1_of_the_file'}],
'modified_files': [{'path': '/file/both/disks/but/different',
'sha1': 'sha1_of_the_file_on_disk0',
'original_sha1': 'sha1_of_the_file_on_disk0'}]}
If concurrent is set to True, the logic will use multiple CPUs to
speed up the process.
The identify and size keywords will add respectively the type
and the size of the files to the results. | [
"Compares",
"the",
"two",
"disks",
"according",
"to",
"flags",
"."
] | python | train |
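A hedged sketch of driving the compare method above; the DiskComparator class name, its use as a context manager and the snapshot paths are assumptions based on this module's shape, not verified API.

from vminspect.comparator import DiskComparator   # assumed import path

with DiskComparator('snapshot0.qcow2', 'snapshot1.qcow2') as comparator:
    report = comparator.compare(concurrent=True, identify=True, size=True)

for item in report['created_files']:
    print(item['path'], item['sha1'])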
python-bonobo/bonobo | bonobo/execution/contexts/node.py | https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/execution/contexts/node.py#L254-L265 | def write(self, *messages):
"""
Push a message list to this context's input queue.
:param mixed value: message
"""
for message in messages:
if not isinstance(message, Token):
message = ensure_tuple(message, cls=self._input_type, length=self._input_length)
if self._input_length is None:
self._input_length = len(message)
self.input.put(message) | [
"def",
"write",
"(",
"self",
",",
"*",
"messages",
")",
":",
"for",
"message",
"in",
"messages",
":",
"if",
"not",
"isinstance",
"(",
"message",
",",
"Token",
")",
":",
"message",
"=",
"ensure_tuple",
"(",
"message",
",",
"cls",
"=",
"self",
".",
"_input_type",
",",
"length",
"=",
"self",
".",
"_input_length",
")",
"if",
"self",
".",
"_input_length",
"is",
"None",
":",
"self",
".",
"_input_length",
"=",
"len",
"(",
"message",
")",
"self",
".",
"input",
".",
"put",
"(",
"message",
")"
] | Push a message list to this context's input queue.
:param mixed value: message | [
"Push",
"a",
"message",
"list",
"to",
"this",
"context",
"s",
"input",
"queue",
"."
] | python | train |
42cc/bets-api | bets/__init__.py | https://github.com/42cc/bets-api/blob/63a8227c7d8c65eef9974374607bc34effff5c7c/bets/__init__.py#L141-L148 | def get_project_slug(self, bet):
'''Return slug of a project that given bet is associated with
or None if bet is not associated with any project.
'''
if bet.get('form_params'):
params = json.loads(bet['form_params'])
return params.get('project')
return None | [
"def",
"get_project_slug",
"(",
"self",
",",
"bet",
")",
":",
"if",
"bet",
".",
"get",
"(",
"'form_params'",
")",
":",
"params",
"=",
"json",
".",
"loads",
"(",
"bet",
"[",
"'form_params'",
"]",
")",
"return",
"params",
".",
"get",
"(",
"'project'",
")",
"return",
"None"
] | Return slug of a project that given bet is associated with
or None if bet is not associated with any project. | [
"Return",
"slug",
"of",
"a",
"project",
"that",
"given",
"bet",
"is",
"associated",
"with",
"or",
"None",
"if",
"bet",
"is",
"not",
"associated",
"with",
"any",
"project",
"."
] | python | valid |
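The method above simply decodes the JSON stored in form_params; the same steps on an invented bet record look like this:

import json

bet = {'form_params': '{"project": "demo-project", "amount": 5}'}
params = json.loads(bet['form_params'])
print(params.get('project'))   # 'demo-project', which is what get_project_slug returns for this bet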
rigetti/pyquil | examples/pointer.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/examples/pointer.py#L67-L74 | def fixup(p, data_bits, ptr_bits, bits_set):
"""
Flip back the pointer qubits that were previously flipped indicated by
the flags `bits_set`.
"""
for i in range(ptr_bits):
if 0 != bits_set & (1 << i):
p.inst(X(data_bits + i)) | [
"def",
"fixup",
"(",
"p",
",",
"data_bits",
",",
"ptr_bits",
",",
"bits_set",
")",
":",
"for",
"i",
"in",
"range",
"(",
"ptr_bits",
")",
":",
"if",
"0",
"!=",
"bits_set",
"&",
"(",
"1",
"<<",
"i",
")",
":",
"p",
".",
"inst",
"(",
"X",
"(",
"data_bits",
"+",
"i",
")",
")"
] | Flip back the pointer qubits that were previously flipped indicated by
the flags `bits_set`. | [
"Flip",
"back",
"the",
"pointer",
"qubits",
"that",
"were",
"previously",
"flipped",
"indicated",
"by",
"the",
"flags",
"bits_set",
"."
] | python | train |
oursky/norecaptcha | norecaptcha/captcha.py | https://github.com/oursky/norecaptcha/blob/6323054bf42c1bf35c5d7a7def4729cb32518860/norecaptcha/captcha.py#L98-L151 | def submit(recaptcha_response_field,
secret_key,
remoteip,
verify_server=VERIFY_SERVER):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response_field -- The value from the form
secret_key -- your reCAPTCHA secret key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and len(recaptcha_response_field)):
return RecaptchaResponse(
is_valid=False,
error_code='incorrect-captcha-sol'
)
def encode_if_necessary(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
params = urllib.urlencode({
'secret': encode_if_necessary(secret_key),
'remoteip': encode_if_necessary(remoteip),
'response': encode_if_necessary(recaptcha_response_field),
})
request = Request(
url="https://%s/recaptcha/api/siteverify" % verify_server,
data=params,
headers={
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "noReCAPTCHA Python"
}
)
httpresp = urlopen(request)
return_values = json.loads(httpresp.read())
httpresp.close()
return_code = return_values['success']
error_codes = return_values.get('error-codes', [])
if return_code:
return RecaptchaResponse(is_valid=True)
else:
return RecaptchaResponse(
is_valid=False,
error_code=error_codes
) | [
"def",
"submit",
"(",
"recaptcha_response_field",
",",
"secret_key",
",",
"remoteip",
",",
"verify_server",
"=",
"VERIFY_SERVER",
")",
":",
"if",
"not",
"(",
"recaptcha_response_field",
"and",
"len",
"(",
"recaptcha_response_field",
")",
")",
":",
"return",
"RecaptchaResponse",
"(",
"is_valid",
"=",
"False",
",",
"error_code",
"=",
"'incorrect-captcha-sol'",
")",
"def",
"encode_if_necessary",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"return",
"s",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"s",
"params",
"=",
"urllib",
".",
"urlencode",
"(",
"{",
"'secret'",
":",
"encode_if_necessary",
"(",
"secret_key",
")",
",",
"'remoteip'",
":",
"encode_if_necessary",
"(",
"remoteip",
")",
",",
"'response'",
":",
"encode_if_necessary",
"(",
"recaptcha_response_field",
")",
",",
"}",
")",
"request",
"=",
"Request",
"(",
"url",
"=",
"\"https://%s/recaptcha/api/siteverify\"",
"%",
"verify_server",
",",
"data",
"=",
"params",
",",
"headers",
"=",
"{",
"\"Content-type\"",
":",
"\"application/x-www-form-urlencoded\"",
",",
"\"User-agent\"",
":",
"\"noReCAPTCHA Python\"",
"}",
")",
"httpresp",
"=",
"urlopen",
"(",
"request",
")",
"return_values",
"=",
"json",
".",
"loads",
"(",
"httpresp",
".",
"read",
"(",
")",
")",
"httpresp",
".",
"close",
"(",
")",
"return_code",
"=",
"return_values",
"[",
"'success'",
"]",
"error_codes",
"=",
"return_values",
".",
"get",
"(",
"'error-codes'",
",",
"[",
"]",
")",
"if",
"return_code",
":",
"return",
"RecaptchaResponse",
"(",
"is_valid",
"=",
"True",
")",
"else",
":",
"return",
"RecaptchaResponse",
"(",
"is_valid",
"=",
"False",
",",
"error_code",
"=",
"error_codes",
")"
] | Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response_field -- The value from the form
secret_key -- your reCAPTCHA secret key
remoteip -- the user's ip address | [
"Submits",
"a",
"reCAPTCHA",
"request",
"for",
"verification",
".",
"Returns",
"RecaptchaResponse",
"for",
"the",
"request"
] | python | train |
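A hedged sketch of validating a form post with the submit function above from a Flask view; the import path, the g-recaptcha-response field name and the secret key are placeholders/assumptions.

from flask import Flask, request
from norecaptcha.captcha import submit   # assumed import path

app = Flask(__name__)
RECAPTCHA_SECRET_KEY = "your-secret-key"  # placeholder

@app.route("/signup", methods=["POST"])
def signup():
    result = submit(request.form.get("g-recaptcha-response", ""),
                    RECAPTCHA_SECRET_KEY,
                    request.remote_addr)
    if not result.is_valid:
        return "captcha failed: {}".format(result.error_code), 400
    return "ok"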
pytroll/satpy | satpy/readers/goes_imager_nc.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/goes_imager_nc.py#L682-L689 | def _is_yaw_flip(lat, delta=10):
"""Determine whether the satellite is yaw-flipped ('upside down')"""
logger.debug('Computing yaw flip flag')
# In case of yaw-flip the data and coordinates in the netCDF files are
# also flipped. Just check whether the latitude increases or decreases
# with the line number.
crow, ccol = np.array(lat.shape) // 2
return (lat[crow+delta, ccol] - lat[crow, ccol]).values > 0 | [
"def",
"_is_yaw_flip",
"(",
"lat",
",",
"delta",
"=",
"10",
")",
":",
"logger",
".",
"debug",
"(",
"'Computing yaw flip flag'",
")",
"# In case of yaw-flip the data and coordinates in the netCDF files are",
"# also flipped. Just check whether the latitude increases or decrases",
"# with the line number.",
"crow",
",",
"ccol",
"=",
"np",
".",
"array",
"(",
"lat",
".",
"shape",
")",
"//",
"2",
"return",
"(",
"lat",
"[",
"crow",
"+",
"delta",
",",
"ccol",
"]",
"-",
"lat",
"[",
"crow",
",",
"ccol",
"]",
")",
".",
"values",
">",
"0"
] | Determine whether the satellite is yaw-flipped ('upside down') | [
"Determine",
"whether",
"the",
"satellite",
"is",
"yaw",
"-",
"flipped",
"(",
"upside",
"down",
")"
] | python | train |
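A small synthetic check of the helper above; it assumes lat behaves like an xarray DataArray (the .values access suggests that), with latitude varying along the first axis.

import numpy as np
import xarray as xr

# latitude increasing with the line number -> flagged as yaw flip
lat = xr.DataArray(np.linspace(-60, 60, 500)[:, None] * np.ones((1, 500)))
print(_is_yaw_flip(lat))           # True

# latitude decreasing with the line number -> no flip
print(_is_yaw_flip(lat[::-1, :]))  # False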
lambdalisue/maidenhair | src/maidenhair/functions.py | https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/functions.py#L24-L145 | def load(pathname, using=None, unite=False, basecolumn=0,
relative=False, baseline=None,
parser=None, loader=None,
with_filename=False, recursive=False, natsort=True, **kwargs):
"""
Load data from file matched with given glob pattern.
Return value will be a list of data unless :attr:`unite` is `True`.
If :attr:`unite` is `True` then all data will be united into a single data.
Parameters
----------
pathname : string or list
A glob pattern or a list of glob pattern which will be used to load
data.
using : integer list or slice instance, optional
A list of index or slice instance which will be used to slice data
columns.
unite : boolean, optional:
If it is `True` then dataset will be united into a single numpy
array. See usage for more detail.
basecolumn : integer, optional
An index of base column. all data will be trimmed based on the order
of this column when the number of samples are different among the
dataset.
It only takes effect when :attr:`unite` is specified as `True`.
relative : boolean, optional
Make the dataset relative to the first data by using
:func:`maidenhair.filters.relative.relative` function.
baseline : function, None, optional
A function which will take data columns and return regulated data
columns.
It is useful to regulate baseline of each data in dataset.
parser : instance, string, None, optional
An instance or registered name of parser class.
If it is not specified, default parser specified with
:func:`maidenhair.functions.set_default_parser` will be used instead.
loader : instance, string, None, optional
An instance or registered name of loader class.
If it is not specified, default loader specified with
:func:`maidenhair.functions.set_default_loader` will be used instead.
with_filename : boolean, optional
If it is `True`, returning dataset will contain filename in the
first column.
It cannot be used with :attr:`unite = True`
recursive : boolean, optional
Recursively find pattern in the directory
natsort : boolean
Naturally sort found files.
Returns
-------
list
A list of numpy array
Examples
--------
Assume that there are five independent experimental data files for three types
of samples, namely there are fifteen data files in total.
Each data file would have two directions (X and Y) and 100 data points.
Its filenames would be formatted as
`<type number>.<experimental number>.txt`
and saved in the `tests/fixtures` directory.
Then the loading code will be
>>> import maidenhair
>>> dataset = []
>>> dataset += maidenhair.load('tests/fixtures/1.*.txt',
... unite=True, using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/2.*.txt',
... unite=True, using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/3.*.txt',
... unite=True, using=(0, 1))
>>> len(dataset) # number of samples
3
>>> len(dataset[0]) # number of axis (X and Y)
2
>>> len(dataset[0][0]) # number of data points
100
>>> len(dataset[0][0][0]) # number of columns
5
Without using `unite=True`, the dataset will be
>>> import numpy as np
>>> import maidenhair
>>> dataset = []
>>> dataset += maidenhair.load('tests/fixtures/1.*.txt', using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/2.*.txt', using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/3.*.txt', using=(0, 1))
>>> len(dataset) # number of samples
15
>>> len(dataset[0]) # number of axis (X and Y)
2
>>> len(dataset[0][0]) # number of data points
100
>>> isinstance(dataset[0][0][0], np.float64)
True
"""
parser = parser or get_default_parser()
loader = loader or get_default_loader()
# make sure the pathname is a list
if not isinstance(pathname, (list, tuple)):
pathname = [pathname]
dataset = []
for _pathname in pathname:
dataset += loader.glob(_pathname,
using=using, parser=parser,
unite=unite, basecolumn=basecolumn,
with_filename=with_filename,
recursive=recursive,
natsort=natsort,
**kwargs)
if relative:
from maidenhair.filters import relative
dataset = relative(dataset)
if baseline is not None:
for i, data in enumerate(dataset):
dataset[i] = baseline(data)
return dataset | [
"def",
"load",
"(",
"pathname",
",",
"using",
"=",
"None",
",",
"unite",
"=",
"False",
",",
"basecolumn",
"=",
"0",
",",
"relative",
"=",
"False",
",",
"baseline",
"=",
"None",
",",
"parser",
"=",
"None",
",",
"loader",
"=",
"None",
",",
"with_filename",
"=",
"False",
",",
"recursive",
"=",
"False",
",",
"natsort",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"parser",
"=",
"parser",
"or",
"get_default_parser",
"(",
")",
"loader",
"=",
"loader",
"or",
"get_default_loader",
"(",
")",
"# make sure the pathname is a list",
"if",
"not",
"isinstance",
"(",
"pathname",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"pathname",
"=",
"[",
"pathname",
"]",
"dataset",
"=",
"[",
"]",
"for",
"_pathname",
"in",
"pathname",
":",
"dataset",
"+=",
"loader",
".",
"glob",
"(",
"_pathname",
",",
"using",
"=",
"using",
",",
"parser",
"=",
"parser",
",",
"unite",
"=",
"unite",
",",
"basecolumn",
"=",
"basecolumn",
",",
"with_filename",
"=",
"with_filename",
",",
"recursive",
"=",
"recursive",
",",
"natsort",
"=",
"natsort",
",",
"*",
"*",
"kwargs",
")",
"if",
"relative",
":",
"from",
"maidenhair",
".",
"filters",
"import",
"relative",
"dataset",
"=",
"relative",
"(",
"dataset",
")",
"if",
"baseline",
"is",
"not",
"None",
":",
"for",
"i",
",",
"data",
"in",
"enumerate",
"(",
"dataset",
")",
":",
"dataset",
"[",
"i",
"]",
"=",
"baseline",
"(",
"data",
")",
"return",
"dataset"
] | Load data from file matched with given glob pattern.
Return value will be a list of data unless :attr:`unite` is `True`.
If :attr:`unite` is `True` then all data will be united into a single data.
Parameters
----------
pathname : string or list
A glob pattern or a list of glob pattern which will be used to load
data.
using : integer list or slice instance, optional
A list of index or slice instance which will be used to slice data
columns.
unite : boolean, optional:
If it is `True` then dataset will be united into a single numpy
array. See usage for more detail.
basecolumn : integer, optional
An index of base column. all data will be trimmed based on the order
of this column when the number of samples are different among the
dataset.
It only takes effect when :attr:`unite` is specified as `True`.
relative : boolean, optional
Make the dataset relative to the first data by using
:func:`maidenhair.filters.relative.relative` function.
baseline : function, None, optional
A function which will take data columns and return regulated data
columns.
It is useful to regulate baseline of each data in dataset.
parser : instance, string, None, optional
An instance or registered name of parser class.
If it is not specified, default parser specified with
:func:`maidenhair.functions.set_default_parser` will be used instead.
loader : instance, string, None, optional
An instance or registered name of loader class.
If it is not specified, default loader specified with
:func:`maidenhair.functions.set_default_loader` will be used instead.
with_filename : boolean, optional
If it is `True`, returning dataset will contain filename in the
first column.
It cannot be used with :attr:`unite = True`
recursive : boolean, optional
Recursively find pattern in the directory
natsort : boolean
Naturally sort found files.
Returns
-------
list
A list of numpy array
Examples
--------
Assume that there are five independent experimental data files for three types
of samples, namely there are fifteen data files in total.
Each data file would have two directions (X and Y) and 100 data points.
Its filenames would be formatted as
`<type number>.<experimental number>.txt`
and saved in the `tests/fixtures` directory.
Then the loading code will be
>>> import maidenhair
>>> dataset = []
>>> dataset += maidenhair.load('tests/fixtures/1.*.txt',
... unite=True, using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/2.*.txt',
... unite=True, using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/3.*.txt',
... unite=True, using=(0, 1))
>>> len(dataset) # number of samples
3
>>> len(dataset[0]) # number of axis (X and Y)
2
>>> len(dataset[0][0]) # number of data points
100
>>> len(dataset[0][0][0]) # number of columns
5
Without using `unite=True`, the dataset will be
>>> import numpy as np
>>> import maidenhair
>>> dataset = []
>>> dataset += maidenhair.load('tests/fixtures/1.*.txt', using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/2.*.txt', using=(0, 1))
>>> dataset += maidenhair.load('tests/fixtures/3.*.txt', using=(0, 1))
>>> len(dataset) # number of samples
15
>>> len(dataset[0]) # number of axis (X and Y)
2
>>> len(dataset[0][0]) # number of data points
100
>>> isinstance(dataset[0][0][0], np.float64)
True | [
"Load",
"data",
"from",
"file",
"matched",
"with",
"given",
"glob",
"pattern",
"."
] | python | train |
PredixDev/predixpy | predix/data/eventhub/publisher.py | https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/data/eventhub/publisher.py#L234-L242 | def _auto_send(self):
"""
auto send blocking function, when the interval or the message size has been reached, publish
:return:
"""
while True:
if time.time() - self.last_send_time > self.config.async_auto_send_interval_millis or \
len(self._tx_queue) >= self.config.async_auto_send_amount:
self.publish_queue() | [
"def",
"_auto_send",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"last_send_time",
">",
"self",
".",
"config",
".",
"async_auto_send_interval_millis",
"or",
"len",
"(",
"self",
".",
"_tx_queue",
")",
">=",
"self",
".",
"config",
".",
"async_auto_send_amount",
":",
"self",
".",
"publish_queue",
"(",
")"
] | auto send blocking function, when the interval or the message size has been reached, publish
:return: | [
"auto",
"send",
"blocking",
"function",
"when",
"the",
"interval",
"or",
"the",
"message",
"size",
"has",
"been",
"reached",
"publish",
":",
"return",
":"
] | python | train |
miguelgrinberg/flask-paranoid | flask_paranoid/paranoid.py | https://github.com/miguelgrinberg/flask-paranoid/blob/ec6205756d55edd1b135249b9bb345871fef0977/flask_paranoid/paranoid.py#L99-L113 | def clear_session(self, response):
"""Clear the session.
This method is invoked when the session is found to be invalid.
Subclasses can override this method to implement a custom session
reset.
"""
session.clear()
# if flask-login is installed, we try to clear the
# "remember me" cookie, just in case it is set
if 'flask_login' in sys.modules:
remember_cookie = current_app.config.get('REMEMBER_COOKIE',
'remember_token')
response.set_cookie(remember_cookie, '', expires=0, max_age=0) | [
"def",
"clear_session",
"(",
"self",
",",
"response",
")",
":",
"session",
".",
"clear",
"(",
")",
"# if flask-login is installed, we try to clear the",
"# \"remember me\" cookie, just in case it is set",
"if",
"'flask_login'",
"in",
"sys",
".",
"modules",
":",
"remember_cookie",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'REMEMBER_COOKIE'",
",",
"'remember_token'",
")",
"response",
".",
"set_cookie",
"(",
"remember_cookie",
",",
"''",
",",
"expires",
"=",
"0",
",",
"max_age",
"=",
"0",
")"
] | Clear the session.
This method is invoked when the session is found to be invalid.
Subclasses can override this method to implement a custom session
reset. | [
"Clear",
"the",
"session",
"."
] | python | train |
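Since the docstring above invites subclasses to customise the reset, a hedged sketch of overriding clear_session to also drop an application-specific cookie (the cookie name is invented, the import path assumed):

from flask_paranoid import Paranoid   # assumed import path

class MyParanoid(Paranoid):
    def clear_session(self, response):
        # keep the default reset, then clear an extra cookie of our own
        super(MyParanoid, self).clear_session(response)
        response.set_cookie('my_app_token', '', expires=0, max_age=0)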
apache/incubator-mxnet | python/mxnet/visualization.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/visualization.py#L47-L209 | def print_summary(symbol, shape=None, line_length=120, positions=[.44, .64, .74, 1.]):
"""Convert symbol for detail information.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
Total length of printed lines
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.print_summary(...)
"""
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be Symbol")
show_shape = False
if shape is not None:
show_shape = True
interals = symbol.get_internals()
_, out_shapes, _ = interals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(interals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
heads = set(conf["heads"][0])
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
def print_row(fields, positions):
"""Print format row.
Parameters
----------
fields: list
Information field.
positions: list
Field length ratio.
Returns
------
None
"""
line = ''
for i, field in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('_' * line_length)
print_row(to_display, positions)
print('=' * line_length)
def print_layer_summary(node, out_shape):
"""print layer information
Parameters
----------
node: dict
Node information.
out_shape: dict
Node shape information.
Returns
------
Node total parameters.
"""
op = node["op"]
pre_node = []
pre_filter = 0
if op != "null":
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_node["op"] != "null" or item[0] in heads:
# add precede
pre_node.append(input_name)
if show_shape:
if input_node["op"] != "null":
key = input_name + "_output"
else:
key = input_name
if key in shape_dict:
shape = shape_dict[key][1:]
pre_filter = pre_filter + int(shape[0])
cur_param = 0
if op == 'Convolution':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
else:
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
cur_param += int(node["attrs"]["num_filter"])
elif op == 'FullyConnected':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
cur_param = pre_filter * int(node["attrs"]["num_hidden"])
else:
cur_param = (pre_filter+1) * int(node["attrs"]["num_hidden"])
elif op == 'BatchNorm':
key = node["name"] + "_output"
if show_shape:
num_filter = shape_dict[key][1]
cur_param = int(num_filter) * 2
elif op == 'Embedding':
cur_param = int(node["attrs"]['input_dim']) * int(node["attrs"]['output_dim'])
if not pre_node:
first_connection = ''
else:
first_connection = pre_node[0]
fields = [node['name'] + '(' + op + ')',
"x".join([str(x) for x in out_shape]),
cur_param,
first_connection]
print_row(fields, positions)
if len(pre_node) > 1:
for i in range(1, len(pre_node)):
fields = ['', '', '', pre_node[i]]
print_row(fields, positions)
return cur_param
total_params = 0
for i, node in enumerate(nodes):
out_shape = []
op = node["op"]
if op == "null" and i > 0:
continue
if op != "null" or i in heads:
if show_shape:
if op != "null":
key = node["name"] + "_output"
else:
key = node["name"]
if key in shape_dict:
out_shape = shape_dict[key][1:]
total_params += print_layer_summary(nodes[i], out_shape)
if i == len(nodes) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print("Total params: {params}".format(params=total_params))
print('_' * line_length) | [
"def",
"print_summary",
"(",
"symbol",
",",
"shape",
"=",
"None",
",",
"line_length",
"=",
"120",
",",
"positions",
"=",
"[",
".44",
",",
".64",
",",
".74",
",",
"1.",
"]",
")",
":",
"if",
"not",
"isinstance",
"(",
"symbol",
",",
"Symbol",
")",
":",
"raise",
"TypeError",
"(",
"\"symbol must be Symbol\"",
")",
"show_shape",
"=",
"False",
"if",
"shape",
"is",
"not",
"None",
":",
"show_shape",
"=",
"True",
"interals",
"=",
"symbol",
".",
"get_internals",
"(",
")",
"_",
",",
"out_shapes",
",",
"_",
"=",
"interals",
".",
"infer_shape",
"(",
"*",
"*",
"shape",
")",
"if",
"out_shapes",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Input shape is incomplete\"",
")",
"shape_dict",
"=",
"dict",
"(",
"zip",
"(",
"interals",
".",
"list_outputs",
"(",
")",
",",
"out_shapes",
")",
")",
"conf",
"=",
"json",
".",
"loads",
"(",
"symbol",
".",
"tojson",
"(",
")",
")",
"nodes",
"=",
"conf",
"[",
"\"nodes\"",
"]",
"heads",
"=",
"set",
"(",
"conf",
"[",
"\"heads\"",
"]",
"[",
"0",
"]",
")",
"if",
"positions",
"[",
"-",
"1",
"]",
"<=",
"1",
":",
"positions",
"=",
"[",
"int",
"(",
"line_length",
"*",
"p",
")",
"for",
"p",
"in",
"positions",
"]",
"# header names for the different log elements",
"to_display",
"=",
"[",
"'Layer (type)'",
",",
"'Output Shape'",
",",
"'Param #'",
",",
"'Previous Layer'",
"]",
"def",
"print_row",
"(",
"fields",
",",
"positions",
")",
":",
"\"\"\"Print format row.\n\n Parameters\n ----------\n fields: list\n Information field.\n positions: list\n Field length ratio.\n Returns\n ------\n None\n \"\"\"",
"line",
"=",
"''",
"for",
"i",
",",
"field",
"in",
"enumerate",
"(",
"fields",
")",
":",
"line",
"+=",
"str",
"(",
"field",
")",
"line",
"=",
"line",
"[",
":",
"positions",
"[",
"i",
"]",
"]",
"line",
"+=",
"' '",
"*",
"(",
"positions",
"[",
"i",
"]",
"-",
"len",
"(",
"line",
")",
")",
"print",
"(",
"line",
")",
"print",
"(",
"'_'",
"*",
"line_length",
")",
"print_row",
"(",
"to_display",
",",
"positions",
")",
"print",
"(",
"'='",
"*",
"line_length",
")",
"def",
"print_layer_summary",
"(",
"node",
",",
"out_shape",
")",
":",
"\"\"\"print layer information\n\n Parameters\n ----------\n node: dict\n Node information.\n out_shape: dict\n Node shape information.\n Returns\n ------\n Node total parameters.\n \"\"\"",
"op",
"=",
"node",
"[",
"\"op\"",
"]",
"pre_node",
"=",
"[",
"]",
"pre_filter",
"=",
"0",
"if",
"op",
"!=",
"\"null\"",
":",
"inputs",
"=",
"node",
"[",
"\"inputs\"",
"]",
"for",
"item",
"in",
"inputs",
":",
"input_node",
"=",
"nodes",
"[",
"item",
"[",
"0",
"]",
"]",
"input_name",
"=",
"input_node",
"[",
"\"name\"",
"]",
"if",
"input_node",
"[",
"\"op\"",
"]",
"!=",
"\"null\"",
"or",
"item",
"[",
"0",
"]",
"in",
"heads",
":",
"# add precede",
"pre_node",
".",
"append",
"(",
"input_name",
")",
"if",
"show_shape",
":",
"if",
"input_node",
"[",
"\"op\"",
"]",
"!=",
"\"null\"",
":",
"key",
"=",
"input_name",
"+",
"\"_output\"",
"else",
":",
"key",
"=",
"input_name",
"if",
"key",
"in",
"shape_dict",
":",
"shape",
"=",
"shape_dict",
"[",
"key",
"]",
"[",
"1",
":",
"]",
"pre_filter",
"=",
"pre_filter",
"+",
"int",
"(",
"shape",
"[",
"0",
"]",
")",
"cur_param",
"=",
"0",
"if",
"op",
"==",
"'Convolution'",
":",
"if",
"\"no_bias\"",
"in",
"node",
"[",
"\"attrs\"",
"]",
"and",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"no_bias\"",
"]",
"==",
"'True'",
":",
"num_group",
"=",
"int",
"(",
"node",
"[",
"'attrs'",
"]",
".",
"get",
"(",
"'num_group'",
",",
"'1'",
")",
")",
"cur_param",
"=",
"pre_filter",
"*",
"int",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"num_filter\"",
"]",
")",
"//",
"num_group",
"for",
"k",
"in",
"_str2tuple",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"kernel\"",
"]",
")",
":",
"cur_param",
"*=",
"int",
"(",
"k",
")",
"else",
":",
"num_group",
"=",
"int",
"(",
"node",
"[",
"'attrs'",
"]",
".",
"get",
"(",
"'num_group'",
",",
"'1'",
")",
")",
"cur_param",
"=",
"pre_filter",
"*",
"int",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"num_filter\"",
"]",
")",
"//",
"num_group",
"for",
"k",
"in",
"_str2tuple",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"kernel\"",
"]",
")",
":",
"cur_param",
"*=",
"int",
"(",
"k",
")",
"cur_param",
"+=",
"int",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"num_filter\"",
"]",
")",
"elif",
"op",
"==",
"'FullyConnected'",
":",
"if",
"\"no_bias\"",
"in",
"node",
"[",
"\"attrs\"",
"]",
"and",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"no_bias\"",
"]",
"==",
"'True'",
":",
"cur_param",
"=",
"pre_filter",
"*",
"int",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"num_hidden\"",
"]",
")",
"else",
":",
"cur_param",
"=",
"(",
"pre_filter",
"+",
"1",
")",
"*",
"int",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"\"num_hidden\"",
"]",
")",
"elif",
"op",
"==",
"'BatchNorm'",
":",
"key",
"=",
"node",
"[",
"\"name\"",
"]",
"+",
"\"_output\"",
"if",
"show_shape",
":",
"num_filter",
"=",
"shape_dict",
"[",
"key",
"]",
"[",
"1",
"]",
"cur_param",
"=",
"int",
"(",
"num_filter",
")",
"*",
"2",
"elif",
"op",
"==",
"'Embedding'",
":",
"cur_param",
"=",
"int",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"'input_dim'",
"]",
")",
"*",
"int",
"(",
"node",
"[",
"\"attrs\"",
"]",
"[",
"'output_dim'",
"]",
")",
"if",
"not",
"pre_node",
":",
"first_connection",
"=",
"''",
"else",
":",
"first_connection",
"=",
"pre_node",
"[",
"0",
"]",
"fields",
"=",
"[",
"node",
"[",
"'name'",
"]",
"+",
"'('",
"+",
"op",
"+",
"')'",
",",
"\"x\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"out_shape",
"]",
")",
",",
"cur_param",
",",
"first_connection",
"]",
"print_row",
"(",
"fields",
",",
"positions",
")",
"if",
"len",
"(",
"pre_node",
")",
">",
"1",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"pre_node",
")",
")",
":",
"fields",
"=",
"[",
"''",
",",
"''",
",",
"''",
",",
"pre_node",
"[",
"i",
"]",
"]",
"print_row",
"(",
"fields",
",",
"positions",
")",
"return",
"cur_param",
"total_params",
"=",
"0",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"nodes",
")",
":",
"out_shape",
"=",
"[",
"]",
"op",
"=",
"node",
"[",
"\"op\"",
"]",
"if",
"op",
"==",
"\"null\"",
"and",
"i",
">",
"0",
":",
"continue",
"if",
"op",
"!=",
"\"null\"",
"or",
"i",
"in",
"heads",
":",
"if",
"show_shape",
":",
"if",
"op",
"!=",
"\"null\"",
":",
"key",
"=",
"node",
"[",
"\"name\"",
"]",
"+",
"\"_output\"",
"else",
":",
"key",
"=",
"node",
"[",
"\"name\"",
"]",
"if",
"key",
"in",
"shape_dict",
":",
"out_shape",
"=",
"shape_dict",
"[",
"key",
"]",
"[",
"1",
":",
"]",
"total_params",
"+=",
"print_layer_summary",
"(",
"nodes",
"[",
"i",
"]",
",",
"out_shape",
")",
"if",
"i",
"==",
"len",
"(",
"nodes",
")",
"-",
"1",
":",
"print",
"(",
"'='",
"*",
"line_length",
")",
"else",
":",
"print",
"(",
"'_'",
"*",
"line_length",
")",
"print",
"(",
"\"Total params: {params}\"",
".",
"format",
"(",
"params",
"=",
"total_params",
")",
")",
"print",
"(",
"'_'",
"*",
"line_length",
")"
] | Convert symbol for detail information.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
Total length of printed lines
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.print_summary(...) | [
"Convert",
"symbol",
"for",
"detail",
"information",
"."
] | python | train |
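A short usage sketch for print_summary above, using the short-form its docstring mentions; the toy network and the input shape are made up.

import mxnet as mx

data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data=data, num_hidden=128, name='fc1')
act1 = mx.sym.Activation(data=fc1, act_type='relu', name='relu1')
fc2 = mx.sym.FullyConnected(data=act1, num_hidden=10, name='fc2')
net = mx.sym.SoftmaxOutput(data=fc2, name='softmax')

mx.viz.print_summary(net, shape={'data': (1, 784)})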
allelos/vectors | vectors/vectors.py | https://github.com/allelos/vectors/blob/55db2a7e489ae5f4380e70b3c5b7a6ce39de5cee/vectors/vectors.py#L227-L232 | def from_points(cls, point1, point2):
"""Return a Vector instance from two given points."""
if isinstance(point1, Point) and isinstance(point2, Point):
displacement = point1.substract(point2)
return cls(displacement.x, displacement.y, displacement.z)
raise TypeError | [
"def",
"from_points",
"(",
"cls",
",",
"point1",
",",
"point2",
")",
":",
"if",
"isinstance",
"(",
"point1",
",",
"Point",
")",
"and",
"isinstance",
"(",
"point2",
",",
"Point",
")",
":",
"displacement",
"=",
"point1",
".",
"substract",
"(",
"point2",
")",
"return",
"cls",
"(",
"displacement",
".",
"x",
",",
"displacement",
".",
"y",
",",
"displacement",
".",
"z",
")",
"raise",
"TypeError"
] | Return a Vector instance from two given points. | [
"Return",
"a",
"Vector",
"instance",
"from",
"two",
"given",
"points",
"."
] | python | train |
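A minimal sketch of the classmethod above; the import path and the x/y/z attribute names are assumptions that may need adjusting.

from vectors.vectors import Point, Vector   # assumed import path

head = Point(1.0, 2.0, 3.0)
tail = Point(0.0, 0.0, 0.0)
v = Vector.from_points(head, tail)   # displacement head - tail
print(v.x, v.y, v.z)                 # 1.0 2.0 3.0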
square/pylink | pylink/jlink.py | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L4805-L4830 | def rtt_read(self, buffer_index, num_bytes):
"""Reads data from the RTT buffer.
This method will read at most num_bytes bytes from the specified
RTT buffer. The data is automatically removed from the RTT buffer.
If there are not num_bytes bytes waiting in the RTT buffer, the
entire contents of the RTT buffer will be read.
Args:
self (JLink): the ``JLink`` instance
buffer_index (int): the index of the RTT buffer to read from
num_bytes (int): the maximum number of bytes to read
Returns:
A list of bytes read from RTT.
Raises:
JLinkRTTException if the underlying JLINK_RTTERMINAL_Read call fails.
"""
buf = (ctypes.c_ubyte * num_bytes)()
bytes_read = self._dll.JLINK_RTTERMINAL_Read(buffer_index, buf, num_bytes)
if bytes_read < 0:
raise errors.JLinkRTTException(bytes_read)
return list(buf)[:bytes_read] | [
"def",
"rtt_read",
"(",
"self",
",",
"buffer_index",
",",
"num_bytes",
")",
":",
"buf",
"=",
"(",
"ctypes",
".",
"c_ubyte",
"*",
"num_bytes",
")",
"(",
")",
"bytes_read",
"=",
"self",
".",
"_dll",
".",
"JLINK_RTTERMINAL_Read",
"(",
"buffer_index",
",",
"buf",
",",
"num_bytes",
")",
"if",
"bytes_read",
"<",
"0",
":",
"raise",
"errors",
".",
"JLinkRTTException",
"(",
"bytes_read",
")",
"return",
"list",
"(",
"buf",
")",
"[",
":",
"bytes_read",
"]"
] | Reads data from the RTT buffer.
This method will read at most num_bytes bytes from the specified
RTT buffer. The data is automatically removed from the RTT buffer.
If there are not num_bytes bytes waiting in the RTT buffer, the
entire contents of the RTT buffer will be read.
Args:
self (JLink): the ``JLink`` instance
buffer_index (int): the index of the RTT buffer to read from
num_bytes (int): the maximum number of bytes to read
Returns:
A list of bytes read from RTT.
Raises:
JLinkRTTException if the underlying JLINK_RTTERMINAL_Read call fails. | [
"Reads",
"data",
"from",
"the",
"RTT",
"buffer",
"."
] | python | train |
icometrix/dicom2nifti | dicom2nifti/common.py | https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L167-L195 | def is_valid_imaging_dicom(dicom_header):
"""
Function will do some basic checks to see if this is a valid imaging dicom
"""
# if it is philips and multiframe dicom then we assume it is ok
try:
if is_philips([dicom_header]):
if is_multiframe_dicom([dicom_header]):
return True
if "SeriesInstanceUID" not in dicom_header:
return False
if "InstanceNumber" not in dicom_header:
return False
if "ImageOrientationPatient" not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False
if "ImagePositionPatient" not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False
# for all others if there is image position patient we assume it is ok
if Tag(0x0020, 0x0037) not in dicom_header:
return False
return True
except (KeyError, AttributeError):
return False | [
"def",
"is_valid_imaging_dicom",
"(",
"dicom_header",
")",
":",
"# if it is philips and multiframe dicom then we assume it is ok",
"try",
":",
"if",
"is_philips",
"(",
"[",
"dicom_header",
"]",
")",
":",
"if",
"is_multiframe_dicom",
"(",
"[",
"dicom_header",
"]",
")",
":",
"return",
"True",
"if",
"\"SeriesInstanceUID\"",
"not",
"in",
"dicom_header",
":",
"return",
"False",
"if",
"\"InstanceNumber\"",
"not",
"in",
"dicom_header",
":",
"return",
"False",
"if",
"\"ImageOrientationPatient\"",
"not",
"in",
"dicom_header",
"or",
"len",
"(",
"dicom_header",
".",
"ImageOrientationPatient",
")",
"<",
"6",
":",
"return",
"False",
"if",
"\"ImagePositionPatient\"",
"not",
"in",
"dicom_header",
"or",
"len",
"(",
"dicom_header",
".",
"ImagePositionPatient",
")",
"<",
"3",
":",
"return",
"False",
"# for all others if there is image position patient we assume it is ok",
"if",
"Tag",
"(",
"0x0020",
",",
"0x0037",
")",
"not",
"in",
"dicom_header",
":",
"return",
"False",
"return",
"True",
"except",
"(",
"KeyError",
",",
"AttributeError",
")",
":",
"return",
"False"
] | Function will do some basic checks to see if this is a valid imaging dicom | [
"Function",
"will",
"do",
"some",
"basic",
"checks",
"to",
"see",
"if",
"this",
"is",
"a",
"valid",
"imaging",
"dicom"
] | python | train |
dddomodossola/remi | remi/server.py | https://github.com/dddomodossola/remi/blob/85206f62220662bb7ecd471042268def71ccad28/remi/server.py#L98-L111 | def parse_session_cookie(cookie_to_cook):
""" cookie_to_cook = http_header['cookie']
"""
#print("cookie_to_cook: %s"%str(cookie_to_cook))
session_value = None
tokens = cookie_to_cook.split(";")
for tok in tokens:
if 'remi_session=' in tok:
#print("found session id: %s"%str(tok))
try:
session_value = int(tok.replace('remi_session=', ''))
except:
pass
return session_value | [
"def",
"parse_session_cookie",
"(",
"cookie_to_cook",
")",
":",
"#print(\"cookie_to_cook: %s\"%str(cookie_to_cook))",
"session_value",
"=",
"None",
"tokens",
"=",
"cookie_to_cook",
".",
"split",
"(",
"\";\"",
")",
"for",
"tok",
"in",
"tokens",
":",
"if",
"'remi_session='",
"in",
"tok",
":",
"#print(\"found session id: %s\"%str(tok))",
"try",
":",
"session_value",
"=",
"int",
"(",
"tok",
".",
"replace",
"(",
"'remi_session='",
",",
"''",
")",
")",
"except",
":",
"pass",
"return",
"session_value"
] | cookie_to_cook = http_header['cookie'] | [
"cookie_to_cook",
"=",
"http_header",
"[",
"cookie",
"]"
] | python | train |
bslatkin/dpxdt | dpxdt/server/work_queue.py | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L380-L407 | def query(**kwargs):
"""Queries for work items based on their criteria.
Args:
queue_name: Optional queue name to restrict to.
build_id: Optional build ID to restrict to.
release_id: Optional release ID to restrict to.
run_id: Optional run ID to restrict to.
count: How many tasks to fetch. Defaults to None, which means all
tasks are fetched that match the query.
Returns:
Dictionaries of the most recent tasks that match the criteria, in
order of most recently created. When count is 1 the return value will
be the most recent task or None. When count is not 1 the return value
will be a list of tasks.
"""
count = kwargs.get('count', None)
task_list = _query(**kwargs)
task_dict_list = [_task_to_dict(task) for task in task_list]
if count == 1:
if not task_dict_list:
return None
else:
return task_dict_list[0]
return task_dict_list | [
"def",
"query",
"(",
"*",
"*",
"kwargs",
")",
":",
"count",
"=",
"kwargs",
".",
"get",
"(",
"'count'",
",",
"None",
")",
"task_list",
"=",
"_query",
"(",
"*",
"*",
"kwargs",
")",
"task_dict_list",
"=",
"[",
"_task_to_dict",
"(",
"task",
")",
"for",
"task",
"in",
"task_list",
"]",
"if",
"count",
"==",
"1",
":",
"if",
"not",
"task_dict_list",
":",
"return",
"None",
"else",
":",
"return",
"task_dict_list",
"[",
"0",
"]",
"return",
"task_dict_list"
] | Queries for work items based on their criteria.
Args:
queue_name: Optional queue name to restrict to.
build_id: Optional build ID to restrict to.
release_id: Optional release ID to restrict to.
run_id: Optional run ID to restrict to.
count: How many tasks to fetch. Defaults to None, which means all
tasks are fetched that match the query.
Returns:
Dictionaries of the most recent tasks that match the criteria, in
order of most recently created. When count is 1 the return value will
be the most recent task or None. When count is not 1 the return value
will be a list of tasks. | [
"Queries",
"for",
"work",
"items",
"based",
"on",
"their",
"criteria",
"."
] | python | train |
aiortc/aiortc | aiortc/rtcsctptransport.py | https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcsctptransport.py#L1451-L1471 | def _update_advanced_peer_ack_point(self):
"""
Try to advance "Advanced.Peer.Ack.Point" according to RFC 3758.
"""
if uint32_gt(self._last_sacked_tsn, self._advanced_peer_ack_tsn):
self._advanced_peer_ack_tsn = self._last_sacked_tsn
done = 0
streams = {}
while self._sent_queue and self._sent_queue[0]._abandoned:
chunk = self._sent_queue.popleft()
self._advanced_peer_ack_tsn = chunk.tsn
done += 1
if not (chunk.flags & SCTP_DATA_UNORDERED):
streams[chunk.stream_id] = chunk.stream_seq
if done:
# build FORWARD TSN
self._forward_tsn_chunk = ForwardTsnChunk()
self._forward_tsn_chunk.cumulative_tsn = self._advanced_peer_ack_tsn
self._forward_tsn_chunk.streams = list(streams.items()) | [
"def",
"_update_advanced_peer_ack_point",
"(",
"self",
")",
":",
"if",
"uint32_gt",
"(",
"self",
".",
"_last_sacked_tsn",
",",
"self",
".",
"_advanced_peer_ack_tsn",
")",
":",
"self",
".",
"_advanced_peer_ack_tsn",
"=",
"self",
".",
"_last_sacked_tsn",
"done",
"=",
"0",
"streams",
"=",
"{",
"}",
"while",
"self",
".",
"_sent_queue",
"and",
"self",
".",
"_sent_queue",
"[",
"0",
"]",
".",
"_abandoned",
":",
"chunk",
"=",
"self",
".",
"_sent_queue",
".",
"popleft",
"(",
")",
"self",
".",
"_advanced_peer_ack_tsn",
"=",
"chunk",
".",
"tsn",
"done",
"+=",
"1",
"if",
"not",
"(",
"chunk",
".",
"flags",
"&",
"SCTP_DATA_UNORDERED",
")",
":",
"streams",
"[",
"chunk",
".",
"stream_id",
"]",
"=",
"chunk",
".",
"stream_seq",
"if",
"done",
":",
"# build FORWARD TSN",
"self",
".",
"_forward_tsn_chunk",
"=",
"ForwardTsnChunk",
"(",
")",
"self",
".",
"_forward_tsn_chunk",
".",
"cumulative_tsn",
"=",
"self",
".",
"_advanced_peer_ack_tsn",
"self",
".",
"_forward_tsn_chunk",
".",
"streams",
"=",
"list",
"(",
"streams",
".",
"items",
"(",
")",
")"
] | Try to advance "Advanced.Peer.Ack.Point" according to RFC 3758. | [
"Try",
"to",
"advance",
"Advanced",
".",
"Peer",
".",
"Ack",
".",
"Point",
"according",
"to",
"RFC",
"3758",
"."
] | python | train |
snare/scruffy | scruffy/file.py | https://github.com/snare/scruffy/blob/0fedc08cfdb6db927ff93c09f25f24ce5a04c541/scruffy/file.py#L323-L334 | def remove(self, recursive=True, ignore_error=True):
"""
Remove the directory.
"""
try:
if recursive or self._cleanup == 'recursive':
shutil.rmtree(self.path)
else:
os.rmdir(self.path)
except Exception as e:
if not ignore_error:
raise e | [
"def",
"remove",
"(",
"self",
",",
"recursive",
"=",
"True",
",",
"ignore_error",
"=",
"True",
")",
":",
"try",
":",
"if",
"recursive",
"or",
"self",
".",
"_cleanup",
"==",
"'recursive'",
":",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"path",
")",
"else",
":",
"os",
".",
"rmdir",
"(",
"self",
".",
"path",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"not",
"ignore_error",
":",
"raise",
"e"
] | Remove the directory. | [
"Remove",
"the",
"directory",
"."
] | python | test |
seequent/properties | properties/math.py | https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/math.py#L278-L292 | def validate(self, instance, value):
"""Check shape and dtype of vector and scales it to given length"""
value = super(BaseVector, self).validate(instance, value)
if self.length is not None:
try:
value.length = self._length_array(value)
except ZeroDivisionError:
self.error(
instance, value,
error_class=ZeroDivValidationError,
extra='The vector must have a length specified.'
)
return value | [
"def",
"validate",
"(",
"self",
",",
"instance",
",",
"value",
")",
":",
"value",
"=",
"super",
"(",
"BaseVector",
",",
"self",
")",
".",
"validate",
"(",
"instance",
",",
"value",
")",
"if",
"self",
".",
"length",
"is",
"not",
"None",
":",
"try",
":",
"value",
".",
"length",
"=",
"self",
".",
"_length_array",
"(",
"value",
")",
"except",
"ZeroDivisionError",
":",
"self",
".",
"error",
"(",
"instance",
",",
"value",
",",
"error_class",
"=",
"ZeroDivValidationError",
",",
"extra",
"=",
"'The vector must have a length specified.'",
")",
"return",
"value"
] | Check shape and dtype of vector and scale it to given length | [
"Check",
"shape",
"and",
"dtype",
"of",
"vector",
"and",
"scales",
"it",
"to",
"given",
"length"
] | python | train |
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Defaults.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Defaults.py#L397-L447 | def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
"""
This is a wrapper around _concat()/_concat_ixes() that checks for
the existence of prefixes or suffixes on list items and strips them
where it finds them. This is used by tools (like the GNU linker)
that need to turn something like 'libfoo.a' into '-lfoo'.
"""
if not itms:
return itms
if not callable(c):
env_c = env['_concat']
if env_c != _concat and callable(env_c):
# There's a custom _concat() method in the construction
# environment, and we've allowed people to set that in
# the past (see test/custom-concat.py), so preserve the
# backwards compatibility.
c = env_c
else:
c = _concat_ixes
stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))
stripped = []
for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
if isinstance(l, SCons.Node.FS.File):
stripped.append(l)
continue
if not SCons.Util.is_String(l):
l = str(l)
for stripprefix in stripprefixes:
lsp = len(stripprefix)
if l[:lsp] == stripprefix:
l = l[lsp:]
# Do not strip more than one prefix
break
for stripsuffix in stripsuffixes:
lss = len(stripsuffix)
if l[-lss:] == stripsuffix:
l = l[:-lss]
# Do not strip more than one suffix
break
stripped.append(l)
return c(prefix, stripped, suffix, env) | [
"def",
"_stripixes",
"(",
"prefix",
",",
"itms",
",",
"suffix",
",",
"stripprefixes",
",",
"stripsuffixes",
",",
"env",
",",
"c",
"=",
"None",
")",
":",
"if",
"not",
"itms",
":",
"return",
"itms",
"if",
"not",
"callable",
"(",
"c",
")",
":",
"env_c",
"=",
"env",
"[",
"'_concat'",
"]",
"if",
"env_c",
"!=",
"_concat",
"and",
"callable",
"(",
"env_c",
")",
":",
"# There's a custom _concat() method in the construction",
"# environment, and we've allowed people to set that in",
"# the past (see test/custom-concat.py), so preserve the",
"# backwards compatibility.",
"c",
"=",
"env_c",
"else",
":",
"c",
"=",
"_concat_ixes",
"stripprefixes",
"=",
"list",
"(",
"map",
"(",
"env",
".",
"subst",
",",
"SCons",
".",
"Util",
".",
"flatten",
"(",
"stripprefixes",
")",
")",
")",
"stripsuffixes",
"=",
"list",
"(",
"map",
"(",
"env",
".",
"subst",
",",
"SCons",
".",
"Util",
".",
"flatten",
"(",
"stripsuffixes",
")",
")",
")",
"stripped",
"=",
"[",
"]",
"for",
"l",
"in",
"SCons",
".",
"PathList",
".",
"PathList",
"(",
"itms",
")",
".",
"subst_path",
"(",
"env",
",",
"None",
",",
"None",
")",
":",
"if",
"isinstance",
"(",
"l",
",",
"SCons",
".",
"Node",
".",
"FS",
".",
"File",
")",
":",
"stripped",
".",
"append",
"(",
"l",
")",
"continue",
"if",
"not",
"SCons",
".",
"Util",
".",
"is_String",
"(",
"l",
")",
":",
"l",
"=",
"str",
"(",
"l",
")",
"for",
"stripprefix",
"in",
"stripprefixes",
":",
"lsp",
"=",
"len",
"(",
"stripprefix",
")",
"if",
"l",
"[",
":",
"lsp",
"]",
"==",
"stripprefix",
":",
"l",
"=",
"l",
"[",
"lsp",
":",
"]",
"# Do not strip more than one prefix",
"break",
"for",
"stripsuffix",
"in",
"stripsuffixes",
":",
"lss",
"=",
"len",
"(",
"stripsuffix",
")",
"if",
"l",
"[",
"-",
"lss",
":",
"]",
"==",
"stripsuffix",
":",
"l",
"=",
"l",
"[",
":",
"-",
"lss",
"]",
"# Do not strip more than one suffix",
"break",
"stripped",
".",
"append",
"(",
"l",
")",
"return",
"c",
"(",
"prefix",
",",
"stripped",
",",
"suffix",
",",
"env",
")"
] | This is a wrapper around _concat()/_concat_ixes() that checks for
the existence of prefixes or suffixes on list items and strips them
where it finds them. This is used by tools (like the GNU linker)
that need to turn something like 'libfoo.a' into '-lfoo'. | [
"This",
"is",
"a",
"wrapper",
"around",
"_concat",
"()",
"/",
"_concat_ixes",
"()",
"that",
"checks",
"for",
"the",
"existence",
"of",
"prefixes",
"or",
"suffixes",
"on",
"list",
"items",
"and",
"strips",
"them",
"where",
"it",
"finds",
"them",
".",
"This",
"is",
"used",
"by",
"tools",
"(",
"like",
"the",
"GNU",
"linker",
")",
"that",
"need",
"to",
"turn",
"something",
"like",
"libfoo",
".",
"a",
"into",
"-",
"lfoo",
"."
] | python | train |
elifesciences/elife-tools | elifetools/utils.py | https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/utils.py#L523-L533 | def escape_ampersand(string):
"""
Quick convert unicode ampersand characters not associated with
a numbered entity or not starting with allowed characters to a plain &
"""
if not string:
return string
start_with_match = r"(\#x(....);|lt;|gt;|amp;)"
# The pattern below is match & that is not immediately followed by #
string = re.sub(r"&(?!" + start_with_match + ")", '&', string)
return string | [
"def",
"escape_ampersand",
"(",
"string",
")",
":",
"if",
"not",
"string",
":",
"return",
"string",
"start_with_match",
"=",
"r\"(\\#x(....);|lt;|gt;|amp;)\"",
"# The pattern below is match & that is not immediately followed by #",
"string",
"=",
"re",
".",
"sub",
"(",
"r\"&(?!\"",
"+",
"start_with_match",
"+",
"\")\"",
",",
"'&'",
",",
"string",
")",
"return",
"string"
] | Quick convert unicode ampersand characters not associated with
a numbered entity or not starting with allowed characters to a plain & | [
"Quick",
"convert",
"unicode",
"ampersand",
"characters",
"not",
"associated",
"with",
"a",
"numbered",
"entity",
"or",
"not",
"starting",
"with",
"allowed",
"characters",
"to",
"a",
"plain",
"&",
";"
] | python | train |
pneff/wsgiservice | wsgiservice/resource.py | https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/resource.py#L193-L217 | def get_method(self, method=None):
"""Returns the method to call on this instance as a string. Raises a
HTTP exception if no method can be found. Aborts with a 405 status
code for known methods (based on the :attr:`KNOWN_METHODS` list) and a
501 status code for all other methods.
:param method: Name of the method to return. Must be all-uppercase.
:type method: str
:raises: :class:`webob.exceptions.ResponseException` of status 405 or
501 if the method is not implemented on this resource.
"""
if method is None:
method = self.request.method
if hasattr(self, method) and callable(getattr(self, method)):
return method
elif method == 'HEAD':
return self.get_method('GET')
# Error: did not find any method, raise a 405 or 501 exception
elif method in self.KNOWN_METHODS:
# Known HTTP methods => 405 Method Not Allowed
raise_405(self)
else:
# Unknown HTTP methods => 501 Not Implemented
raise_501(self) | [
"def",
"get_method",
"(",
"self",
",",
"method",
"=",
"None",
")",
":",
"if",
"method",
"is",
"None",
":",
"method",
"=",
"self",
".",
"request",
".",
"method",
"if",
"hasattr",
"(",
"self",
",",
"method",
")",
"and",
"callable",
"(",
"getattr",
"(",
"self",
",",
"method",
")",
")",
":",
"return",
"method",
"elif",
"method",
"==",
"'HEAD'",
":",
"return",
"self",
".",
"get_method",
"(",
"'GET'",
")",
"# Error: did not find any method, raise a 405 or 501 exception",
"elif",
"method",
"in",
"self",
".",
"KNOWN_METHODS",
":",
"# Known HTTP methods => 405 Method Not Allowed",
"raise_405",
"(",
"self",
")",
"else",
":",
"# Unknown HTTP methods => 501 Not Implemented",
"raise_501",
"(",
"self",
")"
] | Returns the method to call on this instance as a string. Raises a
HTTP exception if no method can be found. Aborts with a 405 status
code for known methods (based on the :attr:`KNOWN_METHODS` list) and a
501 status code for all other methods.
:param method: Name of the method to return. Must be all-uppercase.
:type method: str
:raises: :class:`webob.exceptions.ResponseException` of status 405 or
501 if the method is not implemented on this resource. | [
"Returns",
"the",
"method",
"to",
"call",
"on",
"this",
"instance",
"as",
"a",
"string",
".",
"Raises",
"a",
"HTTP",
"exception",
"if",
"no",
"method",
"can",
"be",
"found",
".",
"Aborts",
"with",
"a",
"405",
"status",
"code",
"for",
"known",
"methods",
"(",
"based",
"on",
"the",
":",
"attr",
":",
"KNOWN_METHODS",
"list",
")",
"and",
"a",
"501",
"status",
"code",
"for",
"all",
"other",
"methods",
"."
] | python | train |
joferkington/mplstereonet | mplstereonet/stereonet_axes.py | https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L282-L294 | def _polar(self):
"""The "hidden" polar axis used for azimuth labels."""
# This will be called inside LambertAxes.__init__ as well as every
# time the axis is cleared, so we need the try/except to avoid having
# multiple hidden axes when `cla` is _manually_ called.
try:
return self._hidden_polar_axes
except AttributeError:
fig = self.get_figure()
self._hidden_polar_axes = fig.add_axes(self.get_position(True),
frameon=False, projection='polar')
self._hidden_polar_axes.format_coord = self._polar_format_coord
return self._hidden_polar_axes | [
"def",
"_polar",
"(",
"self",
")",
":",
"# This will be called inside LambertAxes.__init__ as well as every",
"# time the axis is cleared, so we need the try/except to avoid having",
"# multiple hidden axes when `cla` is _manually_ called.",
"try",
":",
"return",
"self",
".",
"_hidden_polar_axes",
"except",
"AttributeError",
":",
"fig",
"=",
"self",
".",
"get_figure",
"(",
")",
"self",
".",
"_hidden_polar_axes",
"=",
"fig",
".",
"add_axes",
"(",
"self",
".",
"get_position",
"(",
"True",
")",
",",
"frameon",
"=",
"False",
",",
"projection",
"=",
"'polar'",
")",
"self",
".",
"_hidden_polar_axes",
".",
"format_coord",
"=",
"self",
".",
"_polar_format_coord",
"return",
"self",
".",
"_hidden_polar_axes"
] | The "hidden" polar axis used for azimuth labels. | [
"The",
"hidden",
"polar",
"axis",
"used",
"for",
"azimuth",
"labels",
"."
] | python | train |
StellarCN/py-stellar-base | stellar_base/horizon.py | https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/horizon.py#L352-L376 | def assets(self, asset_code=None, asset_issuer=None, cursor=None, order='asc', limit=10):
"""This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
:return: A list of all valid payment operations
:rtype: dict
"""
endpoint = '/assets'
params = self.__query_params(asset_code=asset_code, asset_issuer=asset_issuer, cursor=cursor, order=order,
limit=limit)
return self.query(endpoint, params) | [
"def",
"assets",
"(",
"self",
",",
"asset_code",
"=",
"None",
",",
"asset_issuer",
"=",
"None",
",",
"cursor",
"=",
"None",
",",
"order",
"=",
"'asc'",
",",
"limit",
"=",
"10",
")",
":",
"endpoint",
"=",
"'/assets'",
"params",
"=",
"self",
".",
"__query_params",
"(",
"asset_code",
"=",
"asset_code",
",",
"asset_issuer",
"=",
"asset_issuer",
",",
"cursor",
"=",
"cursor",
",",
"order",
"=",
"order",
",",
"limit",
"=",
"limit",
")",
"return",
"self",
".",
"query",
"(",
"endpoint",
",",
"params",
")"
] | This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
:return: A list of all valid payment operations
:rtype: dict | [
"This",
"endpoint",
"represents",
"all",
"assets",
".",
"It",
"will",
"give",
"you",
"all",
"the",
"assets",
"in",
"the",
"system",
"along",
"with",
"various",
"statistics",
"about",
"each",
"."
] | python | train |
darothen/xbpch | xbpch/uff.py | https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L67-L77 | def _fix(self, fmt='i'):
"""
Read pre- or suffix of line at current position with given
format `fmt` (default 'i').
"""
fmt = self.endian + fmt
fix = self.read(struct.calcsize(fmt))
if fix:
return struct.unpack(fmt, fix)[0]
else:
raise EOFError | [
"def",
"_fix",
"(",
"self",
",",
"fmt",
"=",
"'i'",
")",
":",
"fmt",
"=",
"self",
".",
"endian",
"+",
"fmt",
"fix",
"=",
"self",
".",
"read",
"(",
"struct",
".",
"calcsize",
"(",
"fmt",
")",
")",
"if",
"fix",
":",
"return",
"struct",
".",
"unpack",
"(",
"fmt",
",",
"fix",
")",
"[",
"0",
"]",
"else",
":",
"raise",
"EOFError"
] | Read pre- or suffix of line at current position with given
format `fmt` (default 'i'). | [
"Read",
"pre",
"-",
"or",
"suffix",
"of",
"line",
"at",
"current",
"position",
"with",
"given",
"format",
"fmt",
"(",
"default",
"i",
")",
"."
] | python | train |
kislyuk/ensure | ensure/main.py | https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L221-L226 | def is_not(self, other):
"""
Ensures :attr:`subject` is not *other* (object identity check).
"""
self._run(unittest_case.assertIsNot, (self._subject, other))
return ChainInspector(self._subject) | [
"def",
"is_not",
"(",
"self",
",",
"other",
")",
":",
"self",
".",
"_run",
"(",
"unittest_case",
".",
"assertIsNot",
",",
"(",
"self",
".",
"_subject",
",",
"other",
")",
")",
"return",
"ChainInspector",
"(",
"self",
".",
"_subject",
")"
] | Ensures :attr:`subject` is not *other* (object identity check). | [
"Ensures",
":",
"attr",
":",
"subject",
"is",
"not",
"*",
"other",
"*",
"(",
"object",
"identity",
"check",
")",
"."
] | python | train |
jilljenn/tryalgo | tryalgo/primes.py | https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/primes.py#L29-L48 | def gries_misra(n):
"""Prime numbers by the sieve of Gries-Misra
Computes both the list of all prime numbers less than n,
and a table mapping every integer 2 ≤ x < n to its smallest prime factor
:param n: positive integer
:returns: list of prime numbers, and list of prime factors
:complexity: O(n)
"""
primes = []
factor = [0] * n
for x in range(2, n):
if not factor[x]: # no factor found
factor[x] = x # meaning x is prime
primes.append(x)
for p in primes: # loop over all non primes of the form p * x
if p > factor[x] or p * x >= n:
break
factor[p * x] = p
return primes, factor | [
"def",
"gries_misra",
"(",
"n",
")",
":",
"primes",
"=",
"[",
"]",
"factor",
"=",
"[",
"0",
"]",
"*",
"n",
"for",
"x",
"in",
"range",
"(",
"2",
",",
"n",
")",
":",
"if",
"not",
"factor",
"[",
"x",
"]",
":",
"# no factor found",
"factor",
"[",
"x",
"]",
"=",
"x",
"# meaning x is prime",
"primes",
".",
"append",
"(",
"x",
")",
"for",
"p",
"in",
"primes",
":",
"# loop over all non primes of the form p * x",
"if",
"p",
">",
"factor",
"[",
"x",
"]",
"or",
"p",
"*",
"x",
">=",
"n",
":",
"break",
"factor",
"[",
"p",
"*",
"x",
"]",
"=",
"p",
"return",
"primes",
",",
"factor"
] | Prime numbers by the sieve of Gries-Misra
Computes both the list of all prime numbers less than n,
and a table mapping every integer 2 ≤ x < n to its smallest prime factor
:param n: positive integer
:returns: list of prime numbers, and list of prime factors
:complexity: O(n) | [
"Prime",
"numbers",
"by",
"the",
"sieve",
"of",
"Gries",
"-",
"Misra",
"Computes",
"both",
"the",
"list",
"of",
"all",
"prime",
"numbers",
"less",
"than",
"n",
"and",
"a",
"table",
"mapping",
"every",
"integer",
"2",
"≤",
"x",
"<",
"n",
"to",
"its",
"smallest",
"prime",
"factor"
] | python | train |
pjuren/pyokit | src/pyokit/datastruct/multipleAlignment.py | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/multipleAlignment.py#L201-L257 | def alignment_to_sequence_coords(self, seq_name, start, end, trim=False):
"""
convert an interval in the alignment into co-ordinates in one of the
sequences. Alignment intervals are inclusive of start, but not end. They
are one-based. Hence the full alignment has coords [1, N+1), where N is the
length of the alignment (number of columns). Sequence coords follow the
same conventions: one-based, inclusive of start but not end.
:param seq_name: which sequence are the start and end coords for?
:param start: start of the interval in alignment co-ordinates
:param end: end of the interval in alignment co-ordinates
:param trim: if true, trim coordinates that fall partially outside
the alignment
:return: a tuple with the start and end sequence coords, in that order;
None if the interval in the alignment defined by (start, end)
contains only gaps in the specified sequence. start < end always,
even if the sequence is reverse complement.
:raises AlignmentError: if the sequence number specifies a sequence not
in this alignment, or the coordinates fall
entirely outside the alignment (or partially
outside and trim == false), or they are not a valid
interval (start >= end)
"""
start = 1 if start < 1 and trim else start
end = self.size() + 1 if (end > self.size() + 1) and trim else end
if (start < 1 or end > (self.size() + 1)):
msg = "Coordinates fall partially outside alignemnt: " + str(start) +\
", " + str(end)
raise InvalidAlignmentCoordinatesError(msg)
if (end < 1 or start > self.size() + 1):
msg = "Coordinates fall entirely outside alignment: " + str(start) +\
", " + str(end)
raise InvalidAlignmentCoordinatesError(msg)
if (end <= start):
msg = "Invalid alignment coordinates: " + str(start) + ", " + str(end)
raise InvalidAlignmentCoordinatesError(msg)
seq = self[seq_name]
pos_strand = seq.is_positive_strand()
non_gaps = 0
r_start = None
r_end = None
l_start = 0 if pos_strand else self.size() - 1
l_end = end - 1 if pos_strand else start - 2
l_step = 1 if pos_strand else -1
for i in range(l_start, l_end, l_step):
if seq[i] != GAP_CHAR:
non_gaps += 1
if ((pos_strand and r_start is None and (i + 1) >= start) or
(not pos_strand and r_start is None and (i + 1) < end)):
r_start = non_gaps + seq.start - 1
if r_start is None:
# we went through the whole region and didn't find a single non-gap char
return None
r_end = non_gaps + seq.start
return (r_start, r_end) | [
"def",
"alignment_to_sequence_coords",
"(",
"self",
",",
"seq_name",
",",
"start",
",",
"end",
",",
"trim",
"=",
"False",
")",
":",
"start",
"=",
"1",
"if",
"start",
"<",
"1",
"and",
"trim",
"else",
"start",
"end",
"=",
"self",
".",
"size",
"(",
")",
"+",
"1",
"if",
"(",
"end",
">",
"self",
".",
"size",
"(",
")",
"+",
"1",
")",
"and",
"trim",
"else",
"end",
"if",
"(",
"start",
"<",
"1",
"or",
"end",
">",
"(",
"self",
".",
"size",
"(",
")",
"+",
"1",
")",
")",
":",
"msg",
"=",
"\"Coordinates fall partially outside alignemnt: \"",
"+",
"str",
"(",
"start",
")",
"+",
"\", \"",
"+",
"str",
"(",
"end",
")",
"raise",
"InvalidAlignmentCoordinatesError",
"(",
"msg",
")",
"if",
"(",
"end",
"<",
"1",
"or",
"start",
">",
"self",
".",
"size",
"(",
")",
"+",
"1",
")",
":",
"msg",
"=",
"\"Coordinates fall entirely outside alignment: \"",
"+",
"str",
"(",
"start",
")",
"+",
"\", \"",
"+",
"str",
"(",
"end",
")",
"raise",
"InvalidAlignmentCoordinatesError",
"(",
"msg",
")",
"if",
"(",
"end",
"<=",
"start",
")",
":",
"msg",
"=",
"\"Invalid alignment coordinates: \"",
"+",
"str",
"(",
"start",
")",
"+",
"\", \"",
"+",
"str",
"(",
"end",
")",
"raise",
"InvalidAlignmentCoordinatesError",
"(",
"msg",
")",
"seq",
"=",
"self",
"[",
"seq_name",
"]",
"pos_strand",
"=",
"seq",
".",
"is_positive_strand",
"(",
")",
"non_gaps",
"=",
"0",
"r_start",
"=",
"None",
"r_end",
"=",
"None",
"l_start",
"=",
"0",
"if",
"pos_strand",
"else",
"self",
".",
"size",
"(",
")",
"-",
"1",
"l_end",
"=",
"end",
"-",
"1",
"if",
"pos_strand",
"else",
"start",
"-",
"2",
"l_step",
"=",
"1",
"if",
"pos_strand",
"else",
"-",
"1",
"for",
"i",
"in",
"range",
"(",
"l_start",
",",
"l_end",
",",
"l_step",
")",
":",
"if",
"seq",
"[",
"i",
"]",
"!=",
"GAP_CHAR",
":",
"non_gaps",
"+=",
"1",
"if",
"(",
"(",
"pos_strand",
"and",
"r_start",
"is",
"None",
"and",
"(",
"i",
"+",
"1",
")",
">=",
"start",
")",
"or",
"(",
"not",
"pos_strand",
"and",
"r_start",
"is",
"None",
"and",
"(",
"i",
"+",
"1",
")",
"<",
"end",
")",
")",
":",
"r_start",
"=",
"non_gaps",
"+",
"seq",
".",
"start",
"-",
"1",
"if",
"r_start",
"is",
"None",
":",
"# we went through the whole region and didn't find a single non-gap char",
"return",
"None",
"r_end",
"=",
"non_gaps",
"+",
"seq",
".",
"start",
"return",
"(",
"r_start",
",",
"r_end",
")"
] | convert an interval in the alignment into co-ordinates in one of the
sequences. Alignment intervals are inclusive of start, but not end. They
are one-based. Hence the full alignment has coords [1, N+1), where N is the
length of the alignment (number of columns). Sequence coords follow the
same conventions: one-based, inclusive of start but not end.
:param seq_name: which sequence are the start and end coords for?
:param start: start of the interval in alignment co-ordinates
:param end: end of the interval in alignment co-ordinates
:param trim: if true, trim coordinates that fall partially outside
the alignment
:return: a tuple with the start and end sequence coords, in that order;
None if the interval in the alignment defined by (start, end)
contains only gaps in the specified sequence. start < end always,
even if the sequence is reverse complement.
:raises AlignmentError: if the sequence number specifies a sequence not
in this alignment, or the coordinates fall
entirely outside the alignment (or partially
outside and trim == false), or they are not a valid
interval (start >= end) | [
"convert",
"an",
"interval",
"in",
"the",
"alignmnet",
"into",
"co",
"-",
"ordinates",
"in",
"one",
"of",
"the",
"sequences",
".",
"Alignment",
"intervals",
"are",
"inclusive",
"of",
"start",
"but",
"not",
"end",
".",
"They",
"are",
"one",
"-",
"based",
".",
"Hence",
"the",
"full",
"alignment",
"has",
"coords",
"[",
"1",
"N",
"+",
"1",
")",
"where",
"N",
"is",
"the",
"length",
"of",
"the",
"alignment",
"(",
"number",
"of",
"columns",
")",
".",
"Sequence",
"coords",
"follow",
"the",
"same",
"conventions",
":",
"one",
"-",
"based",
"inclusive",
"of",
"start",
"but",
"not",
"end",
"."
] | python | train |
googleapis/google-cloud-python | talent/google/cloud/talent_v4beta1/gapic/tenant_service_client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/talent/google/cloud/talent_v4beta1/gapic/tenant_service_client.py#L337-L410 | def update_tenant(
self,
tenant,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates specified tenant.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.TenantServiceClient()
>>>
>>> # TODO: Initialize `tenant`:
>>> tenant = {}
>>>
>>> response = client.update_tenant(tenant)
Args:
tenant (Union[dict, ~google.cloud.talent_v4beta1.types.Tenant]): Required.
The tenant resource to replace the current resource in the system.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Tenant`
update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional but strongly recommended for the best service experience.
If ``update_mask`` is provided, only the specified fields in ``tenant``
are updated. Otherwise all the fields are updated.
A field mask to specify the tenant fields to be updated. Only top level
fields of ``Tenant`` are supported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_tenant" not in self._inner_api_calls:
self._inner_api_calls[
"update_tenant"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_tenant,
default_retry=self._method_configs["UpdateTenant"].retry,
default_timeout=self._method_configs["UpdateTenant"].timeout,
client_info=self._client_info,
)
request = tenant_service_pb2.UpdateTenantRequest(
tenant=tenant, update_mask=update_mask
)
return self._inner_api_calls["update_tenant"](
request, retry=retry, timeout=timeout, metadata=metadata
) | [
"def",
"update_tenant",
"(",
"self",
",",
"tenant",
",",
"update_mask",
"=",
"None",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"update_tenant\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"update_tenant\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"update_tenant",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"UpdateTenant\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"UpdateTenant\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"tenant_service_pb2",
".",
"UpdateTenantRequest",
"(",
"tenant",
"=",
"tenant",
",",
"update_mask",
"=",
"update_mask",
")",
"return",
"self",
".",
"_inner_api_calls",
"[",
"\"update_tenant\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
] | Updates specified tenant.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.TenantServiceClient()
>>>
>>> # TODO: Initialize `tenant`:
>>> tenant = {}
>>>
>>> response = client.update_tenant(tenant)
Args:
tenant (Union[dict, ~google.cloud.talent_v4beta1.types.Tenant]): Required.
The tenant resource to replace the current resource in the system.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Tenant`
update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional but strongly recommended for the best service experience.
If ``update_mask`` is provided, only the specified fields in ``tenant``
are updated. Otherwise all the fields are updated.
A field mask to specify the tenant fields to be updated. Only top level
fields of ``Tenant`` are supported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Updates",
"specified",
"tenant",
"."
] | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/gloo/framebuffer.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/framebuffer.py#L52-L90 | def resize(self, shape, format=None):
""" Set the render-buffer size and format
Parameters
----------
shape : tuple of integers
New shape in yx order. A render buffer is always 2D. For
symmetry with the texture class, a 3-element tuple can also
be given, in which case the last dimension is ignored.
format : {None, 'color', 'depth', 'stencil'}
The buffer format. If None, the current format is maintained.
If that is also None, the format will be set upon attaching
it to a framebuffer. One can also specify the explicit enum:
GL_RGB565, GL_RGBA4, GL_RGB5_A1, GL_DEPTH_COMPONENT16, or
GL_STENCIL_INDEX8
"""
if not self._resizeable:
raise RuntimeError("RenderBuffer is not resizeable")
# Check shape
if not (isinstance(shape, tuple) and len(shape) in (2, 3)):
raise ValueError('RenderBuffer shape must be a 2/3 element tuple')
# Check format
if format is None:
format = self._format # Use current format (may be None)
elif isinstance(format, int):
pass # Do not check, maybe user needs desktop GL formats
elif isinstance(format, string_types):
if format not in ('color', 'depth', 'stencil'):
raise ValueError('RenderBuffer format must be "color", "depth"'
' or "stencil", not %r' % format)
else:
raise ValueError('Invalid RenderBuffer format: %r' % format)
# Store and send GLIR command
self._shape = tuple(shape[:2])
self._format = format
if self._format is not None:
self._glir.command('SIZE', self._id, self._shape, self._format) | [
"def",
"resize",
"(",
"self",
",",
"shape",
",",
"format",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_resizeable",
":",
"raise",
"RuntimeError",
"(",
"\"RenderBuffer is not resizeable\"",
")",
"# Check shape",
"if",
"not",
"(",
"isinstance",
"(",
"shape",
",",
"tuple",
")",
"and",
"len",
"(",
"shape",
")",
"in",
"(",
"2",
",",
"3",
")",
")",
":",
"raise",
"ValueError",
"(",
"'RenderBuffer shape must be a 2/3 element tuple'",
")",
"# Check format",
"if",
"format",
"is",
"None",
":",
"format",
"=",
"self",
".",
"_format",
"# Use current format (may be None)",
"elif",
"isinstance",
"(",
"format",
",",
"int",
")",
":",
"pass",
"# Do not check, maybe user needs desktop GL formats",
"elif",
"isinstance",
"(",
"format",
",",
"string_types",
")",
":",
"if",
"format",
"not",
"in",
"(",
"'color'",
",",
"'depth'",
",",
"'stencil'",
")",
":",
"raise",
"ValueError",
"(",
"'RenderBuffer format must be \"color\", \"depth\"'",
"' or \"stencil\", not %r'",
"%",
"format",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid RenderBuffer format: %r'",
"%",
"format",
")",
"# Store and send GLIR command",
"self",
".",
"_shape",
"=",
"tuple",
"(",
"shape",
"[",
":",
"2",
"]",
")",
"self",
".",
"_format",
"=",
"format",
"if",
"self",
".",
"_format",
"is",
"not",
"None",
":",
"self",
".",
"_glir",
".",
"command",
"(",
"'SIZE'",
",",
"self",
".",
"_id",
",",
"self",
".",
"_shape",
",",
"self",
".",
"_format",
")"
] | Set the render-buffer size and format
Parameters
----------
shape : tuple of integers
New shape in yx order. A render buffer is always 2D. For
symmetry with the texture class, a 3-element tuple can also
be given, in which case the last dimension is ignored.
format : {None, 'color', 'depth', 'stencil'}
The buffer format. If None, the current format is maintained.
If that is also None, the format will be set upon attaching
it to a framebuffer. One can also specify the explicit enum:
GL_RGB565, GL_RGBA4, GL_RGB5_A1, GL_DEPTH_COMPONENT16, or
GL_STENCIL_INDEX8 | [
"Set",
"the",
"render",
"-",
"buffer",
"size",
"and",
"format"
] | python | train |
danilobellini/audiolazy | audiolazy/lazy_filters.py | https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_filters.py#L1176-L1205 | def resonator(freq, bandwidth):
"""
Resonator filter with 2-poles (conjugated pair) and no zeros (constant
numerator), with exponential approximation for bandwidth calculation.
Parameters
----------
freq :
Resonant frequency in rad/sample (max gain).
bandwidth :
Bandwidth frequency range in rad/sample following the equation:
``R = exp(-bandwidth / 2)``
where R is the pole amplitude (radius).
Returns
-------
A ZFilter object.
Gain is normalized to have peak with 0 dB (1.0 amplitude).
"""
bandwidth = thub(bandwidth, 1)
R = exp(-bandwidth * .5)
R = thub(R, 5)
cost = cos(freq) * (2 * R) / (1 + R ** 2)
cost = thub(cost, 2)
gain = (1 - R ** 2) * sqrt(1 - cost ** 2)
denominator = 1 - 2 * R * cost * z ** -1 + R ** 2 * z ** -2
return gain / denominator | [
"def",
"resonator",
"(",
"freq",
",",
"bandwidth",
")",
":",
"bandwidth",
"=",
"thub",
"(",
"bandwidth",
",",
"1",
")",
"R",
"=",
"exp",
"(",
"-",
"bandwidth",
"*",
".5",
")",
"R",
"=",
"thub",
"(",
"R",
",",
"5",
")",
"cost",
"=",
"cos",
"(",
"freq",
")",
"*",
"(",
"2",
"*",
"R",
")",
"/",
"(",
"1",
"+",
"R",
"**",
"2",
")",
"cost",
"=",
"thub",
"(",
"cost",
",",
"2",
")",
"gain",
"=",
"(",
"1",
"-",
"R",
"**",
"2",
")",
"*",
"sqrt",
"(",
"1",
"-",
"cost",
"**",
"2",
")",
"denominator",
"=",
"1",
"-",
"2",
"*",
"R",
"*",
"cost",
"*",
"z",
"**",
"-",
"1",
"+",
"R",
"**",
"2",
"*",
"z",
"**",
"-",
"2",
"return",
"gain",
"/",
"denominator"
] | Resonator filter with 2-poles (conjugated pair) and no zeros (constant
numerator), with exponential approximation for bandwidth calculation.
Parameters
----------
freq :
Resonant frequency in rad/sample (max gain).
bandwidth :
Bandwidth frequency range in rad/sample following the equation:
``R = exp(-bandwidth / 2)``
where R is the pole amplitude (radius).
Returns
-------
A ZFilter object.
Gain is normalized to have peak with 0 dB (1.0 amplitude). | [
"Resonator",
"filter",
"with",
"2",
"-",
"poles",
"(",
"conjugated",
"pair",
")",
"and",
"no",
"zeros",
"(",
"constant",
"numerator",
")",
"with",
"exponential",
"approximation",
"for",
"bandwidth",
"calculation",
"."
] | python | train |
senaite/senaite.core | bika/lims/upgrade/v01_02_007.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_02_007.py#L54-L82 | def update_permissions_rejected_analysis_requests():
"""
Maps and updates the permissions for rejected analysis requests so lab clerks, clients, owners and
RegulatoryInspector can see rejected analysis requests on lists.
:return: None
"""
workflow_tool = api.get_tool("portal_workflow")
workflow = workflow_tool.getWorkflowById('bika_ar_workflow')
catalog = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
brains = catalog(review_state='rejected')
counter = 0
total = len(brains)
logger.info(
"Changing permissions for rejected analysis requests. " +
"Number of rejected analysis requests: {0}".format(total))
for brain in brains:
if 'LabClerk' not in brain.allowedRolesAndUsers:
if counter % 100 == 0:
logger.info(
"Changing permissions for rejected analysis requests: " +
"{0}/{1}".format(counter, total))
obj = api.get_object(brain)
workflow.updateRoleMappingsFor(obj)
obj.reindexObject()
counter += 1
logger.info(
"Changed permissions for rejected analysis requests: " +
"{0}/{1}".format(counter, total)) | [
"def",
"update_permissions_rejected_analysis_requests",
"(",
")",
":",
"workflow_tool",
"=",
"api",
".",
"get_tool",
"(",
"\"portal_workflow\"",
")",
"workflow",
"=",
"workflow_tool",
".",
"getWorkflowById",
"(",
"'bika_ar_workflow'",
")",
"catalog",
"=",
"api",
".",
"get_tool",
"(",
"CATALOG_ANALYSIS_REQUEST_LISTING",
")",
"brains",
"=",
"catalog",
"(",
"review_state",
"=",
"'rejected'",
")",
"counter",
"=",
"0",
"total",
"=",
"len",
"(",
"brains",
")",
"logger",
".",
"info",
"(",
"\"Changing permissions for rejected analysis requests. \"",
"+",
"\"Number of rejected analysis requests: {0}\"",
".",
"format",
"(",
"total",
")",
")",
"for",
"brain",
"in",
"brains",
":",
"if",
"'LabClerk'",
"not",
"in",
"brain",
".",
"allowedRolesAndUsers",
":",
"if",
"counter",
"%",
"100",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"Changing permissions for rejected analysis requests: \"",
"+",
"\"{0}/{1}\"",
".",
"format",
"(",
"counter",
",",
"total",
")",
")",
"obj",
"=",
"api",
".",
"get_object",
"(",
"brain",
")",
"workflow",
".",
"updateRoleMappingsFor",
"(",
"obj",
")",
"obj",
".",
"reindexObject",
"(",
")",
"counter",
"+=",
"1",
"logger",
".",
"info",
"(",
"\"Changed permissions for rejected analysis requests: \"",
"+",
"\"{0}/{1}\"",
".",
"format",
"(",
"counter",
",",
"total",
")",
")"
] | Maps and updates the permissions for rejected analysis requests so lab clerks, clients, owners and
RegulatoryInspector can see rejected analysis requests on lists.
:return: None | [
"Maps",
"and",
"updates",
"the",
"permissions",
"for",
"rejected",
"analysis",
"requests",
"so",
"lab",
"clerks",
"clients",
"owners",
"and",
"RegulatoryInspector",
"can",
"see",
"rejected",
"analysis",
"requests",
"on",
"lists",
"."
] | python | train |
ArduPilot/MAVProxy | MAVProxy/modules/lib/mp_image.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/mp_image.py#L348-L390 | def on_redraw_timer(self, event):
'''the redraw timer ensures we show new map tiles as they
are downloaded'''
state = self.state
while state.in_queue.qsize():
try:
obj = state.in_queue.get()
except Exception:
time.sleep(0.05)
return
if isinstance(obj, MPImageData):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
img = wx.EmptyImage(obj.width, obj.height)
img.SetData(obj.data)
self.img = img
self.need_redraw = True
if state.auto_size:
client_area = state.frame.GetClientSize()
total_area = state.frame.GetSize()
bx = max(total_area.x - client_area.x,0)
by = max(total_area.y - client_area.y,0)
state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by))
if isinstance(obj, MPImageTitle):
state.frame.SetTitle(obj.title)
if isinstance(obj, MPImageRecenter):
self.on_recenter(obj.location)
if isinstance(obj, MPImageMenu):
self.set_menu(obj.menu)
if isinstance(obj, MPImagePopupMenu):
self.set_popup_menu(obj.menu)
if isinstance(obj, MPImageBrightness):
state.brightness = obj.brightness
self.need_redraw = True
if isinstance(obj, MPImageFullSize):
self.full_size()
if isinstance(obj, MPImageFitToWindow):
self.fit_to_window()
if isinstance(obj, win_layout.WinLayout):
win_layout.set_wx_window_layout(state.frame, obj)
if self.need_redraw:
self.redraw() | [
"def",
"on_redraw_timer",
"(",
"self",
",",
"event",
")",
":",
"state",
"=",
"self",
".",
"state",
"while",
"state",
".",
"in_queue",
".",
"qsize",
"(",
")",
":",
"try",
":",
"obj",
"=",
"state",
".",
"in_queue",
".",
"get",
"(",
")",
"except",
"Exception",
":",
"time",
".",
"sleep",
"(",
"0.05",
")",
"return",
"if",
"isinstance",
"(",
"obj",
",",
"MPImageData",
")",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'ignore'",
")",
"img",
"=",
"wx",
".",
"EmptyImage",
"(",
"obj",
".",
"width",
",",
"obj",
".",
"height",
")",
"img",
".",
"SetData",
"(",
"obj",
".",
"data",
")",
"self",
".",
"img",
"=",
"img",
"self",
".",
"need_redraw",
"=",
"True",
"if",
"state",
".",
"auto_size",
":",
"client_area",
"=",
"state",
".",
"frame",
".",
"GetClientSize",
"(",
")",
"total_area",
"=",
"state",
".",
"frame",
".",
"GetSize",
"(",
")",
"bx",
"=",
"max",
"(",
"total_area",
".",
"x",
"-",
"client_area",
".",
"x",
",",
"0",
")",
"by",
"=",
"max",
"(",
"total_area",
".",
"y",
"-",
"client_area",
".",
"y",
",",
"0",
")",
"state",
".",
"frame",
".",
"SetSize",
"(",
"wx",
".",
"Size",
"(",
"obj",
".",
"width",
"+",
"bx",
",",
"obj",
".",
"height",
"+",
"by",
")",
")",
"if",
"isinstance",
"(",
"obj",
",",
"MPImageTitle",
")",
":",
"state",
".",
"frame",
".",
"SetTitle",
"(",
"obj",
".",
"title",
")",
"if",
"isinstance",
"(",
"obj",
",",
"MPImageRecenter",
")",
":",
"self",
".",
"on_recenter",
"(",
"obj",
".",
"location",
")",
"if",
"isinstance",
"(",
"obj",
",",
"MPImageMenu",
")",
":",
"self",
".",
"set_menu",
"(",
"obj",
".",
"menu",
")",
"if",
"isinstance",
"(",
"obj",
",",
"MPImagePopupMenu",
")",
":",
"self",
".",
"set_popup_menu",
"(",
"obj",
".",
"menu",
")",
"if",
"isinstance",
"(",
"obj",
",",
"MPImageBrightness",
")",
":",
"state",
".",
"brightness",
"=",
"obj",
".",
"brightness",
"self",
".",
"need_redraw",
"=",
"True",
"if",
"isinstance",
"(",
"obj",
",",
"MPImageFullSize",
")",
":",
"self",
".",
"full_size",
"(",
")",
"if",
"isinstance",
"(",
"obj",
",",
"MPImageFitToWindow",
")",
":",
"self",
".",
"fit_to_window",
"(",
")",
"if",
"isinstance",
"(",
"obj",
",",
"win_layout",
".",
"WinLayout",
")",
":",
"win_layout",
".",
"set_wx_window_layout",
"(",
"state",
".",
"frame",
",",
"obj",
")",
"if",
"self",
".",
"need_redraw",
":",
"self",
".",
"redraw",
"(",
")"
] | the redraw timer ensures we show new map tiles as they
are downloaded | [
"the",
"redraw",
"timer",
"ensures",
"we",
"show",
"new",
"map",
"tiles",
"as",
"they",
"are",
"downloaded"
] | python | train |
sunt05/SuPy | docs/source/proc_var_info/nml_rst_proc.py | https://github.com/sunt05/SuPy/blob/47178bd5aee50a059414e3e504940662fbfae0dc/docs/source/proc_var_info/nml_rst_proc.py#L83-L97 | def form_option(str_opt):
'''generate option name based suffix for URL
:param str_opt: opt name
:type str_opt: str
:return: URL suffix for the specified option
:rtype: str
'''
str_base = '#cmdoption-arg-'
str_opt_x = str_base+str_opt.lower()\
.replace('_', '-')\
.replace('(', '-')\
.replace(')', '')
return str_opt_x | [
"def",
"form_option",
"(",
"str_opt",
")",
":",
"str_base",
"=",
"'#cmdoption-arg-'",
"str_opt_x",
"=",
"str_base",
"+",
"str_opt",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
".",
"replace",
"(",
"'('",
",",
"'-'",
")",
".",
"replace",
"(",
"')'",
",",
"''",
")",
"return",
"str_opt_x"
] | generate option name based suffix for URL
:param str_opt: opt name
:type str_opt: str
:return: URL suffix for the specified option
:rtype: str | [
"generate",
"option",
"name",
"based",
"suffix",
"for",
"URL"
] | python | train |
evhub/coconut | coconut/compiler/compiler.py | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1880-L1886 | def check_strict(self, name, original, loc, tokens):
"""Check that syntax meets --strict requirements."""
internal_assert(len(tokens) == 1, "invalid " + name + " tokens", tokens)
if self.strict:
raise self.make_err(CoconutStyleError, "found " + name, original, loc)
else:
return tokens[0] | [
"def",
"check_strict",
"(",
"self",
",",
"name",
",",
"original",
",",
"loc",
",",
"tokens",
")",
":",
"internal_assert",
"(",
"len",
"(",
"tokens",
")",
"==",
"1",
",",
"\"invalid \"",
"+",
"name",
"+",
"\" tokens\"",
",",
"tokens",
")",
"if",
"self",
".",
"strict",
":",
"raise",
"self",
".",
"make_err",
"(",
"CoconutStyleError",
",",
"\"found \"",
"+",
"name",
",",
"original",
",",
"loc",
")",
"else",
":",
"return",
"tokens",
"[",
"0",
"]"
] | Check that syntax meets --strict requirements. | [
"Check",
"that",
"syntax",
"meets",
"--",
"strict",
"requirements",
"."
] | python | train |
flatangle/flatlib | flatlib/dignities/essential.py | https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/dignities/essential.py#L119-L134 | def getInfo(sign, lon):
""" Returns the complete essential dignities
for a sign and longitude.
"""
return {
'ruler': ruler(sign),
'exalt': exalt(sign),
'dayTrip': dayTrip(sign),
'nightTrip': nightTrip(sign),
'partTrip': partTrip(sign),
'term': term(sign, lon),
'face': face(sign, lon),
'exile': exile(sign),
'fall': fall(sign)
} | [
"def",
"getInfo",
"(",
"sign",
",",
"lon",
")",
":",
"return",
"{",
"'ruler'",
":",
"ruler",
"(",
"sign",
")",
",",
"'exalt'",
":",
"exalt",
"(",
"sign",
")",
",",
"'dayTrip'",
":",
"dayTrip",
"(",
"sign",
")",
",",
"'nightTrip'",
":",
"nightTrip",
"(",
"sign",
")",
",",
"'partTrip'",
":",
"partTrip",
"(",
"sign",
")",
",",
"'term'",
":",
"term",
"(",
"sign",
",",
"lon",
")",
",",
"'face'",
":",
"face",
"(",
"sign",
",",
"lon",
")",
",",
"'exile'",
":",
"exile",
"(",
"sign",
")",
",",
"'fall'",
":",
"fall",
"(",
"sign",
")",
"}"
] | Returns the complete essential dignities
for a sign and longitude. | [
"Returns",
"the",
"complete",
"essential",
"dignities",
"for",
"a",
"sign",
"and",
"longitude",
"."
] | python | train |
tomplus/kubernetes_asyncio | kubernetes_asyncio/client/api/core_v1_api.py | https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L17375-L17398 | def patch_persistent_volume(self, name, body, **kwargs): # noqa: E501
"""patch_persistent_volume # noqa: E501
partially update the specified PersistentVolume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_persistent_volume(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PersistentVolume (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1PersistentVolume
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_persistent_volume_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_persistent_volume_with_http_info(name, body, **kwargs) # noqa: E501
return data | [
"def",
"patch_persistent_volume",
"(",
"self",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"patch_persistent_volume_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"patch_persistent_volume_with_http_info",
"(",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | patch_persistent_volume # noqa: E501
partially update the specified PersistentVolume # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_persistent_volume(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PersistentVolume (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1PersistentVolume
If the method is called asynchronously,
returns the request thread. | [
"patch_persistent_volume",
"#",
"noqa",
":",
"E501"
] | python | train |
blockstack/blockstack-core | blockstack/lib/subdomains.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L485-L497 | def check_initial_subdomain(cls, subdomain_rec):
"""
Verify that a first-ever subdomain record is well-formed.
* n must be 0
* the subdomain must not be independent of its domain
"""
if subdomain_rec.n != 0:
return False
if subdomain_rec.independent:
return False
return True | [
"def",
"check_initial_subdomain",
"(",
"cls",
",",
"subdomain_rec",
")",
":",
"if",
"subdomain_rec",
".",
"n",
"!=",
"0",
":",
"return",
"False",
"if",
"subdomain_rec",
".",
"independent",
":",
"return",
"False",
"return",
"True"
] | Verify that a first-ever subdomain record is well-formed.
* n must be 0
* the subdomain must not be independent of its domain | [
"Verify",
"that",
"a",
"first",
"-",
"ever",
"subdomain",
"record",
"is",
"well",
"-",
"formed",
".",
"*",
"n",
"must",
"be",
"0",
"*",
"the",
"subdomain",
"must",
"not",
"be",
"independent",
"of",
"its",
"domain"
] | python | train |
elifesciences/elife-tools | elifetools/json_rewrite.py | https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/json_rewrite.py#L626-L925 | def rewrite_elife_datasets_json(json_content, doi):
""" this does the work of rewriting elife datasets json """
# Add dates in bulk
elife_dataset_dates = []
elife_dataset_dates.append(("10.7554/eLife.00348", "used", "dataro17", u"2010"))
elife_dataset_dates.append(("10.7554/eLife.01179", "used", "dataro4", u"2016"))
elife_dataset_dates.append(("10.7554/eLife.01603", "used", "dataro2", u"2012"))
elife_dataset_dates.append(("10.7554/eLife.02304", "used", "dataro15", u"2005"))
elife_dataset_dates.append(("10.7554/eLife.02935", "used", "dataro2", u"2014"))
elife_dataset_dates.append(("10.7554/eLife.03583", "used", "dataro5", u"2013"))
if doi in map(lambda dataset: dataset[0], elife_dataset_dates):
for (match_doi, used_or_generated, id, dataset_date) in elife_dataset_dates:
if doi == match_doi:
if json_content.get(used_or_generated):
for dataset in json_content[used_or_generated]:
if dataset.get("id") and dataset["id"] == id:
if not dataset.get("date"):
dataset["date"] = dataset_date
# Continue with individual article JSON rewriting
if doi == "10.7554/eLife.01311":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] in ["dataro3", "dataro4", "dataro5"]:
if not dataset.get("date"):
dataset["date"] = u"2012"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Duke"}]
if dataset.get("id") and dataset["id"] == "dataro6":
if not dataset.get("date"):
dataset["date"] = u"2011"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "FlyBase"}]
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("date"):
dataset["date"] = u"2011"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Baylor College of Medicine (BCM)"}]
if dataset.get("id") and dataset["id"] in ["dataro8", "dataro9"]:
if not dataset.get("date"):
dataset["date"] = u"2012"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "University of California, Berkeley"}]
if doi == "10.7554/eLife.01440":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "EnsemblMetazoa"}]
if doi == "10.7554/eLife.01535":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "2000, 2005":
dataset["date"] = u"2000"
if doi == "10.7554/eLife.02304":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro11":
if not dataset.get("title"):
dataset["title"] = u"T.gondii LDH1 ternary complex with APAD+ and oxalate"
if doi == "10.7554/eLife.03574":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("date"):
dataset["date"] = u"2006"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Riley,M."}, {"type": "group", "name": "Abe,T."}, {"type": "group", "name": "Arnaud,M.B."}, {"type": "group", "name": "Berlyn,M.K."}, {"type": "group", "name": "Blattner,F.R."}, {"type": "group", "name": "Chaudhuri,R.R."}, {"type": "group", "name": "Glasner,J.D."}, {"type": "group", "name": "Horiuchi,T."}, {"type": "group", "name": "Keseler,I.M."}, {"type": "group", "name": "Kosuge,T."}, {"type": "group", "name": "Mori,H."}, {"type": "group", "name": "Perna,N.T."}, {"type": "group", "name": "Plunkett,G. III"}, {"type": "group", "name": "Rudd,K.E."}, {"type": "group", "name": "Serres,M.H."}, {"type": "group", "name": "Thomas,G.H."}, {"type": "group", "name": "Thomson,N.R."}, {"type": "group", "name": "Wishart,D."}, {"type": "group", "name": "Wanner,B.L."}]
if doi == "10.7554/eLife.03676":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro4":
if not dataset.get("date"):
dataset["date"] = u"2013"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Human Gene Sequencing Center"}]
if doi == "10.7554/eLife.03971":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Vanderperre B."}]
if doi == "10.7554/eLife.04660":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "2014-2015":
dataset["date"] = u"2014"
if doi == "10.7554/eLife.06421":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if dataset.get("date") and dataset.get("date") == "NA":
dataset["date"] = u"2006"
if doi == "10.7554/eLife.08445":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("date"):
dataset["date"] = u"2006"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "BDTNP SELEX"}]
if doi == "10.7554/eLife.08916":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if dataset.get("date") and dataset.get("date") == "2008, updated 2014":
dataset["date"] = u"2008"
if dataset.get("id") and dataset["id"] == "dataro3":
if dataset.get("date") and dataset.get("date") == "2013, updated 2014":
dataset["date"] = u"2013"
if doi == "10.7554/eLife.08955":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Kurdistani S"}, {"type": "group", "name": "Marrban C"}, {"type": "group", "name": "Su T"}]
if doi == "10.7554/eLife.09207":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Prostate Cancer Genome Sequencing Project"}]
if doi == "10.7554/eLife.10607":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "data-ro4":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Authors"}]
if doi == "10.7554/eLife.10670":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "HIVdb"}]
# Add dates, authors, other details
if doi == "10.7554/eLife.10856":
if json_content.get("generated"):
datasets_authors_for_10856 = [{"type": "group", "name": "Dagdas YF"}, {"type": "group", "name": "Belhaj K"}, {"type": "group", "name": "Maqbool A"}, {"type": "group", "name": "Chaparro-Garcia A"}, {"type": "group", "name": "Pandey P"}, {"type": "group", "name": "Petre B"}, {"type": "group", "name": "Tabassum N"}, {"type": "group", "name": "Cruz-Mireles N"}, {"type": "group", "name": "Hughes RK"}, {"type": "group", "name": "Sklenar J"}, {"type": "group", "name": "Win J"}, {"type": "group", "name": "Menke F"}, {"type": "group", "name": "Findlay K"}, {"type": "group", "name": "Banfield MJ"}, {"type": "group", "name": "Kamoun S"}, {"type": "group", "name": "Bozkurt TO"}]
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("date"):
dataset["date"] = u"2016"
if not dataset.get("title"):
dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151098/"
if dataset.get("id") and dataset["id"] == "dataro8":
if not dataset.get("date"):
dataset["date"] = u"2015"
if not dataset.get("title"):
dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151096/"
if dataset.get("id") and dataset["id"] == "dataro9":
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if doi == "10.7554/eLife.10877":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("title"):
dataset["title"] = u"Oct4 ChIP-Seq at G1 and G2/M phase of cell cycle in mouse embryonic stem cells"
if doi == "10.7554/eLife.10921":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Floor SN"}, {"type": "group", "name": "Doudna JA"}]
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Sidrauski C"}, {"type": "group", "name": "McGeachy A"}, {"type": "group", "name": "Ingolia N"}, {"type": "group", "name": "Walter P"}]
if doi == "10.7554/eLife.11117":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro14":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Authors"}]
if doi == "10.7554/eLife.12204":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Rhodes DR"}, {"type": "group", "name": "Kalyana-Sundaram S"}, {"type": "group", "name": "Mahavisno V"}, {"type": "group", "name": "Varambally R"}, {"type": "group", "name": "Yu J"}, {"type": "group", "name": "Briggs BB"}, {"type": "group", "name": "Barrette TR"}, {"type": "group", "name": "Anstet MJ"}, {"type": "group", "name": "Kincead-Beal C"}, {"type": "group", "name": "Kulkarni P"}, {"type": "group", "name": "Varambally S"}, {"type": "group", "name": "Ghosh D"}, {"type": "group", "name": "Chinnaiyan AM."}]
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Gaspar C"}, {"type": "group", "name": "Cardoso J"}, {"type": "group", "name": "Franken P"}, {"type": "group", "name": "Molenaar L"}, {"type": "group", "name": "Morreau H"}, {"type": "group", "name": "Möslein G"}, {"type": "group", "name": "Sampson J"}, {"type": "group", "name": "Boer JM"}, {"type": "group", "name": "de Menezes RX"}, {"type": "group", "name": "Fodde R."}]
if dataset.get("id") and dataset["id"] == "dataro3":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Graudens E"}, {"type": "group", "name": "Boulanger V"}, {"type": "group", "name": "Mollard C"}, {"type": "group", "name": "Mariage-Samson R"}, {"type": "group", "name": "Barlet X"}, {"type": "group", "name": "Grémy G"}, {"type": "group", "name": "Couillault C"}, {"type": "group", "name": "Lajémi M"}, {"type": "group", "name": "Piatier-Tonneau D"}, {"type": "group", "name": "Zaborski P"}, {"type": "group", "name": "Eveno E"}, {"type": "group", "name": "Auffray C"}, {"type": "group", "name": "Imbeaud S."}]
if dataset.get("id") and dataset["id"] == "dataro4":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Hong Y"}, {"type": "group", "name": "Downey T"}, {"type": "group", "name": "Eu KW"}, {"type": "group", "name": "Koh PK"},{"type": "group", "name": "Cheah PY"}]
if dataset.get("id") and dataset["id"] == "dataro5":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Kaiser S"}, {"type": "group", "name": "Park YK"}, {"type": "group", "name": "Franklin JL"}, {"type": "group", "name": "Halberg RB"}, {"type": "group", "name": "Yu M"}, {"type": "group", "name": "Jessen WJ"}, {"type": "group", "name": "Freudenberg J"}, {"type": "group", "name": "Chen X"}, {"type": "group", "name": "Haigis K"}, {"type": "group", "name": "Jegga AG"}, {"type": "group", "name": "Kong S"}, {"type": "group", "name": "Sakthivel B"}, {"type": "group", "name": "Xu H"}, {"type": "group", "name": "Reichling T"}, {"type": "group", "name": "Azhar M"}, {"type": "group", "name": "Boivin GP"}, {"type": "group", "name": "Roberts RB"}, {"type": "group", "name": "Bissahoyo AC"}, {"type": "group", "name": "Gonzales F"}, {"type": "group", "name": "Bloom GC"}, {"type": "group", "name": "Eschrich S"}, {"type": "group", "name": "Carter SL"}, {"type": "group", "name": "Aronow JE"}, {"type": "group", "name": "Kleimeyer J"}, {"type": "group", "name": "Kleimeyer M"}, {"type": "group", "name": "Ramaswamy V"}, {"type": "group", "name": "Settle SH"}, {"type": "group", "name": "Boone B"}, {"type": "group", "name": "Levy S"}, {"type": "group", "name": "Graff JM"}, {"type": "group", "name": "Doetschman T"}, {"type": "group", "name": "Groden J"}, {"type": "group", "name": "Dove WF"}, {"type": "group", "name": "Threadgill DW"}, {"type": "group", "name": "Yeatman TJ"}, {"type": "group", "name": "Coffey RJ Jr"}, {"type": "group", "name": "Aronow BJ."}]
if dataset.get("id") and dataset["id"] == "dataro6":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Muzny DM et al"}]
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Skrzypczak M"}, {"type": "group", "name": "Goryca K"}, {"type": "group", "name": "Rubel T"}, {"type": "group", "name": "Paziewska A"}, {"type": "group", "name": "Mikula M"}, {"type": "group", "name": "Jarosz D"}, {"type": "group", "name": "Pachlewski J"}, {"type": "group", "name": "Oledzki J"}, {"type": "group", "name": "Ostrowski J."}]
if dataset.get("id") and dataset["id"] == "dataro8":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Cancer Genome Atlas Network"}]
if doi == "10.7554/eLife.12876":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Department of Human Genetics, University of Utah"}]
if doi == "10.7554/eLife.13195":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Microbial Ecology Group, Colorado State University"}]
if doi == "10.7554/eLife.14158":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("title"):
dataset["title"] = u"Bacterial initiation protein"
if dataset.get("id") and dataset["id"] == "data-ro2":
if not dataset.get("title"):
dataset["title"] = u"Bacterial initiation protein in complex with Phage inhibitor protein"
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro3":
if not dataset.get("date"):
dataset["date"] = u"2007"
if doi == "10.7554/eLife.14243":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Tramantano M"}, {"type": "group", "name": "Sun L"}, {"type": "group", "name": "Au C"}, {"type": "group", "name": "Labuz D"}, {"type": "group", "name": "Liu Z"}, {"type": "group", "name": "Chou M"}, {"type": "group", "name": "Shen C"}, {"type": "group", "name": "Luk E"}]
if doi == "10.7554/eLife.16078":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "current manuscript":
dataset["date"] = u"2016"
if doi == "10.7554/eLife.17082":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro4":
if not dataset.get("date"):
dataset["date"] = u"2012"
if dataset.get("id") and dataset["id"] == "data-ro5":
if not dataset.get("date"):
dataset["date"] = u"2014"
if dataset.get("id") and dataset["id"] == "data-ro6":
if not dataset.get("date"):
dataset["date"] = u"2014"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "The Cancer Genome Atlas (TCGA)"}]
if doi == "10.7554/eLife.17473":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date").startswith("Release date"):
dataset["date"] = u"2016"
return json_content | [
"def",
"rewrite_elife_datasets_json",
"(",
"json_content",
",",
"doi",
")",
":",
"# Add dates in bulk",
"elife_dataset_dates",
"=",
"[",
"]",
"elife_dataset_dates",
".",
"append",
"(",
"(",
"\"10.7554/eLife.00348\"",
",",
"\"used\"",
",",
"\"dataro17\"",
",",
"u\"2010\"",
")",
")",
"elife_dataset_dates",
".",
"append",
"(",
"(",
"\"10.7554/eLife.01179\"",
",",
"\"used\"",
",",
"\"dataro4\"",
",",
"u\"2016\"",
")",
")",
"elife_dataset_dates",
".",
"append",
"(",
"(",
"\"10.7554/eLife.01603\"",
",",
"\"used\"",
",",
"\"dataro2\"",
",",
"u\"2012\"",
")",
")",
"elife_dataset_dates",
".",
"append",
"(",
"(",
"\"10.7554/eLife.02304\"",
",",
"\"used\"",
",",
"\"dataro15\"",
",",
"u\"2005\"",
")",
")",
"elife_dataset_dates",
".",
"append",
"(",
"(",
"\"10.7554/eLife.02935\"",
",",
"\"used\"",
",",
"\"dataro2\"",
",",
"u\"2014\"",
")",
")",
"elife_dataset_dates",
".",
"append",
"(",
"(",
"\"10.7554/eLife.03583\"",
",",
"\"used\"",
",",
"\"dataro5\"",
",",
"u\"2013\"",
")",
")",
"if",
"doi",
"in",
"map",
"(",
"lambda",
"dataset",
":",
"dataset",
"[",
"0",
"]",
",",
"elife_dataset_dates",
")",
":",
"for",
"(",
"match_doi",
",",
"used_or_generated",
",",
"id",
",",
"dataset_date",
")",
"in",
"elife_dataset_dates",
":",
"if",
"doi",
"==",
"match_doi",
":",
"if",
"json_content",
".",
"get",
"(",
"used_or_generated",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"used_or_generated",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"id",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"dataset_date",
"# Continue with individual article JSON rewriting",
"if",
"doi",
"==",
"\"10.7554/eLife.01311\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"in",
"[",
"\"dataro3\"",
",",
"\"dataro4\"",
",",
"\"dataro5\"",
"]",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2012\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Duke\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro6\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2011\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"FlyBase\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro7\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2011\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Baylor College of Medicine (BCM)\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"in",
"[",
"\"dataro8\"",
",",
"\"dataro9\"",
"]",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2012\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"University of California, Berkeley\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.01440\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"EnsemblMetazoa\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.01535\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"and",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"==",
"\"2000, 2005\"",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2000\"",
"if",
"doi",
"==",
"\"10.7554/eLife.02304\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro11\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"title\"",
")",
":",
"dataset",
"[",
"\"title\"",
"]",
"=",
"u\"T.gondii LDH1 ternary complex with APAD+ and oxalate\"",
"if",
"doi",
"==",
"\"10.7554/eLife.03574\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2006\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Riley,M.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Abe,T.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Arnaud,M.B.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Berlyn,M.K.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Blattner,F.R.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Chaudhuri,R.R.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Glasner,J.D.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Horiuchi,T.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Keseler,I.M.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kosuge,T.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Mori,H.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Perna,N.T.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Plunkett,G. III\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Rudd,K.E.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Serres,M.H.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Thomas,G.H.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Thomson,N.R.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Wishart,D.\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Wanner,B.L.\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.03676\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro4\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2013\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Human Gene Sequencing Center\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.03971\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Vanderperre B.\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.04660\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"and",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"==",
"\"2014-2015\"",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2014\"",
"if",
"doi",
"==",
"\"10.7554/eLife.06421\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"and",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"==",
"\"NA\"",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2006\"",
"if",
"doi",
"==",
"\"10.7554/eLife.08445\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2006\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"BDTNP SELEX\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.08916\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"and",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"==",
"\"2008, updated 2014\"",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2008\"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro3\"",
":",
"if",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"and",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"==",
"\"2013, updated 2014\"",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2013\"",
"if",
"doi",
"==",
"\"10.7554/eLife.08955\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kurdistani S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Marrban C\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Su T\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.09207\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Prostate Cancer Genome Sequencing Project\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.10607\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro4\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Authors\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.10670\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"HIVdb\"",
"}",
"]",
"# Add dates, authors, other details",
"if",
"doi",
"==",
"\"10.7554/eLife.10856\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"datasets_authors_for_10856",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Dagdas YF\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Belhaj K\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Maqbool A\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Chaparro-Garcia A\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Pandey P\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Petre B\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Tabassum N\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Cruz-Mireles N\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Hughes RK\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Sklenar J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Win J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Menke F\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Findlay K\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Banfield MJ\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kamoun S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Bozkurt TO\"",
"}",
"]",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro7\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2016\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"title\"",
")",
":",
"dataset",
"[",
"\"title\"",
"]",
"=",
"u\"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"datasets_authors_for_10856",
"if",
"dataset",
".",
"get",
"(",
"\"uri\"",
")",
"and",
"dataset",
"[",
"\"uri\"",
"]",
"==",
"\"http://www.ncbi.nlm.nih.\"",
":",
"dataset",
"[",
"\"uri\"",
"]",
"=",
"\"https://www.ncbi.nlm.nih.gov/nuccore/976151098/\"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro8\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2015\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"title\"",
")",
":",
"dataset",
"[",
"\"title\"",
"]",
"=",
"u\"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"datasets_authors_for_10856",
"if",
"dataset",
".",
"get",
"(",
"\"uri\"",
")",
"and",
"dataset",
"[",
"\"uri\"",
"]",
"==",
"\"http://www.ncbi.nlm.nih.\"",
":",
"dataset",
"[",
"\"uri\"",
"]",
"=",
"\"https://www.ncbi.nlm.nih.gov/nuccore/976151096/\"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro9\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"datasets_authors_for_10856",
"if",
"doi",
"==",
"\"10.7554/eLife.10877\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"title\"",
")",
":",
"dataset",
"[",
"\"title\"",
"]",
"=",
"u\"Oct4 ChIP-Seq at G1 and G2/M phase of cell cycle in mouse embryonic stem cells\"",
"if",
"doi",
"==",
"\"10.7554/eLife.10921\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Floor SN\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Doudna JA\"",
"}",
"]",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Sidrauski C\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"McGeachy A\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Ingolia N\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Walter P\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.11117\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro14\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Authors\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.12204\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Rhodes DR\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kalyana-Sundaram S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Mahavisno V\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Varambally R\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Yu J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Briggs BB\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Barrette TR\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Anstet MJ\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kincead-Beal C\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kulkarni P\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Varambally S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Ghosh D\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Chinnaiyan AM.\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Gaspar C\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Cardoso J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Franken P\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Molenaar L\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Morreau H\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Möslein G\"}",
",",
" ",
"\"",
"type\":",
" ",
"group\",",
" ",
"name\":",
" ",
"Sampson J\"}",
",",
" ",
"\"",
"type\":",
" ",
"group\",",
" ",
"name\":",
" ",
"Boer JM\"}",
",",
" ",
"\"",
"type\":",
" ",
"group\",",
" ",
"name\":",
" ",
"de Menezes RX\"}",
",",
" ",
"\"",
"type\":",
" ",
"group\",",
" ",
"name\":",
" ",
"Fodde R.\"}",
"]",
"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro3\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Graudens E\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Boulanger V\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Mollard C\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Mariage-Samson R\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Barlet X\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Grémy G\"}",
",",
" ",
"\"",
"type\":",
" ",
"group\",",
" ",
"name\":",
" ",
"Couillault C\"}",
",",
" ",
"\"",
"type\":",
" ",
"group\",",
" ",
"name\":",
" ",
"Lajémi M\"},",
" ",
"{",
"t",
"ype\": ",
"\"",
"roup\", ",
"\"",
"ame\": ",
"\"",
"iatier-Tonneau D\"},",
" ",
"{",
"t",
"ype\": ",
"\"",
"roup\", ",
"\"",
"ame\": ",
"\"",
"aborski P\"},",
" ",
"{",
"t",
"ype\": ",
"\"",
"roup\", ",
"\"",
"ame\": ",
"\"",
"veno E\"},",
" ",
"{",
"t",
"ype\": ",
"\"",
"roup\", ",
"\"",
"ame\": ",
"\"",
"uffray C\"},",
" ",
"{",
"t",
"ype\": ",
"\"",
"roup\", ",
"\"",
"ame\": ",
"\"",
"mbeaud S.\"}]",
"",
"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro4\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Hong Y\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Downey T\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Eu KW\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Koh PK\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Cheah PY\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro5\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kaiser S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Park YK\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Franklin JL\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Halberg RB\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Yu M\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Jessen WJ\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Freudenberg J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Chen X\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Haigis K\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Jegga AG\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kong S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Sakthivel B\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Xu H\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Reichling T\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Azhar M\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Boivin GP\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Roberts RB\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Bissahoyo AC\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Gonzales F\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Bloom GC\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Eschrich S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Carter SL\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Aronow JE\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kleimeyer J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Kleimeyer M\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Ramaswamy V\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Settle SH\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Boone B\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Levy S\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Graff JM\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Doetschman T\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Groden J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Dove WF\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Threadgill DW\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Yeatman TJ\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Coffey RJ Jr\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Aronow BJ.\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro6\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Muzny DM et al\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro7\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Skrzypczak M\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Goryca K\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Rubel T\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Paziewska A\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Mikula M\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Jarosz D\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Pachlewski J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Oledzki J\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Ostrowski J.\"",
"}",
"]",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro8\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Cancer Genome Atlas Network\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.12876\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Department of Human Genetics, University of Utah\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.13195\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Microbial Ecology Group, Colorado State University\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.14158\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro1\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"title\"",
")",
":",
"dataset",
"[",
"\"title\"",
"]",
"=",
"u\"Bacterial initiation protein\"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro2\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"title\"",
")",
":",
"dataset",
"[",
"\"title\"",
"]",
"=",
"u\"Bacterial initiation protein in complex with Phage inhibitor protein\"",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro3\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2007\"",
"if",
"doi",
"==",
"\"10.7554/eLife.14243\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro2\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Tramantano M\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Sun L\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Au C\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Labuz D\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Liu Z\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Chou M\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Shen C\"",
"}",
",",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"Luk E\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.16078\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"and",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"==",
"\"current manuscript\"",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2016\"",
"if",
"doi",
"==",
"\"10.7554/eLife.17082\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"used\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"used\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro4\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2012\"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro5\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2014\"",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"data-ro6\"",
":",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2014\"",
"if",
"not",
"dataset",
".",
"get",
"(",
"\"authors\"",
")",
":",
"dataset",
"[",
"\"authors\"",
"]",
"=",
"[",
"{",
"\"type\"",
":",
"\"group\"",
",",
"\"name\"",
":",
"\"The Cancer Genome Atlas (TCGA)\"",
"}",
"]",
"if",
"doi",
"==",
"\"10.7554/eLife.17473\"",
":",
"if",
"json_content",
".",
"get",
"(",
"\"generated\"",
")",
":",
"for",
"dataset",
"in",
"json_content",
"[",
"\"generated\"",
"]",
":",
"if",
"dataset",
".",
"get",
"(",
"\"id\"",
")",
"and",
"dataset",
"[",
"\"id\"",
"]",
"==",
"\"dataro1\"",
":",
"if",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
"and",
"dataset",
".",
"get",
"(",
"\"date\"",
")",
".",
"startswith",
"(",
"\"Release date\"",
")",
":",
"dataset",
"[",
"\"date\"",
"]",
"=",
"u\"2016\"",
"return",
"json_content"
] | this does the work of rewriting elife datasets json | [
"this",
"does",
"the",
"work",
"of",
"rewriting",
"elife",
"datasets",
"json"
] | python | train |
saltstack/salt | salt/modules/junos.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/junos.py#L279-L353 | def set_hostname(hostname=None, **kwargs):
'''
Set the device's hostname
hostname
The name to be set
comment
Provide a comment to the commit
dev_timeout : 30
The NETCONF RPC timeout (in seconds)
confirm
Provide time in minutes for commit confirmation. If this option is
specified, the commit will be rolled back in the specified amount of time
unless the commit is confirmed.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.set_hostname salt-device
'''
conn = __proxy__['junos.conn']()
ret = {}
if hostname is None:
ret['message'] = 'Please provide the hostname.'
ret['out'] = False
return ret
op = dict()
if '__pub_arg' in kwargs:
if kwargs['__pub_arg']:
if isinstance(kwargs['__pub_arg'][-1], dict):
op.update(kwargs['__pub_arg'][-1])
else:
op.update(kwargs)
# Added to recent versions of JunOs
# Use text format instead
set_string = 'set system host-name {0}'.format(hostname)
try:
conn.cu.load(set_string, format='set')
except Exception as exception:
ret['message'] = 'Could not load configuration due to error "{0}"'.format(
exception)
ret['out'] = False
return ret
try:
commit_ok = conn.cu.commit_check()
except Exception as exception:
ret['message'] = 'Could not commit check due to error "{0}"'.format(
exception)
ret['out'] = False
return ret
if commit_ok:
try:
conn.cu.commit(**op)
ret['message'] = 'Successfully changed hostname.'
ret['out'] = True
except Exception as exception:
ret['out'] = False
ret['message'] = 'Successfully loaded host-name but commit failed with "{0}"'.format(
exception)
return ret
else:
ret['out'] = False
ret[
'message'] = 'Successfully loaded host-name but pre-commit check failed.'
conn.cu.rollback()
return ret | [
"def",
"set_hostname",
"(",
"hostname",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"__proxy__",
"[",
"'junos.conn'",
"]",
"(",
")",
"ret",
"=",
"{",
"}",
"if",
"hostname",
"is",
"None",
":",
"ret",
"[",
"'message'",
"]",
"=",
"'Please provide the hostname.'",
"ret",
"[",
"'out'",
"]",
"=",
"False",
"return",
"ret",
"op",
"=",
"dict",
"(",
")",
"if",
"'__pub_arg'",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"'__pub_arg'",
"]",
":",
"if",
"isinstance",
"(",
"kwargs",
"[",
"'__pub_arg'",
"]",
"[",
"-",
"1",
"]",
",",
"dict",
")",
":",
"op",
".",
"update",
"(",
"kwargs",
"[",
"'__pub_arg'",
"]",
"[",
"-",
"1",
"]",
")",
"else",
":",
"op",
".",
"update",
"(",
"kwargs",
")",
"# Added to recent versions of JunOs",
"# Use text format instead",
"set_string",
"=",
"'set system host-name {0}'",
".",
"format",
"(",
"hostname",
")",
"try",
":",
"conn",
".",
"cu",
".",
"load",
"(",
"set_string",
",",
"format",
"=",
"'set'",
")",
"except",
"Exception",
"as",
"exception",
":",
"ret",
"[",
"'message'",
"]",
"=",
"'Could not load configuration due to error \"{0}\"'",
".",
"format",
"(",
"exception",
")",
"ret",
"[",
"'out'",
"]",
"=",
"False",
"return",
"ret",
"try",
":",
"commit_ok",
"=",
"conn",
".",
"cu",
".",
"commit_check",
"(",
")",
"except",
"Exception",
"as",
"exception",
":",
"ret",
"[",
"'message'",
"]",
"=",
"'Could not commit check due to error \"{0}\"'",
".",
"format",
"(",
"exception",
")",
"ret",
"[",
"'out'",
"]",
"=",
"False",
"return",
"ret",
"if",
"commit_ok",
":",
"try",
":",
"conn",
".",
"cu",
".",
"commit",
"(",
"*",
"*",
"op",
")",
"ret",
"[",
"'message'",
"]",
"=",
"'Successfully changed hostname.'",
"ret",
"[",
"'out'",
"]",
"=",
"True",
"except",
"Exception",
"as",
"exception",
":",
"ret",
"[",
"'out'",
"]",
"=",
"False",
"ret",
"[",
"'message'",
"]",
"=",
"'Successfully loaded host-name but commit failed with \"{0}\"'",
".",
"format",
"(",
"exception",
")",
"return",
"ret",
"else",
":",
"ret",
"[",
"'out'",
"]",
"=",
"False",
"ret",
"[",
"'message'",
"]",
"=",
"'Successfully loaded host-name but pre-commit check failed.'",
"conn",
".",
"cu",
".",
"rollback",
"(",
")",
"return",
"ret"
] | Set the device's hostname
hostname
The name to be set
comment
Provide a comment to the commit
dev_timeout : 30
The NETCONF RPC timeout (in seconds)
confirm
Provide time in minutes for commit confirmation. If this option is
specified, the commit will be rolled back in the specified amount of time
unless the commit is confirmed.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.set_hostname salt-device | [
"Set",
"the",
"device",
"s",
"hostname"
] | python | train |
DeepHorizons/iarm | iarm/arm_instructions/_meta.py | https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/_meta.py#L20-L40 | def parse_lines(self, code):
"""
Return a list of the parsed code
For each line, return a three-tuple containing:
1. The label
2. The instruction
3. Any arguments or parameters
An element in the tuple may be None or '' if it did not find anything
:param code: The code to parse
:return: A list of tuples in the form of (label, instruction, parameters)
"""
remove_comments = re.compile(r'^([^;@\n]*);?.*$', re.MULTILINE)
code = '\n'.join(remove_comments.findall(code)) # TODO can probably do this better
# TODO labels with spaces between pipes is allowed `|label with space| INST OPER`
parser = re.compile(r'^(\S*)?[\s]*(\S*)([^\n]*)$', re.MULTILINE)
res = parser.findall(code)
# Make all parsing of labels and instructions adhere to all uppercase
res = [(label.upper(), instruction.upper(), parameters.strip()) for (label, instruction, parameters) in res]
return res | [
"def",
"parse_lines",
"(",
"self",
",",
"code",
")",
":",
"remove_comments",
"=",
"re",
".",
"compile",
"(",
"r'^([^;@\\n]*);?.*$'",
",",
"re",
".",
"MULTILINE",
")",
"code",
"=",
"'\\n'",
".",
"join",
"(",
"remove_comments",
".",
"findall",
"(",
"code",
")",
")",
"# TODO can probably do this better",
"# TODO labels with spaces between pipes is allowed `|label with space| INST OPER`",
"parser",
"=",
"re",
".",
"compile",
"(",
"r'^(\\S*)?[\\s]*(\\S*)([^\\n]*)$'",
",",
"re",
".",
"MULTILINE",
")",
"res",
"=",
"parser",
".",
"findall",
"(",
"code",
")",
"# Make all parsing of labels and instructions adhere to all uppercase",
"res",
"=",
"[",
"(",
"label",
".",
"upper",
"(",
")",
",",
"instruction",
".",
"upper",
"(",
")",
",",
"parameters",
".",
"strip",
"(",
")",
")",
"for",
"(",
"label",
",",
"instruction",
",",
"parameters",
")",
"in",
"res",
"]",
"return",
"res"
] | Return a list of the parsed code
For each line, return a three-tuple containing:
1. The label
2. The instruction
3. Any arguments or parameters
An element in the tuple may be None or '' if it did not find anything
:param code: The code to parse
:return: A list of tuples in the form of (label, instruction, parameters) | [
"Return",
"a",
"list",
"of",
"the",
"parsed",
"code"
] | python | train |
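A quick standalone check of the two regular expressions used in parse_lines above; the assembly fragment is invented for illustration and is not part of the source row:

import re

remove_comments = re.compile(r'^([^;@\n]*);?.*$', re.MULTILINE)
parser = re.compile(r'^(\S*)?[\s]*(\S*)([^\n]*)$', re.MULTILINE)

code = "LOOP MOVS R0, #1 ; set up counter\n    ADDS R0, R0, #1"
code = '\n'.join(remove_comments.findall(code))  # strip ';' comments per line
parsed = [(label.upper(), inst.upper(), params.strip())
          for (label, inst, params) in parser.findall(code)]
print(parsed)  # [('LOOP', 'MOVS', 'R0, #1'), ('', 'ADDS', 'R0, R0, #1')]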
daknuett/py_register_machine2 | app/web/model.py | https://github.com/daknuett/py_register_machine2/blob/599c53cd7576297d0d7a53344ed5d9aa98acc751/app/web/model.py#L174-L179 | def get_ram(self, format_ = "nl"):
"""
return a string representations of the ram
"""
ram = [self.ram.read(i) for i in range(self.ram.size)]
return self._format_mem(ram, format_) | [
"def",
"get_ram",
"(",
"self",
",",
"format_",
"=",
"\"nl\"",
")",
":",
"ram",
"=",
"[",
"self",
".",
"ram",
".",
"read",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ram",
".",
"size",
")",
"]",
"return",
"self",
".",
"_format_mem",
"(",
"ram",
",",
"format_",
")"
] | return a string representations of the ram | [
"return",
"a",
"string",
"representations",
"of",
"the",
"ram"
] | python | train |
pypa/pipenv | pipenv/vendor/pexpect/spawnbase.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/spawnbase.py#L157-L180 | def read_nonblocking(self, size=1, timeout=None):
"""This reads data from the file descriptor.
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
The timeout parameter is ignored.
"""
try:
s = os.read(self.child_fd, size)
except OSError as err:
if err.args[0] == errno.EIO:
# Linux-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
raise
if s == b'':
# BSD-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
s = self._decoder.decode(s, final=False)
self._log(s, 'read')
return s | [
"def",
"read_nonblocking",
"(",
"self",
",",
"size",
"=",
"1",
",",
"timeout",
"=",
"None",
")",
":",
"try",
":",
"s",
"=",
"os",
".",
"read",
"(",
"self",
".",
"child_fd",
",",
"size",
")",
"except",
"OSError",
"as",
"err",
":",
"if",
"err",
".",
"args",
"[",
"0",
"]",
"==",
"errno",
".",
"EIO",
":",
"# Linux-style EOF",
"self",
".",
"flag_eof",
"=",
"True",
"raise",
"EOF",
"(",
"'End Of File (EOF). Exception style platform.'",
")",
"raise",
"if",
"s",
"==",
"b''",
":",
"# BSD-style EOF",
"self",
".",
"flag_eof",
"=",
"True",
"raise",
"EOF",
"(",
"'End Of File (EOF). Empty string style platform.'",
")",
"s",
"=",
"self",
".",
"_decoder",
".",
"decode",
"(",
"s",
",",
"final",
"=",
"False",
")",
"self",
".",
"_log",
"(",
"s",
",",
"'read'",
")",
"return",
"s"
] | This reads data from the file descriptor.
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
The timeout parameter is ignored. | [
"This",
"reads",
"data",
"from",
"the",
"file",
"descriptor",
"."
] | python | train |
openwisp/netjsonconfig | netjsonconfig/backends/openwrt/converters/interfaces.py | https://github.com/openwisp/netjsonconfig/blob/c23ce9732720856e2f6dc54060db71a8182c7d4b/netjsonconfig/backends/openwrt/converters/interfaces.py#L83-L112 | def __intermediate_interface(self, interface, uci_name):
"""
converts NetJSON interface to
UCI intermediate data structure
"""
interface.update({
'.type': 'interface',
'.name': uci_name,
'ifname': interface.pop('name')
})
if 'network' in interface:
del interface['network']
if 'mac' in interface:
# mac address of wireless interface must
# be set in /etc/config/wireless, therefore
# we can skip this in /etc/config/network
if interface.get('type') != 'wireless':
interface['macaddr'] = interface['mac']
del interface['mac']
if 'autostart' in interface:
interface['auto'] = interface['autostart']
del interface['autostart']
if 'disabled' in interface:
interface['enabled'] = not interface['disabled']
del interface['disabled']
if 'wireless' in interface:
del interface['wireless']
if 'addresses' in interface:
del interface['addresses']
return interface | [
"def",
"__intermediate_interface",
"(",
"self",
",",
"interface",
",",
"uci_name",
")",
":",
"interface",
".",
"update",
"(",
"{",
"'.type'",
":",
"'interface'",
",",
"'.name'",
":",
"uci_name",
",",
"'ifname'",
":",
"interface",
".",
"pop",
"(",
"'name'",
")",
"}",
")",
"if",
"'network'",
"in",
"interface",
":",
"del",
"interface",
"[",
"'network'",
"]",
"if",
"'mac'",
"in",
"interface",
":",
"# mac address of wireless interface must",
"# be set in /etc/config/wireless, therfore",
"# we can skip this in /etc/config/network",
"if",
"interface",
".",
"get",
"(",
"'type'",
")",
"!=",
"'wireless'",
":",
"interface",
"[",
"'macaddr'",
"]",
"=",
"interface",
"[",
"'mac'",
"]",
"del",
"interface",
"[",
"'mac'",
"]",
"if",
"'autostart'",
"in",
"interface",
":",
"interface",
"[",
"'auto'",
"]",
"=",
"interface",
"[",
"'autostart'",
"]",
"del",
"interface",
"[",
"'autostart'",
"]",
"if",
"'disabled'",
"in",
"interface",
":",
"interface",
"[",
"'enabled'",
"]",
"=",
"not",
"interface",
"[",
"'disabled'",
"]",
"del",
"interface",
"[",
"'disabled'",
"]",
"if",
"'wireless'",
"in",
"interface",
":",
"del",
"interface",
"[",
"'wireless'",
"]",
"if",
"'addresses'",
"in",
"interface",
":",
"del",
"interface",
"[",
"'addresses'",
"]",
"return",
"interface"
] | converts NetJSON interface to
UCI intermediate data structure | [
"converts",
"NetJSON",
"interface",
"to",
"UCI",
"intermediate",
"data",
"structure"
] | python | valid |
SergeySatskiy/cdm-pythonparser | cdmpyparser.py | https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L124-L128 | def getDisplayName(self):
"""Provides a name for display purpose respecting the alias"""
if self.alias == "":
return self.name
return self.name + " as " + self.alias | [
"def",
"getDisplayName",
"(",
"self",
")",
":",
"if",
"self",
".",
"alias",
"==",
"\"\"",
":",
"return",
"self",
".",
"name",
"return",
"self",
".",
"name",
"+",
"\" as \"",
"+",
"self",
".",
"alias"
] | Provides a name for display purpose respecting the alias | [
"Provides",
"a",
"name",
"for",
"display",
"purpose",
"respecting",
"the",
"alias"
] | python | train |
Jajcus/pyxmpp2 | pyxmpp2/stanzaprocessor.py | https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/stanzaprocessor.py#L145-L193 | def _process_iq_response(self, stanza):
"""Process IQ stanza of type 'response' or 'error'.
:Parameters:
- `stanza`: the stanza received
:Types:
- `stanza`: `Iq`
If a matching handler is available pass the stanza to it. Otherwise
ignore it if it is "error" or "result" stanza or return
"feature-not-implemented" error if it is "get" or "set".
"""
stanza_id = stanza.stanza_id
from_jid = stanza.from_jid
if from_jid:
ufrom = from_jid.as_unicode()
else:
ufrom = None
res_handler = err_handler = None
try:
res_handler, err_handler = self._iq_response_handlers.pop(
(stanza_id, ufrom))
except KeyError:
logger.debug("No response handler for id={0!r} from={1!r}"
.format(stanza_id, ufrom))
logger.debug(" from_jid: {0!r} peer: {1!r} me: {2!r}"
.format(from_jid, self.peer, self.me))
if ( (from_jid == self.peer or from_jid == self.me
or self.me and from_jid == self.me.bare()) ):
try:
logger.debug(" trying id={0!r} from=None"
.format(stanza_id))
res_handler, err_handler = \
self._iq_response_handlers.pop(
(stanza_id, None))
except KeyError:
pass
if stanza.stanza_type == "result":
if res_handler:
response = res_handler(stanza)
else:
return False
else:
if err_handler:
response = err_handler(stanza)
else:
return False
self._process_handler_result(response)
return True | [
"def",
"_process_iq_response",
"(",
"self",
",",
"stanza",
")",
":",
"stanza_id",
"=",
"stanza",
".",
"stanza_id",
"from_jid",
"=",
"stanza",
".",
"from_jid",
"if",
"from_jid",
":",
"ufrom",
"=",
"from_jid",
".",
"as_unicode",
"(",
")",
"else",
":",
"ufrom",
"=",
"None",
"res_handler",
"=",
"err_handler",
"=",
"None",
"try",
":",
"res_handler",
",",
"err_handler",
"=",
"self",
".",
"_iq_response_handlers",
".",
"pop",
"(",
"(",
"stanza_id",
",",
"ufrom",
")",
")",
"except",
"KeyError",
":",
"logger",
".",
"debug",
"(",
"\"No response handler for id={0!r} from={1!r}\"",
".",
"format",
"(",
"stanza_id",
",",
"ufrom",
")",
")",
"logger",
".",
"debug",
"(",
"\" from_jid: {0!r} peer: {1!r} me: {2!r}\"",
".",
"format",
"(",
"from_jid",
",",
"self",
".",
"peer",
",",
"self",
".",
"me",
")",
")",
"if",
"(",
"(",
"from_jid",
"==",
"self",
".",
"peer",
"or",
"from_jid",
"==",
"self",
".",
"me",
"or",
"self",
".",
"me",
"and",
"from_jid",
"==",
"self",
".",
"me",
".",
"bare",
"(",
")",
")",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\" trying id={0!r} from=None\"",
".",
"format",
"(",
"stanza_id",
")",
")",
"res_handler",
",",
"err_handler",
"=",
"self",
".",
"_iq_response_handlers",
".",
"pop",
"(",
"(",
"stanza_id",
",",
"None",
")",
")",
"except",
"KeyError",
":",
"pass",
"if",
"stanza",
".",
"stanza_type",
"==",
"\"result\"",
":",
"if",
"res_handler",
":",
"response",
"=",
"res_handler",
"(",
"stanza",
")",
"else",
":",
"return",
"False",
"else",
":",
"if",
"err_handler",
":",
"response",
"=",
"err_handler",
"(",
"stanza",
")",
"else",
":",
"return",
"False",
"self",
".",
"_process_handler_result",
"(",
"response",
")",
"return",
"True"
] | Process IQ stanza of type 'response' or 'error'.
:Parameters:
- `stanza`: the stanza received
:Types:
- `stanza`: `Iq`
If a matching handler is available pass the stanza to it. Otherwise
ignore it if it is "error" or "result" stanza or return
"feature-not-implemented" error if it is "get" or "set". | [
"Process",
"IQ",
"stanza",
"of",
"type",
"response",
"or",
"error",
"."
] | python | valid |
mitsei/dlkit | dlkit/json_/learning/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/managers.py#L2005-L2022 | def get_activity_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the activity administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ActivityAdminSession) - an
``ActivityAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_activity_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` is ``true``.*
"""
if not self.supports_activity_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ActivityAdminSession(proxy=proxy, runtime=self._runtime) | [
"def",
"get_activity_admin_session",
"(",
"self",
",",
"proxy",
")",
":",
"if",
"not",
"self",
".",
"supports_activity_admin",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"# pylint: disable=no-member",
"return",
"sessions",
".",
"ActivityAdminSession",
"(",
"proxy",
"=",
"proxy",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")"
] | Gets the ``OsidSession`` associated with the activity administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ActivityAdminSession) - an
``ActivityAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_activity_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_activity_admin()`` is ``true``.* | [
"Gets",
"the",
"OsidSession",
"associated",
"with",
"the",
"activity",
"administration",
"service",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/requests/sessions.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/sessions.py#L617-L688 | def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
except StopIteration:
pass
if not stream:
r.content
return r | [
"def",
"send",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"# Set defaults that the hooks can utilize to ensure they always have",
"# the correct parameters to reproduce the previous request.",
"kwargs",
".",
"setdefault",
"(",
"'stream'",
",",
"self",
".",
"stream",
")",
"kwargs",
".",
"setdefault",
"(",
"'verify'",
",",
"self",
".",
"verify",
")",
"kwargs",
".",
"setdefault",
"(",
"'cert'",
",",
"self",
".",
"cert",
")",
"kwargs",
".",
"setdefault",
"(",
"'proxies'",
",",
"self",
".",
"proxies",
")",
"# It's possible that users might accidentally send a Request object.",
"# Guard against that specific failure case.",
"if",
"isinstance",
"(",
"request",
",",
"Request",
")",
":",
"raise",
"ValueError",
"(",
"'You can only send PreparedRequests.'",
")",
"# Set up variables needed for resolve_redirects and dispatching of hooks",
"allow_redirects",
"=",
"kwargs",
".",
"pop",
"(",
"'allow_redirects'",
",",
"True",
")",
"stream",
"=",
"kwargs",
".",
"get",
"(",
"'stream'",
")",
"hooks",
"=",
"request",
".",
"hooks",
"# Get the appropriate adapter to use",
"adapter",
"=",
"self",
".",
"get_adapter",
"(",
"url",
"=",
"request",
".",
"url",
")",
"# Start time (approximately) of the request",
"start",
"=",
"preferred_clock",
"(",
")",
"# Send the request",
"r",
"=",
"adapter",
".",
"send",
"(",
"request",
",",
"*",
"*",
"kwargs",
")",
"# Total elapsed time of the request (approximately)",
"elapsed",
"=",
"preferred_clock",
"(",
")",
"-",
"start",
"r",
".",
"elapsed",
"=",
"timedelta",
"(",
"seconds",
"=",
"elapsed",
")",
"# Response manipulation hooks",
"r",
"=",
"dispatch_hook",
"(",
"'response'",
",",
"hooks",
",",
"r",
",",
"*",
"*",
"kwargs",
")",
"# Persist cookies",
"if",
"r",
".",
"history",
":",
"# If the hooks create history then we want those cookies too",
"for",
"resp",
"in",
"r",
".",
"history",
":",
"extract_cookies_to_jar",
"(",
"self",
".",
"cookies",
",",
"resp",
".",
"request",
",",
"resp",
".",
"raw",
")",
"extract_cookies_to_jar",
"(",
"self",
".",
"cookies",
",",
"request",
",",
"r",
".",
"raw",
")",
"# Redirect resolving generator.",
"gen",
"=",
"self",
".",
"resolve_redirects",
"(",
"r",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
"# Resolve redirects if allowed.",
"history",
"=",
"[",
"resp",
"for",
"resp",
"in",
"gen",
"]",
"if",
"allow_redirects",
"else",
"[",
"]",
"# Shuffle things around if there's history.",
"if",
"history",
":",
"# Insert the first (original) request at the start",
"history",
".",
"insert",
"(",
"0",
",",
"r",
")",
"# Get the last request made",
"r",
"=",
"history",
".",
"pop",
"(",
")",
"r",
".",
"history",
"=",
"history",
"# If redirects aren't being followed, store the response on the Request for Response.next().",
"if",
"not",
"allow_redirects",
":",
"try",
":",
"r",
".",
"_next",
"=",
"next",
"(",
"self",
".",
"resolve_redirects",
"(",
"r",
",",
"request",
",",
"yield_requests",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
")",
"except",
"StopIteration",
":",
"pass",
"if",
"not",
"stream",
":",
"r",
".",
"content",
"return",
"r"
] | Send a given PreparedRequest.
:rtype: requests.Response | [
"Send",
"a",
"given",
"PreparedRequest",
"."
] | python | train |
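A hypothetical usage sketch for the row above (the URL and timeout are made up, not taken from the source); Session.send() only accepts PreparedRequest objects, which is what the isinstance guard enforces:

import requests

s = requests.Session()
prepared = requests.Request('GET', 'https://example.org').prepare()
resp = s.send(prepared, timeout=10)      # extra kwargs are forwarded to the adapter
print(resp.status_code, resp.elapsed)    # elapsed is set by send() itself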
zeth/inputs | inputs.py | https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L1641-L1647 | def get_fptr(self):
"""Get the function pointer."""
cmpfunc = ctypes.CFUNCTYPE(ctypes.c_int,
WPARAM,
LPARAM,
ctypes.POINTER(KBDLLHookStruct))
return cmpfunc(self.handle_input) | [
"def",
"get_fptr",
"(",
"self",
")",
":",
"cmpfunc",
"=",
"ctypes",
".",
"CFUNCTYPE",
"(",
"ctypes",
".",
"c_int",
",",
"WPARAM",
",",
"LPARAM",
",",
"ctypes",
".",
"POINTER",
"(",
"KBDLLHookStruct",
")",
")",
"return",
"cmpfunc",
"(",
"self",
".",
"handle_input",
")"
] | Get the function pointer. | [
"Get",
"the",
"function",
"pointer",
"."
] | python | train |
ktdreyer/txbugzilla | examples/find-external.py | https://github.com/ktdreyer/txbugzilla/blob/ccfc6667ce9d696b08b468b25c813cc2b68d30d6/examples/find-external.py#L12-L23 | def find_tracker_url(ticket_url):
"""
Given http://tracker.ceph.com/issues/16673 or
tracker.ceph.com/issues/16673, return "http://tracker.ceph.com".
"""
if ticket_url.startswith('http://') or ticket_url.startswith('https://'):
o = urlparse(ticket_url)
scheme, netloc = o.scheme, o.netloc
else:
scheme = 'http'
(netloc, _) = ticket_url.split('/', 1)
return '%s://%s' % (scheme, netloc) | [
"def",
"find_tracker_url",
"(",
"ticket_url",
")",
":",
"if",
"ticket_url",
".",
"startswith",
"(",
"'http://'",
")",
"or",
"ticket_url",
".",
"startswith",
"(",
"'https://'",
")",
":",
"o",
"=",
"urlparse",
"(",
"ticket_url",
")",
"scheme",
",",
"netloc",
"=",
"o",
".",
"scheme",
",",
"o",
".",
"netloc",
"else",
":",
"scheme",
"=",
"'http'",
"(",
"netloc",
",",
"_",
")",
"=",
"ticket_url",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"return",
"'%s://%s'",
"%",
"(",
"scheme",
",",
"netloc",
")"
] | Given http://tracker.ceph.com/issues/16673 or
tracker.ceph.com/issues/16673, return "http://tracker.ceph.com". | [
"Given",
"http",
":",
"//",
"tracker",
".",
"ceph",
".",
"com",
"/",
"issues",
"/",
"16673",
"or",
"tracker",
".",
"ceph",
".",
"com",
"/",
"issues",
"/",
"16673",
"return",
"http",
":",
"//",
"tracker",
".",
"ceph",
".",
"com",
"."
] | python | train |
ajk8/microcache | microcache/__init__.py | https://github.com/ajk8/microcache/blob/24876c2c5f8959a806e2701adb7efbf70a87a1ae/microcache/__init__.py#L208-L214 | def enable(self):
"""
(Re)enable the cache
"""
logger.debug('enable()')
self.options.enabled = True
logger.info('cache enabled') | [
"def",
"enable",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'enable()'",
")",
"self",
".",
"options",
".",
"enabled",
"=",
"True",
"logger",
".",
"info",
"(",
"'cache enabled'",
")"
] | (Re)enable the cache | [
"(",
"Re",
")",
"enable",
"the",
"cache"
] | python | train |
kontron/python-aardvark | pyaardvark/aardvark.py | https://github.com/kontron/python-aardvark/blob/9827f669fbdc5bceb98e7d08a294b4e4e455d0d5/pyaardvark/aardvark.py#L559-L575 | def i2c_monitor_read(self):
"""Retrieved any data fetched by the monitor.
This function has an integrated timeout mechanism. You should use
:func:`poll` to determine if there is any data available.
Returns a list of data bytes and special symbols. There are three
special symbols: `I2C_MONITOR_NACK`, I2C_MONITOR_START and
I2C_MONITOR_STOP.
"""
data = array.array('H', (0,) * self.BUFFER_SIZE)
ret = api.py_aa_i2c_monitor_read(self.handle, self.BUFFER_SIZE,
data)
_raise_error_if_negative(ret)
del data[ret:]
return data.tolist() | [
"def",
"i2c_monitor_read",
"(",
"self",
")",
":",
"data",
"=",
"array",
".",
"array",
"(",
"'H'",
",",
"(",
"0",
",",
")",
"*",
"self",
".",
"BUFFER_SIZE",
")",
"ret",
"=",
"api",
".",
"py_aa_i2c_monitor_read",
"(",
"self",
".",
"handle",
",",
"self",
".",
"BUFFER_SIZE",
",",
"data",
")",
"_raise_error_if_negative",
"(",
"ret",
")",
"del",
"data",
"[",
"ret",
":",
"]",
"return",
"data",
".",
"tolist",
"(",
")"
] | Retrieve any data fetched by the monitor.
This function has an integrated timeout mechanism. You should use
:func:`poll` to determine if there is any data available.
Returns a list of data bytes and special symbols. There are three
special symbols: `I2C_MONITOR_NACK`, I2C_MONITOR_START and
I2C_MONITOR_STOP. | [
"Retrieved",
"any",
"data",
"fetched",
"by",
"the",
"monitor",
"."
] | python | train |
DataDog/integrations-core | datadog_checks_base/datadog_checks/base/utils/platform.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/utils/platform.py#L65-L68 | def is_unix(name=None):
""" Return true if the platform is a unix, False otherwise. """
name = name or sys.platform
return Platform.is_darwin(name) or Platform.is_linux(name) or Platform.is_freebsd(name) | [
"def",
"is_unix",
"(",
"name",
"=",
"None",
")",
":",
"name",
"=",
"name",
"or",
"sys",
".",
"platform",
"return",
"Platform",
".",
"is_darwin",
"(",
"name",
")",
"or",
"Platform",
".",
"is_linux",
"(",
"name",
")",
"or",
"Platform",
".",
"is_freebsd",
"(",
"name",
")"
] | Return true if the platform is a unix, False otherwise. | [
"Return",
"true",
"if",
"the",
"platform",
"is",
"a",
"unix",
"False",
"otherwise",
"."
] | python | train |
FNNDSC/pfurl | pfurl/pfurl.py | https://github.com/FNNDSC/pfurl/blob/572f634ab582b7b7b7a3fbfd5bf12aadc1ba7958/pfurl/pfurl.py#L190-L221 | def storage_resolveBasedOnKey(self, *args, **kwargs):
"""
Call the remote service and ask for the storage location based on the key.
:param args:
:param kwargs:
:return:
"""
global Gd_internalvar
d_msg = {
'action': 'internalctl',
'meta': {
'var': 'key2address',
'compute': '<key>'
}
}
str_key = ""
b_status = False
for k,v in kwargs.items():
if k == 'key': str_key = v
d_msg['meta']['key'] = str_key
#
d_ret = self.pullPath_core(d_msg = d_msg)
return {
'status': b_status,
'path': str_internalLocation
} | [
"def",
"storage_resolveBasedOnKey",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"global",
"Gd_internalvar",
"d_msg",
"=",
"{",
"'action'",
":",
"'internalctl'",
",",
"'meta'",
":",
"{",
"'var'",
":",
"'key2address'",
",",
"'compute'",
":",
"'<key>'",
"}",
"}",
"str_key",
"=",
"\"\"",
"b_status",
"=",
"False",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'key'",
":",
"str_key",
"=",
"v",
"d_msg",
"[",
"'meta'",
"]",
"[",
"'key'",
"]",
"=",
"str_key",
"# ",
"d_ret",
"=",
"self",
".",
"pullPath_core",
"(",
"d_msg",
"=",
"d_msg",
")",
"return",
"{",
"'status'",
":",
"b_status",
",",
"'path'",
":",
"str_internalLocation",
"}"
] | Call the remote service and ask for the storage location based on the key.
:param args:
:param kwargs:
:return: | [
"Call",
"the",
"remote",
"service",
"and",
"ask",
"for",
"the",
"storage",
"location",
"based",
"on",
"the",
"key",
"."
] | python | train |
lablup/backend.ai-client-py | src/ai/backend/client/cli/files.py | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/files.py#L17-L35 | def upload(sess_id_or_alias, files):
"""
Upload files to user's home folder.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Path to upload.
"""
if len(files) < 1:
return
with Session() as session:
try:
print_wait('Uploading files...')
kernel = session.Kernel(sess_id_or_alias)
kernel.upload(files, show_progress=True)
print_done('Uploaded.')
except Exception as e:
print_error(e)
sys.exit(1) | [
"def",
"upload",
"(",
"sess_id_or_alias",
",",
"files",
")",
":",
"if",
"len",
"(",
"files",
")",
"<",
"1",
":",
"return",
"with",
"Session",
"(",
")",
"as",
"session",
":",
"try",
":",
"print_wait",
"(",
"'Uploading files...'",
")",
"kernel",
"=",
"session",
".",
"Kernel",
"(",
"sess_id_or_alias",
")",
"kernel",
".",
"upload",
"(",
"files",
",",
"show_progress",
"=",
"True",
")",
"print_done",
"(",
"'Uploaded.'",
")",
"except",
"Exception",
"as",
"e",
":",
"print_error",
"(",
"e",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | Upload files to user's home folder.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Path to upload. | [
"Upload",
"files",
"to",
"user",
"s",
"home",
"folder",
"."
] | python | train |
iDigBio/idigbio-python-client | idigbio/pandas_client.py | https://github.com/iDigBio/idigbio-python-client/blob/e896075b9fed297fc420caf303b3bb5a2298d969/idigbio/pandas_client.py#L51-L63 | def search_records(self, **kwargs):
"""
rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error.
"""
return self.__search_base(apifn=self.__api.search_records, **kwargs) | [
"def",
"search_records",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"__search_base",
"(",
"apifn",
"=",
"self",
".",
"__api",
".",
"search_records",
",",
"*",
"*",
"kwargs",
")"
] | rq Search Query in iDigBio Query Format, using Record Query Fields
sort field to sort on, pick from Record Query Fields
fields a list of fields to return, specified using the fieldName parameter from Fields with type records
fields_exclude a list of fields to exclude, specified using the fieldName parameter from Fields with type records
limit max results
offset skip results
Returns idigbio record format (legacy api), plus additional top level keys with parsed index terms. Returns None on error. | [
"rq",
"Search",
"Query",
"in",
"iDigBio",
"Query",
"Format",
"using",
"Record",
"Query",
"Fields",
"sort",
"field",
"to",
"sort",
"on",
"pick",
"from",
"Record",
"Query",
"Fields",
"fields",
"a",
"list",
"of",
"fields",
"to",
"return",
"specified",
"using",
"the",
"fieldName",
"parameter",
"from",
"Fields",
"with",
"type",
"records",
"fields_exclude",
"a",
"list",
"of",
"fields",
"to",
"exclude",
"specified",
"using",
"the",
"fieldName",
"parameter",
"from",
"Fields",
"with",
"type",
"records",
"limit",
"max",
"results",
"offset",
"skip",
"results"
] | python | train |
antonybholmes/libdna | libdna/decode.py | https://github.com/antonybholmes/libdna/blob/96badfd33c8896c799b1c633bb9fb75cec65a83a/libdna/decode.py#L495-L539 | def _read_mask(self, l, ret, mask='upper'):
"""
Reads mask from 1 bit file to convert bases to identify poor quality
bases that will either be converted to lowercase or 'N'. In the
2 bit file, 'N' or any other invalid base is written as 'A'.
Therefore the 'N' mask file is required to correctly identify where
invalid bases are.
Parameters
----------
l : tuple
location
ret : list
list of bases which will be modified in place
mask : str, optional
Either 'upper', 'lower', or 'n'. If 'lower', poor quality bases
will be converted to lowercase.
"""
if mask.startswith('u'):
return
file = os.path.join(self.dir, l.chr + ".mask.1bit")
if not os.path.exists(file):
return
if file != self.__mask_file:
print('Caching {}...'.format(file))
f = open(file, 'rb')
self.__mask_data = f.read()
f.close()
self.__mask_file = file
d = DNA2Bit._read1bit(self.__mask_data, l, offset=True)
if mask.startswith('l'):
for i in range(0, len(ret)):
if d[i] == 1:
ret[i] = DNA_UC_TO_LC_MAP[ret[i]] #ret[i].lower()
else:
# Use N as mask
for i in range(0, len(ret)):
if d[i] == 1:
ret[i] = DNA_N_UC | [
"def",
"_read_mask",
"(",
"self",
",",
"l",
",",
"ret",
",",
"mask",
"=",
"'upper'",
")",
":",
"if",
"mask",
".",
"startswith",
"(",
"'u'",
")",
":",
"return",
"file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dir",
",",
"l",
".",
"chr",
"+",
"\".mask.1bit\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file",
")",
":",
"return",
"if",
"file",
"!=",
"self",
".",
"__mask_file",
":",
"print",
"(",
"'Caching {}...'",
".",
"format",
"(",
"file",
")",
")",
"f",
"=",
"open",
"(",
"file",
",",
"'rb'",
")",
"self",
".",
"__mask_data",
"=",
"f",
".",
"read",
"(",
")",
"f",
".",
"close",
"(",
")",
"self",
".",
"__mask_file",
"=",
"file",
"d",
"=",
"DNA2Bit",
".",
"_read1bit",
"(",
"self",
".",
"__mask_data",
",",
"l",
",",
"offset",
"=",
"True",
")",
"if",
"mask",
".",
"startswith",
"(",
"l",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"ret",
")",
")",
":",
"if",
"d",
"[",
"i",
"]",
"==",
"1",
":",
"ret",
"[",
"i",
"]",
"=",
"DNA_UC_TO_LC_MAP",
"[",
"ret",
"[",
"i",
"]",
"]",
"#ret[i].lower()",
"else",
":",
"# Use N as mask",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"ret",
")",
")",
":",
"if",
"d",
"[",
"i",
"]",
"==",
"1",
":",
"ret",
"[",
"i",
"]",
"=",
"DNA_N_UC"
] | Reads mask from 1 bit file to convert bases to identify poor quality
bases that will either be converted to lowercase or 'N'. In the
2 bit file, 'N' or any other invalid base is written as 'A'.
Therefore the 'N' mask file is required to correctly identify where
invalid bases are.
Parameters
----------
l : tuple
location
ret : list
list of bases which will be modified in place
mask : str, optional
Either 'upper', 'lower', or 'n'. If 'lower', poor quality bases
will be converted to lowercase. | [
"Reads",
"mask",
"from",
"1",
"bit",
"file",
"to",
"convert",
"bases",
"to",
"identify",
"poor",
"quality",
"bases",
"that",
"will",
"either",
"be",
"converted",
"to",
"lowercase",
"or",
"N",
".",
"In",
"the",
"2",
"bit",
"file",
"N",
"or",
"any",
"other",
"invalid",
"base",
"is",
"written",
"as",
"A",
".",
"Therefore",
"the",
"N",
"mask",
"file",
"is",
"required",
"to",
"correctly",
"identify",
"where",
"invalid",
"bases",
"are",
".",
"Parameters",
"----------",
"l",
":",
"tuple",
"location",
"ret",
":",
"list",
"list",
"of",
"bases",
"which",
"will",
"be",
"modified",
"in",
"place",
"mask",
":",
"str",
"optional",
"Either",
"upper",
"lower",
"or",
"n",
".",
"If",
"lower",
"poor",
"quality",
"bases",
"will",
"be",
"converted",
"to",
"lowercase",
"."
] | python | train |
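A minimal sketch (an assumption for illustration, not code from the source) of what the two masking branches above do once the 1-bit vector has been decoded:

bases = ['A', 'C', 'G', 'T']
d = [0, 1, 0, 1]  # 1 marks a poor-quality position, as produced by _read1bit()
lowered = [b.lower() if m else b for b, m in zip(bases, d)]  # mask='lower'
as_n = ['N' if m else b for b, m in zip(bases, d)]           # mask='n'
print(lowered, as_n)  # ['A', 'c', 'G', 't'] ['A', 'N', 'G', 'N']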
grahambell/pymoc | lib/pymoc/moc.py | https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/moc.py#L578-L657 | def normalize(self, max_order=MAX_ORDER):
"""Ensure that the MOC is "well-formed".
This structures the MOC as is required for the FITS and JSON
representation. This method is invoked automatically when writing
to these formats.
The number of cells in the MOC will be minimized, so that
no area of the sky is covered multiple times by cells at
different orders, and if all four neighboring cells are
present at an order (other than order 0), they are merged
into their parent cell at the next lower order.
>>> m = MOC(1, (0, 1, 2, 3))
>>> m.cells
4
>>> m.normalize()
>>> m.cells
1
"""
max_order = self._validate_order(max_order)
# If the MOC is already normalized and we are not being asked
# to reduce the order, then do nothing.
if self.normalized and max_order >= self.order:
return
# Group the pixels by iterating down from the order. At each
# order, where all 4 adjacent pixels are present (or we are above
# the maximum order) they are replaced with a single pixel in the
# next lower order. Otherwise the pixel should appear in the MOC
# unless it is already represented at a lower order.
for order in range(self.order, 0, -1):
pixels = self._orders[order]
next_pixels = self._orders[order - 1]
new_pixels = set()
while pixels:
pixel = pixels.pop()
# Look to lower orders to ensure this pixel isn't
# already covered.
check_pixel = pixel
already_contained = True
for check_order in range(order - 1, -1, -1):
check_pixel >>= 2
if check_pixel in self._orders[check_order]:
break
else:
already_contained = False
# Check whether this order is above the maximum, or
# if we have all 4 adjacent pixels. Also do this if
# the pixel was already contained at a lower level
# so that we can avoid checking the adjacent pixels.
if (already_contained or (order > max_order) or
(((pixel ^ 1) in pixels) and
((pixel ^ 2) in pixels) and
((pixel ^ 3) in pixels))):
pixels.discard(pixel ^ 1)
pixels.discard(pixel ^ 2)
pixels.discard(pixel ^ 3)
if not already_contained:
# Group these pixels by placing the equivalent pixel
# for the next order down in the set.
next_pixels.add(pixel >> 2)
else:
new_pixels.add(pixel)
if new_pixels:
self._orders[order].update(new_pixels)
self._normalized = True | [
"def",
"normalize",
"(",
"self",
",",
"max_order",
"=",
"MAX_ORDER",
")",
":",
"max_order",
"=",
"self",
".",
"_validate_order",
"(",
"max_order",
")",
"# If the MOC is already normalized and we are not being asked",
"# to reduce the order, then do nothing.",
"if",
"self",
".",
"normalized",
"and",
"max_order",
">=",
"self",
".",
"order",
":",
"return",
"# Group the pixels by iterating down from the order. At each",
"# order, where all 4 adjacent pixels are present (or we are above",
"# the maximum order) they are replaced with a single pixel in the",
"# next lower order. Otherwise the pixel should appear in the MOC",
"# unless it is already represented at a lower order.",
"for",
"order",
"in",
"range",
"(",
"self",
".",
"order",
",",
"0",
",",
"-",
"1",
")",
":",
"pixels",
"=",
"self",
".",
"_orders",
"[",
"order",
"]",
"next_pixels",
"=",
"self",
".",
"_orders",
"[",
"order",
"-",
"1",
"]",
"new_pixels",
"=",
"set",
"(",
")",
"while",
"pixels",
":",
"pixel",
"=",
"pixels",
".",
"pop",
"(",
")",
"# Look to lower orders to ensure this pixel isn't",
"# already covered.",
"check_pixel",
"=",
"pixel",
"already_contained",
"=",
"True",
"for",
"check_order",
"in",
"range",
"(",
"order",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"check_pixel",
">>=",
"2",
"if",
"check_pixel",
"in",
"self",
".",
"_orders",
"[",
"check_order",
"]",
":",
"break",
"else",
":",
"already_contained",
"=",
"False",
"# Check whether this order is above the maximum, or",
"# if we have all 4 adjacent pixels. Also do this if",
"# the pixel was already contained at a lower level",
"# so that we can avoid checking the adjacent pixels.",
"if",
"(",
"already_contained",
"or",
"(",
"order",
">",
"max_order",
")",
"or",
"(",
"(",
"(",
"pixel",
"^",
"1",
")",
"in",
"pixels",
")",
"and",
"(",
"(",
"pixel",
"^",
"2",
")",
"in",
"pixels",
")",
"and",
"(",
"(",
"pixel",
"^",
"3",
")",
"in",
"pixels",
")",
")",
")",
":",
"pixels",
".",
"discard",
"(",
"pixel",
"^",
"1",
")",
"pixels",
".",
"discard",
"(",
"pixel",
"^",
"2",
")",
"pixels",
".",
"discard",
"(",
"pixel",
"^",
"3",
")",
"if",
"not",
"already_contained",
":",
"# Group these pixels by placing the equivalent pixel",
"# for the next order down in the set.",
"next_pixels",
".",
"add",
"(",
"pixel",
">>",
"2",
")",
"else",
":",
"new_pixels",
".",
"add",
"(",
"pixel",
")",
"if",
"new_pixels",
":",
"self",
".",
"_orders",
"[",
"order",
"]",
".",
"update",
"(",
"new_pixels",
")",
"self",
".",
"_normalized",
"=",
"True"
] | Ensure that the MOC is "well-formed".
This structures the MOC as is required for the FITS and JSON
representation. This method is invoked automatically when writing
to these formats.
The number of cells in the MOC will be minimized, so that
no area of the sky is covered multiple times by cells at
different orders, and if all four neighboring cells are
present at an order (other than order 0), they are merged
into their parent cell at the next lower order.
>>> m = MOC(1, (0, 1, 2, 3))
>>> m.cells
4
>>> m.normalize()
>>> m.cells
1 | [
"Ensure",
"that",
"the",
"MOC",
"is",
"well",
"-",
"formed",
"."
] | python | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L793-L802 | def _is_control(self, char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char in ['\t', '\n', '\r']:
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False | [
"def",
"_is_control",
"(",
"self",
",",
"char",
")",
":",
"# These are technically control characters but we count them as whitespace",
"# characters.",
"if",
"char",
"in",
"[",
"'\\t'",
",",
"'\\n'",
",",
"'\\r'",
"]",
":",
"return",
"False",
"cat",
"=",
"unicodedata",
".",
"category",
"(",
"char",
")",
"if",
"cat",
".",
"startswith",
"(",
"'C'",
")",
":",
"return",
"True",
"return",
"False"
] | Checks whether `chars` is a control character. | [
"Checks",
"whether",
"chars",
"is",
"a",
"control",
"character",
"."
] | python | train |
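A small self-contained check of the same rule (a standalone rewrite for illustration; the real method takes self):

import unicodedata

for ch in ['\t', '\x07', 'a']:
    cat = unicodedata.category(ch)
    is_ctrl = ch not in ['\t', '\n', '\r'] and cat.startswith('C')
    print(repr(ch), cat, is_ctrl)
# '\t' Cc False   (tab is treated as whitespace, not control)
# '\x07' Cc True
# 'a' Ll False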
SheffieldML/GPyOpt | GPyOpt/models/gpmodel.py | https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/models/gpmodel.py#L129-L140 | def predict_withGradients(self, X):
"""
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
"""
if X.ndim==1: X = X[None,:]
m, v = self.model.predict(X)
v = np.clip(v, 1e-10, np.inf)
dmdx, dvdx = self.model.predictive_gradients(X)
dmdx = dmdx[:,:,0]
dsdx = dvdx / (2*np.sqrt(v))
return m, np.sqrt(v), dmdx, dsdx | [
"def",
"predict_withGradients",
"(",
"self",
",",
"X",
")",
":",
"if",
"X",
".",
"ndim",
"==",
"1",
":",
"X",
"=",
"X",
"[",
"None",
",",
":",
"]",
"m",
",",
"v",
"=",
"self",
".",
"model",
".",
"predict",
"(",
"X",
")",
"v",
"=",
"np",
".",
"clip",
"(",
"v",
",",
"1e-10",
",",
"np",
".",
"inf",
")",
"dmdx",
",",
"dvdx",
"=",
"self",
".",
"model",
".",
"predictive_gradients",
"(",
"X",
")",
"dmdx",
"=",
"dmdx",
"[",
":",
",",
":",
",",
"0",
"]",
"dsdx",
"=",
"dvdx",
"/",
"(",
"2",
"*",
"np",
".",
"sqrt",
"(",
"v",
")",
")",
"return",
"m",
",",
"np",
".",
"sqrt",
"(",
"v",
")",
",",
"dmdx",
",",
"dsdx"
] | Returns the mean, standard deviation, mean gradient and standard deviation gradient at X. | [
"Returns",
"the",
"mean",
"standard",
"deviation",
"mean",
"gradient",
"and",
"standard",
"deviation",
"gradient",
"at",
"X",
"."
] | python | train |
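A reading note on the last lines of the function above: with \(\sigma(x) = \sqrt{v(x)}\), the chain rule gives

\[
\frac{\partial \sigma}{\partial x} = \frac{1}{2\sqrt{v(x)}}\,\frac{\partial v}{\partial x},
\]

which is exactly the dsdx = dvdx / (2*np.sqrt(v)) step; v is clipped away from zero beforehand so the division stays finite.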
mardix/Mocha | mocha/contrib/auth/__init__.py | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/__init__.py#L495-L509 | def unsign_data(self, data, url_safe=True):
"""
Retrieve the signed data. If it is expired, it will throw an exception
:param data: token/signed data
:param url_safe: bool. If true it will allow it to be passed in URL
:return: mixed, the data in its original form
"""
if url_safe:
return utils.unsign_url_safe(data,
secret_key=self.secret_key,
salt=self.user_salt)
else:
return utils.unsign_data(data,
secret_key=self.secret_key,
salt=self.user_salt) | [
"def",
"unsign_data",
"(",
"self",
",",
"data",
",",
"url_safe",
"=",
"True",
")",
":",
"if",
"url_safe",
":",
"return",
"utils",
".",
"unsign_url_safe",
"(",
"data",
",",
"secret_key",
"=",
"self",
".",
"secret_key",
",",
"salt",
"=",
"self",
".",
"user_salt",
")",
"else",
":",
"return",
"utils",
".",
"unsign_data",
"(",
"data",
",",
"secret_key",
"=",
"self",
".",
"secret_key",
",",
"salt",
"=",
"self",
".",
"user_salt",
")"
] | Retrieve the signed data. If it is expired, it will throw an exception
:param data: token/signed data
:param url_safe: bool. If true it will allow it to be passed in URL
:return: mixed, the data in its original form | [
"Retrieve",
"the",
"signed",
"data",
".",
"If",
"it",
"is",
"expired",
"it",
"will",
"throw",
"an",
"exception",
":",
"param",
"data",
":",
"token",
"/",
"signed",
"data",
":",
"param",
"url_safe",
":",
"bool",
".",
"If",
"true",
"it",
"will",
"allow",
"it",
"to",
"be",
"passed",
"in",
"URL",
":",
"return",
":",
"mixed",
"the",
"data",
"in",
"its",
"original",
"form"
] | python | train |
LedgerHQ/btchip-python | btchip/msqr.py | https://github.com/LedgerHQ/btchip-python/blob/fe82d7f5638169f583a445b8e200fd1c9f3ea218/btchip/msqr.py#L84-L94 | def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) / 2, p)
return -1 if ls == p - 1 else ls | [
"def",
"legendre_symbol",
"(",
"a",
",",
"p",
")",
":",
"ls",
"=",
"pow",
"(",
"a",
",",
"(",
"p",
"-",
"1",
")",
"/",
"2",
",",
"p",
")",
"return",
"-",
"1",
"if",
"ls",
"==",
"p",
"-",
"1",
"else",
"ls"
] | Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise. | [
"Compute",
"the",
"Legendre",
"symbol",
"a|p",
"using",
"Euler",
"s",
"criterion",
".",
"p",
"is",
"a",
"prime",
"a",
"is",
"relatively",
"prime",
"to",
"p",
"(",
"if",
"p",
"divides",
"a",
"then",
"a|p",
"=",
"0",
")"
] | python | train |
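A quick self-contained check of Euler's criterion (the values are illustrative; the exponent is written with // here because the original row uses Python 2 division):

def legendre_symbol(a, p):
    ls = pow(a, (p - 1) // 2, p)
    return -1 if ls == p - 1 else ls

# squares mod 7 are {1, 2, 4}, so 2 is a residue and 3 is not
print(legendre_symbol(2, 7), legendre_symbol(3, 7))  # 1 -1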
materialsproject/pymatgen | pymatgen/core/structure.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L2959-L3000 | def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None,
to_unit_cell=True):
"""
Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
from numpy.linalg import norm
from numpy import cross, eye
from scipy.linalg import expm
if indices is None:
indices = range(len(self))
if axis is None:
axis = [0, 0, 1]
if anchor is None:
anchor = [0, 0, 0]
anchor = np.array(anchor)
axis = np.array(axis)
theta %= 2 * np.pi
rm = expm(cross(eye(3), axis / norm(axis)) * theta)
for i in indices:
site = self._sites[i]
coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel()
new_site = PeriodicSite(
site.species, coords, self._lattice,
to_unit_cell=to_unit_cell, coords_are_cartesian=True,
properties=site.properties)
self._sites[i] = new_site | [
"def",
"rotate_sites",
"(",
"self",
",",
"indices",
"=",
"None",
",",
"theta",
"=",
"0",
",",
"axis",
"=",
"None",
",",
"anchor",
"=",
"None",
",",
"to_unit_cell",
"=",
"True",
")",
":",
"from",
"numpy",
".",
"linalg",
"import",
"norm",
"from",
"numpy",
"import",
"cross",
",",
"eye",
"from",
"scipy",
".",
"linalg",
"import",
"expm",
"if",
"indices",
"is",
"None",
":",
"indices",
"=",
"range",
"(",
"len",
"(",
"self",
")",
")",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"[",
"0",
",",
"0",
",",
"1",
"]",
"if",
"anchor",
"is",
"None",
":",
"anchor",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"anchor",
"=",
"np",
".",
"array",
"(",
"anchor",
")",
"axis",
"=",
"np",
".",
"array",
"(",
"axis",
")",
"theta",
"%=",
"2",
"*",
"np",
".",
"pi",
"rm",
"=",
"expm",
"(",
"cross",
"(",
"eye",
"(",
"3",
")",
",",
"axis",
"/",
"norm",
"(",
"axis",
")",
")",
"*",
"theta",
")",
"for",
"i",
"in",
"indices",
":",
"site",
"=",
"self",
".",
"_sites",
"[",
"i",
"]",
"coords",
"=",
"(",
"(",
"np",
".",
"dot",
"(",
"rm",
",",
"np",
".",
"array",
"(",
"site",
".",
"coords",
"-",
"anchor",
")",
".",
"T",
")",
")",
".",
"T",
"+",
"anchor",
")",
".",
"ravel",
"(",
")",
"new_site",
"=",
"PeriodicSite",
"(",
"site",
".",
"species",
",",
"coords",
",",
"self",
".",
"_lattice",
",",
"to_unit_cell",
"=",
"to_unit_cell",
",",
"coords_are_cartesian",
"=",
"True",
",",
"properties",
"=",
"site",
".",
"properties",
")",
"self",
".",
"_sites",
"[",
"i",
"]",
"=",
"new_site"
] | Rotate specific sites by some angle around vector at anchor.
Args:
indices (list): List of site indices on which to perform the
translation.
theta (float): Angle in radians
axis (3x1 array): Rotation axis vector.
anchor (3x1 array): Point of rotation.
to_unit_cell (bool): Whether new sites are transformed to unit
cell | [
"Rotate",
"specific",
"sites",
"by",
"some",
"angle",
"around",
"vector",
"at",
"anchor",
"."
] | python | train |
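A minimal sketch (an assumption for illustration, not part of the source row) of the expm/cross construction used above: it turns an axis and an angle into a rotation matrix, so rotating (1, 0, 0) by 90 degrees about z gives approximately (0, 1, 0):

import numpy as np
from numpy.linalg import norm
from scipy.linalg import expm

axis = np.array([0.0, 0.0, 1.0])
theta = np.pi / 2
rm = expm(np.cross(np.eye(3), axis / norm(axis)) * theta)  # rotation matrix
print(np.round(rm.dot([1.0, 0.0, 0.0]), 6))  # ~[0. 1. 0.]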
saltstack/salt | salt/runners/fileserver.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/fileserver.py#L150-L193 | def file_list(saltenv='base', backend=None):
'''
Return a list of files from the salt fileserver
saltenv : base
The salt fileserver environment to be listed
backend
Narrow fileserver backends to a subset of the enabled ones. If all
passed backends start with a minus sign (``-``), then these backends
will be excluded from the enabled backends. However, if there is a mix
of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus sign will
be disregarded.
.. versionadded:: 2015.5.0
.. note:
Keep in mind that executing this function spawns a new process,
separate from the master. This means that if the fileserver
configuration has been changed in some way since the master has been
restarted (e.g. if :conf_master:`fileserver_backend`,
:conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
been updated), then the results of this runner will not accurately
reflect what files are available to minions.
When in doubt, use :py:func:`cp.list_master
<salt.modules.cp.list_master>` to see what files the minion can see,
and always remember to restart the salt-master daemon when updating
the fileserver configuration.
CLI Examples:
.. code-block:: bash
salt-run fileserver.file_list
salt-run fileserver.file_list saltenv=prod
salt-run fileserver.file_list saltenv=dev backend=git
salt-run fileserver.file_list base hg,roots
salt-run fileserver.file_list -git
'''
fileserver = salt.fileserver.Fileserver(__opts__)
load = {'saltenv': saltenv, 'fsbackend': backend}
return fileserver.file_list(load=load) | [
"def",
"file_list",
"(",
"saltenv",
"=",
"'base'",
",",
"backend",
"=",
"None",
")",
":",
"fileserver",
"=",
"salt",
".",
"fileserver",
".",
"Fileserver",
"(",
"__opts__",
")",
"load",
"=",
"{",
"'saltenv'",
":",
"saltenv",
",",
"'fsbackend'",
":",
"backend",
"}",
"return",
"fileserver",
".",
"file_list",
"(",
"load",
"=",
"load",
")"
] | Return a list of files from the salt fileserver
saltenv : base
The salt fileserver environment to be listed
backend
Narrow fileserver backends to a subset of the enabled ones. If all
passed backends start with a minus sign (``-``), then these backends
will be excluded from the enabled backends. However, if there is a mix
of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus sign will
be disregarded.
.. versionadded:: 2015.5.0
.. note:
Keep in mind that executing this function spawns a new process,
separate from the master. This means that if the fileserver
configuration has been changed in some way since the master has been
restarted (e.g. if :conf_master:`fileserver_backend`,
:conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
been updated), then the results of this runner will not accurately
reflect what files are available to minions.
When in doubt, use :py:func:`cp.list_master
<salt.modules.cp.list_master>` to see what files the minion can see,
and always remember to restart the salt-master daemon when updating
the fileserver configuration.
CLI Examples:
.. code-block:: bash
salt-run fileserver.file_list
salt-run fileserver.file_list saltenv=prod
salt-run fileserver.file_list saltenv=dev backend=git
salt-run fileserver.file_list base hg,roots
salt-run fileserver.file_list -git | [
"Return",
"a",
"list",
"of",
"files",
"from",
"the",
"salt",
"fileserver"
] | python | train |
zmathew/django-backbone | backbone/views.py | https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L297-L307 | def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params) | [
"def",
"json_dumps",
"(",
"self",
",",
"data",
",",
"*",
"*",
"options",
")",
":",
"params",
"=",
"{",
"'sort_keys'",
":",
"True",
",",
"'indent'",
":",
"2",
"}",
"params",
".",
"update",
"(",
"options",
")",
"# This code is based off django's built in JSON serializer",
"if",
"json",
".",
"__version__",
".",
"split",
"(",
"'.'",
")",
">=",
"[",
"'2'",
",",
"'1'",
",",
"'3'",
"]",
":",
"# Use JS strings to represent Python Decimal instances (ticket #16850)",
"params",
".",
"update",
"(",
"{",
"'use_decimal'",
":",
"False",
"}",
")",
"return",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"DjangoJSONEncoder",
",",
"*",
"*",
"params",
")"
] | Wrapper around `json.dumps` that uses a special JSON encoder. | [
"Wrapper",
"around",
"json",
".",
"dumps",
"that",
"uses",
"a",
"special",
"JSON",
"encoder",
"."
] | python | train |
ZEDGR/pychal | challonge/api.py | https://github.com/ZEDGR/pychal/blob/3600fa9e0557a2a14eb1ad0c0711d28dad3693d7/challonge/api.py#L64-L91 | def fetch(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return the contents of the response."""
params = _prepare_params(params, params_prefix)
if method == "POST" or method == "PUT":
r_data = {"data": params}
else:
r_data = {"params": params}
# build the HTTP request and use basic authentication
url = "https://%s/%s.json" % (CHALLONGE_API_URL, uri)
try:
response = request(
method,
url,
auth=get_credentials(),
**r_data)
response.raise_for_status()
except HTTPError:
if response.status_code != 422:
response.raise_for_status()
# wrap up application-level errors
doc = response.json()
if doc.get("errors"):
raise ChallongeException(*doc['errors'])
return response | [
"def",
"fetch",
"(",
"method",
",",
"uri",
",",
"params_prefix",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"params",
"=",
"_prepare_params",
"(",
"params",
",",
"params_prefix",
")",
"if",
"method",
"==",
"\"POST\"",
"or",
"method",
"==",
"\"PUT\"",
":",
"r_data",
"=",
"{",
"\"data\"",
":",
"params",
"}",
"else",
":",
"r_data",
"=",
"{",
"\"params\"",
":",
"params",
"}",
"# build the HTTP request and use basic authentication",
"url",
"=",
"\"https://%s/%s.json\"",
"%",
"(",
"CHALLONGE_API_URL",
",",
"uri",
")",
"try",
":",
"response",
"=",
"request",
"(",
"method",
",",
"url",
",",
"auth",
"=",
"get_credentials",
"(",
")",
",",
"*",
"*",
"r_data",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"HTTPError",
":",
"if",
"response",
".",
"status_code",
"!=",
"422",
":",
"response",
".",
"raise_for_status",
"(",
")",
"# wrap up application-level errors",
"doc",
"=",
"response",
".",
"json",
"(",
")",
"if",
"doc",
".",
"get",
"(",
"\"errors\"",
")",
":",
"raise",
"ChallongeException",
"(",
"*",
"doc",
"[",
"'errors'",
"]",
")",
"return",
"response"
] | Fetch the given uri and return the contents of the response. | [
"Fetch",
"the",
"given",
"uri",
"and",
"return",
"the",
"contents",
"of",
"the",
"response",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/io/abinit/tasks.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L1818-L1888 | def set_status(self, status, msg):
"""
Set and return the status of the task.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors.
"""
# truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
if len(msg) > 2000:
msg = msg[:2000]
msg += "\n... snip ...\n"
# Locked files must be explicitly unlocked
if self.status == self.S_LOCKED or status == self.S_LOCKED:
err_msg = (
"Locked files must be explicitly unlocked before calling set_status but\n"
"task.status = %s, input status = %s" % (self.status, status))
raise RuntimeError(err_msg)
status = Status.as_status(status)
changed = True
if hasattr(self, "_status"):
changed = (status != self._status)
self._status = status
if status == self.S_RUN:
# Set datetimes.start when the task enters S_RUN
if self.datetimes.start is None:
self.datetimes.start = datetime.datetime.now()
# Add new entry to history only if the status has changed.
if changed:
if status == self.S_SUB:
self.datetimes.submission = datetime.datetime.now()
self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
elif status == self.S_OK:
self.history.info("Task completed %s", msg)
elif status == self.S_ABICRITICAL:
self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)
else:
self.history.info("Status changed to %s. msg: %s", status, msg)
#######################################################
# The section belows contains callbacks that should not
# be executed if we are in spectator_mode
#######################################################
if status == self.S_DONE:
# Execute the callback
self._on_done()
if status == self.S_OK:
# Finalize the task.
if not self.finalized:
self._on_ok()
# here we remove the output files of the task and of its parents.
if self.gc is not None and self.gc.policy == "task":
self.clean_output_files()
if self.status == self.S_OK:
# Because _on_ok might have changed the status.
self.send_signal(self.S_OK)
return status | [
"def",
"set_status",
"(",
"self",
",",
"status",
",",
"msg",
")",
":",
"# truncate string if it's long. msg will be logged in the object and we don't want to waste memory.",
"if",
"len",
"(",
"msg",
")",
">",
"2000",
":",
"msg",
"=",
"msg",
"[",
":",
"2000",
"]",
"msg",
"+=",
"\"\\n... snip ...\\n\"",
"# Locked files must be explicitly unlocked",
"if",
"self",
".",
"status",
"==",
"self",
".",
"S_LOCKED",
"or",
"status",
"==",
"self",
".",
"S_LOCKED",
":",
"err_msg",
"=",
"(",
"\"Locked files must be explicitly unlocked before calling set_status but\\n\"",
"\"task.status = %s, input status = %s\"",
"%",
"(",
"self",
".",
"status",
",",
"status",
")",
")",
"raise",
"RuntimeError",
"(",
"err_msg",
")",
"status",
"=",
"Status",
".",
"as_status",
"(",
"status",
")",
"changed",
"=",
"True",
"if",
"hasattr",
"(",
"self",
",",
"\"_status\"",
")",
":",
"changed",
"=",
"(",
"status",
"!=",
"self",
".",
"_status",
")",
"self",
".",
"_status",
"=",
"status",
"if",
"status",
"==",
"self",
".",
"S_RUN",
":",
"# Set datetimes.start when the task enters S_RUN",
"if",
"self",
".",
"datetimes",
".",
"start",
"is",
"None",
":",
"self",
".",
"datetimes",
".",
"start",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# Add new entry to history only if the status has changed.",
"if",
"changed",
":",
"if",
"status",
"==",
"self",
".",
"S_SUB",
":",
"self",
".",
"datetimes",
".",
"submission",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"history",
".",
"info",
"(",
"\"Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s \"",
"%",
"(",
"self",
".",
"mpi_procs",
",",
"self",
".",
"omp_threads",
",",
"self",
".",
"mem_per_proc",
".",
"to",
"(",
"\"Gb\"",
")",
",",
"msg",
")",
")",
"elif",
"status",
"==",
"self",
".",
"S_OK",
":",
"self",
".",
"history",
".",
"info",
"(",
"\"Task completed %s\"",
",",
"msg",
")",
"elif",
"status",
"==",
"self",
".",
"S_ABICRITICAL",
":",
"self",
".",
"history",
".",
"info",
"(",
"\"Status set to S_ABI_CRITICAL due to: %s\"",
",",
"msg",
")",
"else",
":",
"self",
".",
"history",
".",
"info",
"(",
"\"Status changed to %s. msg: %s\"",
",",
"status",
",",
"msg",
")",
"#######################################################",
"# The section belows contains callbacks that should not",
"# be executed if we are in spectator_mode",
"#######################################################",
"if",
"status",
"==",
"self",
".",
"S_DONE",
":",
"# Execute the callback",
"self",
".",
"_on_done",
"(",
")",
"if",
"status",
"==",
"self",
".",
"S_OK",
":",
"# Finalize the task.",
"if",
"not",
"self",
".",
"finalized",
":",
"self",
".",
"_on_ok",
"(",
")",
"# here we remove the output files of the task and of its parents.",
"if",
"self",
".",
"gc",
"is",
"not",
"None",
"and",
"self",
".",
"gc",
".",
"policy",
"==",
"\"task\"",
":",
"self",
".",
"clean_output_files",
"(",
")",
"if",
"self",
".",
"status",
"==",
"self",
".",
"S_OK",
":",
"# Because _on_ok might have changed the status.",
"self",
".",
"send_signal",
"(",
"self",
".",
"S_OK",
")",
"return",
"status"
] | Set and return the status of the task.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors. | [
"Set",
"and",
"return",
"the",
"status",
"of",
"the",
"task",
"."
] | python | train |
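
A brief sketch of calling the method above; `task` is assumed to be an existing AbiPy/pymatgen Task instance (flow construction is omitted), and the helper name is invented for illustration.

```python
def mark_abicritical(task, reason):
    """Illustrative helper: flag a task as S_ABICRITICAL via set_status above.

    Assumes `task` is a Task built elsewhere; locked tasks must be unlocked
    first or set_status raises RuntimeError, as in the method body.
    """
    # A msg longer than 2000 characters is truncated inside set_status itself.
    return task.set_status(task.S_ABICRITICAL, msg=reason)
```
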
hydpy-dev/hydpy | hydpy/models/lland/lland_derived.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_derived.py#L21-L35 | def update(self):
"""Update |AbsFHRU| based on |FT| and |FHRU|.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(2)
>>> lnk(ACKER)
>>> ft(100.0)
>>> fhru(0.2, 0.8)
>>> derived.absfhru.update()
>>> derived.absfhru
absfhru(20.0, 80.0)
"""
control = self.subpars.pars.control
self(control.ft*control.fhru) | [
"def",
"update",
"(",
"self",
")",
":",
"control",
"=",
"self",
".",
"subpars",
".",
"pars",
".",
"control",
"self",
"(",
"control",
".",
"ft",
"*",
"control",
".",
"fhru",
")"
] | Update |AbsFHRU| based on |FT| and |FHRU|.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(2)
>>> lnk(ACKER)
>>> ft(100.0)
>>> fhru(0.2, 0.8)
>>> derived.absfhru.update()
>>> derived.absfhru
absfhru(20.0, 80.0) | [
"Update",
"|AbsFHRU|",
"based",
"on",
"|FT|",
"and",
"|FHRU|",
"."
] | python | train |
jazzband/django-model-utils | model_utils/models.py | https://github.com/jazzband/django-model-utils/blob/d557c4253312774a7c2f14bcd02675e9ac2ea05f/model_utils/models.py#L86-L102 | def add_timeframed_query_manager(sender, **kwargs):
"""
Add a QueryManager for a specific timeframe.
"""
if not issubclass(sender, TimeFramedModel):
return
if _field_exists(sender, 'timeframed'):
raise ImproperlyConfigured(
"Model '%s' has a field named 'timeframed' "
"which conflicts with the TimeFramedModel manager."
% sender.__name__
)
sender.add_to_class('timeframed', QueryManager(
(models.Q(start__lte=now) | models.Q(start__isnull=True)) &
(models.Q(end__gte=now) | models.Q(end__isnull=True))
)) | [
"def",
"add_timeframed_query_manager",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"issubclass",
"(",
"sender",
",",
"TimeFramedModel",
")",
":",
"return",
"if",
"_field_exists",
"(",
"sender",
",",
"'timeframed'",
")",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"Model '%s' has a field named 'timeframed' \"",
"\"which conflicts with the TimeFramedModel manager.\"",
"%",
"sender",
".",
"__name__",
")",
"sender",
".",
"add_to_class",
"(",
"'timeframed'",
",",
"QueryManager",
"(",
"(",
"models",
".",
"Q",
"(",
"start__lte",
"=",
"now",
")",
"|",
"models",
".",
"Q",
"(",
"start__isnull",
"=",
"True",
")",
")",
"&",
"(",
"models",
".",
"Q",
"(",
"end__gte",
"=",
"now",
")",
"|",
"models",
".",
"Q",
"(",
"end__isnull",
"=",
"True",
")",
")",
")",
")"
] | Add a QueryManager for a specific timeframe. | [
"Add",
"a",
"QueryManager",
"for",
"a",
"specific",
"timeframe",
"."
] | python | train |
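
A sketch of the model side this `class_prepared` handler serves; the `Event` model is a made-up example, but the `timeframed` manager name and its "currently active" semantics follow directly from the handler above.

```python
# Illustrative model: subclassing TimeFramedModel triggers the handler above,
# which attaches a `timeframed` QueryManager limited to rows active right now.
from django.db import models
from model_utils.models import TimeFramedModel

class Event(TimeFramedModel):                    # hypothetical example model
    name = models.CharField(max_length=100)

# Rows whose start/end window contains "now"; NULL bounds count as open-ended.
active_events = Event.timeframed.all()
```
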
moonlitesolutions/SolrClient | SolrClient/solrresp.py | https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L270-L279 | def get_first_field_values_as_list(self, field):
'''
:param str field: The name of the field for lookup.
Goes through all documents returned looking for specified field. At first encounter will return the field's value.
'''
for doc in self.docs:
if field in doc.keys():
return doc[field]
raise SolrResponseError("No field in result set") | [
"def",
"get_first_field_values_as_list",
"(",
"self",
",",
"field",
")",
":",
"for",
"doc",
"in",
"self",
".",
"docs",
":",
"if",
"field",
"in",
"doc",
".",
"keys",
"(",
")",
":",
"return",
"doc",
"[",
"field",
"]",
"raise",
"SolrResponseError",
"(",
"\"No field in result set\"",
")"
] | :param str field: The name of the field for lookup.
Goes through all documents returned looking for specified field. At first encounter will return the field's value. | [
":",
"param",
"str",
"field",
":",
"The",
"name",
"of",
"the",
"field",
"for",
"lookup",
"."
] | python | train |
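
A short usage sketch; the Solr URL, collection, and field names are placeholders, and the surrounding `SolrClient(...).query(...)` call is assumed to return the response object this method lives on.

```python
# Illustrative only: core name, field name, and URL are placeholders.
from SolrClient import SolrClient

solr = SolrClient('http://localhost:8983/solr')
res = solr.query('books', {'q': '*:*', 'rows': 10})

# Value of `author` from the first returned document that carries the field;
# raises SolrResponseError if no document in the result set has it.
first_author = res.get_first_field_values_as_list('author')
```
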
log2timeline/plaso | plaso/cli/tools.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/tools.py#L298-L330 | def ListTimeZones(self):
"""Lists the timezones."""
max_length = 0
for timezone_name in pytz.all_timezones:
if len(timezone_name) > max_length:
max_length = len(timezone_name)
utc_date_time = datetime.datetime.utcnow()
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Timezone', 'UTC Offset'],
title='Zones')
for timezone_name in pytz.all_timezones:
try:
local_timezone = pytz.timezone(timezone_name)
except AssertionError as exception:
logger.error((
'Unable to determine information about timezone: {0:s} with '
'error: {1!s}').format(timezone_name, exception))
continue
local_date_string = '{0!s}'.format(
local_timezone.localize(utc_date_time))
if '+' in local_date_string:
_, _, diff = local_date_string.rpartition('+')
diff_string = '+{0:s}'.format(diff)
else:
_, _, diff = local_date_string.rpartition('-')
diff_string = '-{0:s}'.format(diff)
table_view.AddRow([timezone_name, diff_string])
table_view.Write(self._output_writer) | [
"def",
"ListTimeZones",
"(",
"self",
")",
":",
"max_length",
"=",
"0",
"for",
"timezone_name",
"in",
"pytz",
".",
"all_timezones",
":",
"if",
"len",
"(",
"timezone_name",
")",
">",
"max_length",
":",
"max_length",
"=",
"len",
"(",
"timezone_name",
")",
"utc_date_time",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"table_view",
"=",
"views",
".",
"ViewsFactory",
".",
"GetTableView",
"(",
"self",
".",
"_views_format_type",
",",
"column_names",
"=",
"[",
"'Timezone'",
",",
"'UTC Offset'",
"]",
",",
"title",
"=",
"'Zones'",
")",
"for",
"timezone_name",
"in",
"pytz",
".",
"all_timezones",
":",
"try",
":",
"local_timezone",
"=",
"pytz",
".",
"timezone",
"(",
"timezone_name",
")",
"except",
"AssertionError",
"as",
"exception",
":",
"logger",
".",
"error",
"(",
"(",
"'Unable to determine information about timezone: {0:s} with '",
"'error: {1!s}'",
")",
".",
"format",
"(",
"timezone_name",
",",
"exception",
")",
")",
"continue",
"local_date_string",
"=",
"'{0!s}'",
".",
"format",
"(",
"local_timezone",
".",
"localize",
"(",
"utc_date_time",
")",
")",
"if",
"'+'",
"in",
"local_date_string",
":",
"_",
",",
"_",
",",
"diff",
"=",
"local_date_string",
".",
"rpartition",
"(",
"'+'",
")",
"diff_string",
"=",
"'+{0:s}'",
".",
"format",
"(",
"diff",
")",
"else",
":",
"_",
",",
"_",
",",
"diff",
"=",
"local_date_string",
".",
"rpartition",
"(",
"'-'",
")",
"diff_string",
"=",
"'-{0:s}'",
".",
"format",
"(",
"diff",
")",
"table_view",
".",
"AddRow",
"(",
"[",
"timezone_name",
",",
"diff_string",
"]",
")",
"table_view",
".",
"Write",
"(",
"self",
".",
"_output_writer",
")"
] | Lists the timezones. | [
"Lists",
"the",
"timezones",
"."
] | python | train |
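
The heart of the method above is the per-timezone UTC-offset formatting done with pytz; below is a standalone sketch of just that piece, with plaso's table view and output writer plumbing deliberately left out.

```python
# Standalone sketch of the offset computation ListTimeZones performs per zone.
import datetime

import pytz

utc_now = datetime.datetime.utcnow()
for name in ('UTC', 'Europe/Amsterdam', 'America/New_York'):
    localized = '{0!s}'.format(pytz.timezone(name).localize(utc_now))
    sign = '+' if '+' in localized else '-'
    offset = sign + localized.rpartition(sign)[2]   # e.g. '+01:00' or '-05:00'
    print('{0:<25s} {1:s}'.format(name, offset))
```
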
lehins/django-smartfields | smartfields/managers.py | https://github.com/lehins/django-smartfields/blob/23d4b0b18352f4f40ce8c429735e673ba5191502/smartfields/managers.py#L203-L206 | def set_status(self, instance, status):
"""Sets the field status for up to 5 minutes."""
status_key = self.get_status_key(instance)
cache.set(status_key, status, timeout=300) | [
"def",
"set_status",
"(",
"self",
",",
"instance",
",",
"status",
")",
":",
"status_key",
"=",
"self",
".",
"get_status_key",
"(",
"instance",
")",
"cache",
".",
"set",
"(",
"status_key",
",",
"status",
",",
"timeout",
"=",
"300",
")"
] | Sets the field status for up to 5 minutes. | [
"Sets",
"the",
"field",
"status",
"for",
"up",
"to",
"5",
"minutes",
"."
] | python | train |
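
A sketch of the cache round-trip this manager method performs; the key string is a placeholder, since the real one comes from `get_status_key(instance)`, which is not shown in the row.

```python
# Illustrative only: reproduces the manager's pattern with Django's cache directly.
from django.core.cache import cache

status_key = 'smartfields-status-42-avatar'                   # placeholder key
cache.set(status_key, {'state': 'processing'}, timeout=300)   # kept for 5 minutes
current = cache.get(status_key)    # -> {'state': 'processing'} until it expires
```
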