nwo
stringlengths 5
106
| sha
stringlengths 40
40
| path
stringlengths 4
174
| language
stringclasses 1
value | identifier
stringlengths 1
140
| parameters
stringlengths 0
87.7k
| argument_list
stringclasses 1
value | return_statement
stringlengths 0
426k
| docstring
stringlengths 0
64.3k
| docstring_summary
stringlengths 0
26.3k
| docstring_tokens
list | function
stringlengths 18
4.83M
| function_tokens
list | url
stringlengths 83
304
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
triaquae/triaquae
|
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
|
TriAquae/models/Centos_6.4/paramiko/server.py
|
python
|
ServerInterface.check_auth_none
|
(self, username)
|
return AUTH_FAILED
|
Determine if a client may open channels with no (further)
authentication.
Return L{AUTH_FAILED} if the client must authenticate, or
L{AUTH_SUCCESSFUL} if it's okay for the client to not
authenticate.
The default implementation always returns L{AUTH_FAILED}.
@param username: the username of the client.
@type username: str
@return: L{AUTH_FAILED} if the authentication fails;
L{AUTH_SUCCESSFUL} if it succeeds.
@rtype: int
|
Determine if a client may open channels with no (further)
authentication.
|
[
"Determine",
"if",
"a",
"client",
"may",
"open",
"channels",
"with",
"no",
"(",
"further",
")",
"authentication",
"."
] |
def check_auth_none(self, username):
    """
    Decide whether a client may open channels without any (further)
    authentication.

    The default implementation rejects every client; subclasses should
    override this method to permit password-less access.

    @param username: the username of the client.
    @type username: str
    @return: L{AUTH_FAILED} if the authentication fails;
        L{AUTH_SUCCESSFUL} if it succeeds.
    @rtype: int
    """
    return AUTH_FAILED
|
[
"def",
"check_auth_none",
"(",
"self",
",",
"username",
")",
":",
"return",
"AUTH_FAILED"
] |
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Centos_6.4/paramiko/server.py#L144-L161
|
|
arthurdejong/python-stdnum
|
02dec52602ae0709b940b781fc1fcebfde7340b7
|
stdnum/it/aic.py
|
python
|
validate_base10
|
(number)
|
return number
|
Check if a string is a valid BASE10 representation of an AIC.
|
Check if a string is a valid BASE10 representation of an AIC.
|
[
"Check",
"if",
"a",
"string",
"is",
"a",
"valid",
"BASE10",
"representation",
"of",
"an",
"AIC",
"."
] |
def validate_base10(number):
    """Check if a string is a valid BASE10 representation of an AIC.

    The input is compacted first; it must be exactly nine digits,
    begin with '0' and carry a correct check digit. The compacted
    code is returned on success, otherwise a validation error is raised.
    """
    code = compact(number)
    if len(code) != 9:
        raise InvalidLength()
    if not isdigits(code):
        raise InvalidFormat()
    if not code.startswith('0'):
        raise InvalidComponent()
    if calc_check_digit(code) != code[-1]:
        raise InvalidChecksum()
    return code
|
[
"def",
"validate_base10",
"(",
"number",
")",
":",
"number",
"=",
"compact",
"(",
"number",
")",
"if",
"len",
"(",
"number",
")",
"!=",
"9",
":",
"raise",
"InvalidLength",
"(",
")",
"if",
"not",
"isdigits",
"(",
"number",
")",
":",
"raise",
"InvalidFormat",
"(",
")",
"if",
"number",
"[",
"0",
"]",
"!=",
"'0'",
":",
"raise",
"InvalidComponent",
"(",
")",
"if",
"calc_check_digit",
"(",
"number",
")",
"!=",
"number",
"[",
"-",
"1",
"]",
":",
"raise",
"InvalidChecksum",
"(",
")",
"return",
"number"
] |
https://github.com/arthurdejong/python-stdnum/blob/02dec52602ae0709b940b781fc1fcebfde7340b7/stdnum/it/aic.py#L95-L106
|
|
IBM/differential-privacy-library
|
90b319a90414ebf12062887c07e1609f888e1a34
|
diffprivlib/mechanisms/gaussian.py
|
python
|
GaussianDiscrete._find_scale
|
(self)
|
return (guess_0 + guess_1) / 2
|
Determine the scale of the mechanism's distribution given epsilon and delta.
|
Determine the scale of the mechanism's distribution given epsilon and delta.
|
[
"Determine",
"the",
"scale",
"of",
"the",
"mechanism",
"s",
"distribution",
"given",
"epsilon",
"and",
"delta",
"."
] |
def _find_scale(self):
    """Determine the scale of the mechanism's distribution given epsilon and delta.

    Brackets the root of ``objective`` in an interval [2**i, 2**(i+1)]
    and then narrows it with bisection, returning the midpoint as sigma.
    """
    if self.sensitivity / self.epsilon == 0:
        # Zero sensitivity (relative to epsilon): no noise is needed.
        return 0

    def objective(sigma, epsilon_, delta_, sensitivity_):
        """Function for which we are seeking its root. """
        # Indices at which terms switch between the two shifted sums.
        idx_0 = int(np.floor(epsilon_ * sigma ** 2 / sensitivity_ - sensitivity_ / 2))
        idx_1 = int(np.floor(epsilon_ * sigma ** 2 / sensitivity_ + sensitivity_ / 2))
        idx = 1
        lhs, rhs, denom = float(idx_0 < 0), 0, 1
        _term, diff = 1, 1
        # Accumulate the series until the Gaussian terms underflow to 0
        # or the rhs stops growing (diff tracks its last increment).
        while _term > 0 and diff > 0:
            _term = np.exp(-idx ** 2 / 2 / sigma ** 2)
            if idx > idx_0:
                lhs += _term
            if idx_0 < -idx:
                lhs += _term
            if idx > idx_1:
                diff = -rhs
            rhs += _term
            diff += rhs
            denom += 2 * _term
            idx += 1
            if idx > 1e6:
                raise ValueError("Infinite sum not converging, aborting. Try changing the epsilon and/or delta.")
        return (lhs - np.exp(epsilon_) * rhs) / denom - delta_

    epsilon = self.epsilon
    delta = self.delta
    sensitivity = self.sensitivity
    # Begin by locating the root within an interval [2**i, 2**(i+1)]
    guess_0 = 1
    f_0 = objective(guess_0, epsilon, delta, sensitivity)
    # Objective is assumed monotone in sigma: grow the bracket upward when
    # f(1) > 0, downward otherwise — TODO confirm monotonicity assumption.
    pwr = 1 if f_0 > 0 else -1
    guess_1 = 2 ** pwr
    f_1 = objective(guess_1, epsilon, delta, sensitivity)
    while f_0 * f_1 > 0:
        # Same sign at both endpoints: slide the bracket by a factor of 2**pwr.
        guess_0 *= 2 ** pwr
        guess_1 *= 2 ** pwr
        f_0 = f_1
        f_1 = objective(guess_1, epsilon, delta, sensitivity)
    # Find the root (sigma) using the bisection method
    while not np.isclose(guess_0, guess_1, atol=1e-12, rtol=1e-6):
        guess_mid = (guess_0 + guess_1) / 2
        f_mid = objective(guess_mid, epsilon, delta, sensitivity)
        # Keep the half-interval whose endpoints still straddle the root.
        if f_mid * f_0 <= 0:
            f_1 = f_mid
            guess_1 = guess_mid
        if f_mid * f_1 <= 0:
            f_0 = f_mid
            guess_0 = guess_mid
    return (guess_0 + guess_1) / 2
|
[
"def",
"_find_scale",
"(",
"self",
")",
":",
"if",
"self",
".",
"sensitivity",
"/",
"self",
".",
"epsilon",
"==",
"0",
":",
"return",
"0",
"def",
"objective",
"(",
"sigma",
",",
"epsilon_",
",",
"delta_",
",",
"sensitivity_",
")",
":",
"\"\"\"Function for which we are seeking its root. \"\"\"",
"idx_0",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"epsilon_",
"*",
"sigma",
"**",
"2",
"/",
"sensitivity_",
"-",
"sensitivity_",
"/",
"2",
")",
")",
"idx_1",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"epsilon_",
"*",
"sigma",
"**",
"2",
"/",
"sensitivity_",
"+",
"sensitivity_",
"/",
"2",
")",
")",
"idx",
"=",
"1",
"lhs",
",",
"rhs",
",",
"denom",
"=",
"float",
"(",
"idx_0",
"<",
"0",
")",
",",
"0",
",",
"1",
"_term",
",",
"diff",
"=",
"1",
",",
"1",
"while",
"_term",
">",
"0",
"and",
"diff",
">",
"0",
":",
"_term",
"=",
"np",
".",
"exp",
"(",
"-",
"idx",
"**",
"2",
"/",
"2",
"/",
"sigma",
"**",
"2",
")",
"if",
"idx",
">",
"idx_0",
":",
"lhs",
"+=",
"_term",
"if",
"idx_0",
"<",
"-",
"idx",
":",
"lhs",
"+=",
"_term",
"if",
"idx",
">",
"idx_1",
":",
"diff",
"=",
"-",
"rhs",
"rhs",
"+=",
"_term",
"diff",
"+=",
"rhs",
"denom",
"+=",
"2",
"*",
"_term",
"idx",
"+=",
"1",
"if",
"idx",
">",
"1e6",
":",
"raise",
"ValueError",
"(",
"\"Infinite sum not converging, aborting. Try changing the epsilon and/or delta.\"",
")",
"return",
"(",
"lhs",
"-",
"np",
".",
"exp",
"(",
"epsilon_",
")",
"*",
"rhs",
")",
"/",
"denom",
"-",
"delta_",
"epsilon",
"=",
"self",
".",
"epsilon",
"delta",
"=",
"self",
".",
"delta",
"sensitivity",
"=",
"self",
".",
"sensitivity",
"# Begin by locating the root within an interval [2**i, 2**(i+1)]",
"guess_0",
"=",
"1",
"f_0",
"=",
"objective",
"(",
"guess_0",
",",
"epsilon",
",",
"delta",
",",
"sensitivity",
")",
"pwr",
"=",
"1",
"if",
"f_0",
">",
"0",
"else",
"-",
"1",
"guess_1",
"=",
"2",
"**",
"pwr",
"f_1",
"=",
"objective",
"(",
"guess_1",
",",
"epsilon",
",",
"delta",
",",
"sensitivity",
")",
"while",
"f_0",
"*",
"f_1",
">",
"0",
":",
"guess_0",
"*=",
"2",
"**",
"pwr",
"guess_1",
"*=",
"2",
"**",
"pwr",
"f_0",
"=",
"f_1",
"f_1",
"=",
"objective",
"(",
"guess_1",
",",
"epsilon",
",",
"delta",
",",
"sensitivity",
")",
"# Find the root (sigma) using the bisection method",
"while",
"not",
"np",
".",
"isclose",
"(",
"guess_0",
",",
"guess_1",
",",
"atol",
"=",
"1e-12",
",",
"rtol",
"=",
"1e-6",
")",
":",
"guess_mid",
"=",
"(",
"guess_0",
"+",
"guess_1",
")",
"/",
"2",
"f_mid",
"=",
"objective",
"(",
"guess_mid",
",",
"epsilon",
",",
"delta",
",",
"sensitivity",
")",
"if",
"f_mid",
"*",
"f_0",
"<=",
"0",
":",
"f_1",
"=",
"f_mid",
"guess_1",
"=",
"guess_mid",
"if",
"f_mid",
"*",
"f_1",
"<=",
"0",
":",
"f_0",
"=",
"f_mid",
"guess_0",
"=",
"guess_mid",
"return",
"(",
"guess_0",
"+",
"guess_1",
")",
"/",
"2"
] |
https://github.com/IBM/differential-privacy-library/blob/90b319a90414ebf12062887c07e1609f888e1a34/diffprivlib/mechanisms/gaussian.py#L284-L350
|
|
tensorflow/compression
|
369d398be937983b3abb7c5445400a6f5d55ffc9
|
models/hific/archs.py
|
python
|
_PatchDiscriminatorCompareGANImpl.__init__
|
(self,
name,
num_filters_base=64,
num_layers=3,
)
|
Instantiate discriminator.
Args:
name: Name of the layer.
num_filters_base: Number of base filters. will be multiplied as we
go down in resolution.
num_layers: Number of downscaling convolutions.
|
Instantiate discriminator.
|
[
"Instantiate",
"discriminator",
"."
] |
def __init__(self,
             name,
             num_filters_base=64,
             num_layers=3,
             ):
    """Instantiate discriminator.

    Args:
      name: Name of the layer.
      num_filters_base: Number of base filters; multiplied as the
        resolution decreases.
      num_layers: Number of downscaling convolutions.
    """
    super(_PatchDiscriminatorCompareGANImpl, self).__init__(
        name, batch_norm_fn=None, layer_norm=False, spectral_norm=True)
    self._num_filters_base = num_filters_base
    self._num_layers = num_layers
|
[
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"num_filters_base",
"=",
"64",
",",
"num_layers",
"=",
"3",
",",
")",
":",
"super",
"(",
"_PatchDiscriminatorCompareGANImpl",
",",
"self",
")",
".",
"__init__",
"(",
"name",
",",
"batch_norm_fn",
"=",
"None",
",",
"layer_norm",
"=",
"False",
",",
"spectral_norm",
"=",
"True",
")",
"self",
".",
"_num_layers",
"=",
"num_layers",
"self",
".",
"_num_filters_base",
"=",
"num_filters_base"
] |
https://github.com/tensorflow/compression/blob/369d398be937983b3abb7c5445400a6f5d55ffc9/models/hific/archs.py#L308-L326
|
||
bugy/script-server
|
9a57ce15903c81bcb537b872f1330ee55ba31563
|
src/communications/destination_http.py
|
python
|
HttpDestination.__str__
|
(self, *args, **kwargs)
|
return type(self).__name__ + ' for ' + str(self._communicator)
|
[] |
def __str__(self, *args, **kwargs):
return type(self).__name__ + ' for ' + str(self._communicator)
|
[
"def",
"__str__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"type",
"(",
"self",
")",
".",
"__name__",
"+",
"' for '",
"+",
"str",
"(",
"self",
".",
"_communicator",
")"
] |
https://github.com/bugy/script-server/blob/9a57ce15903c81bcb537b872f1330ee55ba31563/src/communications/destination_http.py#L38-L39
|
|||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/oc_adm_ca_server_cert.py
|
python
|
Yedit.pop
|
(self, path, key_or_item)
|
return (False, self.yaml_dict)
|
remove a key, value pair from a dict or an item for a list
|
remove a key, value pair from a dict or an item for a list
|
[
"remove",
"a",
"key",
"value",
"pair",
"from",
"a",
"dict",
"or",
"an",
"item",
"for",
"a",
"list"
] |
def pop(self, path, key_or_item):
    ''' remove a key, value pair from a dict or an item for a list'''
    try:
        target = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        target = None

    if target is None:
        return (False, self.yaml_dict)

    if isinstance(target, dict):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        if key_or_item not in target:
            return (False, self.yaml_dict)
        del target[key_or_item]
        return (True, self.yaml_dict)

    if isinstance(target, list):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        try:
            position = target.index(key_or_item)
        except ValueError:
            return (False, self.yaml_dict)
        del target[position]
        return (True, self.yaml_dict)

    return (False, self.yaml_dict)
|
[
"def",
"pop",
"(",
"self",
",",
"path",
",",
"key_or_item",
")",
":",
"try",
":",
"entry",
"=",
"Yedit",
".",
"get_entry",
"(",
"self",
".",
"yaml_dict",
",",
"path",
",",
"self",
".",
"separator",
")",
"except",
"KeyError",
":",
"entry",
"=",
"None",
"if",
"entry",
"is",
"None",
":",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")",
"if",
"isinstance",
"(",
"entry",
",",
"dict",
")",
":",
"# AUDIT:maybe-no-member makes sense due to fuzzy types",
"# pylint: disable=maybe-no-member",
"if",
"key_or_item",
"in",
"entry",
":",
"entry",
".",
"pop",
"(",
"key_or_item",
")",
"return",
"(",
"True",
",",
"self",
".",
"yaml_dict",
")",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")",
"elif",
"isinstance",
"(",
"entry",
",",
"list",
")",
":",
"# AUDIT:maybe-no-member makes sense due to fuzzy types",
"# pylint: disable=maybe-no-member",
"ind",
"=",
"None",
"try",
":",
"ind",
"=",
"entry",
".",
"index",
"(",
"key_or_item",
")",
"except",
"ValueError",
":",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")",
"entry",
".",
"pop",
"(",
"ind",
")",
"return",
"(",
"True",
",",
"self",
".",
"yaml_dict",
")",
"return",
"(",
"False",
",",
"self",
".",
"yaml_dict",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/oc_adm_ca_server_cert.py#L480-L510
|
|
holoviz/holoviews
|
cc6b27f01710402fdfee2aeef1507425ca78c91f
|
holoviews/core/layout.py
|
python
|
NdLayout.last
|
(self)
|
return self.clone(last_items)
|
Returns another NdLayout constituted of the last views of the
individual elements (if they are maps).
|
Returns another NdLayout constituted of the last views of the
individual elements (if they are maps).
|
[
"Returns",
"another",
"NdLayout",
"constituted",
"of",
"the",
"last",
"views",
"of",
"the",
"individual",
"elements",
"(",
"if",
"they",
"are",
"maps",
")",
"."
] |
def last(self):
    """
    Return a clone of this NdLayout containing only the final view of
    each element (for elements that are maps).
    """
    collected = []
    for key, element in self.items():
        if isinstance(element, NdMapping):
            collected.append((key, element.clone((element.last_key, element.last))))
        elif isinstance(element, AdjointLayout):
            collected.append((key, element.last))
        else:
            collected.append((key, element))
    return self.clone(collected)
|
[
"def",
"last",
"(",
"self",
")",
":",
"last_items",
"=",
"[",
"]",
"for",
"(",
"k",
",",
"v",
")",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"NdMapping",
")",
":",
"item",
"=",
"(",
"k",
",",
"v",
".",
"clone",
"(",
"(",
"v",
".",
"last_key",
",",
"v",
".",
"last",
")",
")",
")",
"elif",
"isinstance",
"(",
"v",
",",
"AdjointLayout",
")",
":",
"item",
"=",
"(",
"k",
",",
"v",
".",
"last",
")",
"else",
":",
"item",
"=",
"(",
"k",
",",
"v",
")",
"last_items",
".",
"append",
"(",
"item",
")",
"return",
"self",
".",
"clone",
"(",
"last_items",
")"
] |
https://github.com/holoviz/holoviews/blob/cc6b27f01710402fdfee2aeef1507425ca78c91f/holoviews/core/layout.py#L388-L402
|
|
numenta/nupic.torch
|
40afbcbb992c71c759dca94e67e8f3b15d3bd4c9
|
examples/gsc_pretrain/train_model.py
|
python
|
preprocessed_dataset
|
(filepath)
|
return torch.utils.data.TensorDataset(x, y)
|
Get a processed dataset
:param cachefilepath:
Path to the processed data.
:type cachefilepath: pathlib.Path
:return: torch.utils.data.TensorDataset
|
Get a processed dataset
|
[
"Get",
"a",
"processed",
"dataset"
] |
def preprocessed_dataset(filepath):
    """
    Load a preprocessed dataset from an ``.npz`` archive.

    :param filepath: Path to the processed data — an ``np.savez`` archive
        whose first two stored arrays are the inputs ``x`` and labels ``y``.
    :type filepath: pathlib.Path or str
    :return: torch.utils.data.TensorDataset wrapping ``x`` and ``y``
    """
    # np.load on an .npz file yields a mapping of arrays; unpack them in
    # stored order (inputs first, labels second).
    x, y = np.load(filepath).values()
    x, y = map(torch.tensor, (x, y))
    return torch.utils.data.TensorDataset(x, y)
|
[
"def",
"preprocessed_dataset",
"(",
"filepath",
")",
":",
"x",
",",
"y",
"=",
"np",
".",
"load",
"(",
"filepath",
")",
".",
"values",
"(",
")",
"x",
",",
"y",
"=",
"map",
"(",
"torch",
".",
"tensor",
",",
"(",
"x",
",",
"y",
")",
")",
"return",
"torch",
".",
"utils",
".",
"data",
".",
"TensorDataset",
"(",
"x",
",",
"y",
")"
] |
https://github.com/numenta/nupic.torch/blob/40afbcbb992c71c759dca94e67e8f3b15d3bd4c9/examples/gsc_pretrain/train_model.py#L184-L197
|
|
stopstalk/stopstalk-deployment
|
10c3ab44c4ece33ae515f6888c15033db2004bb1
|
aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/urllib3/util/connection.py
|
python
|
allowed_gai_family
|
()
|
return family
|
This function is designed to work in the context of
getaddrinfo, where family=socket.AF_UNSPEC is the default and
will perform a DNS search for both IPv6 and IPv4 records.
|
This function is designed to work in the context of
getaddrinfo, where family=socket.AF_UNSPEC is the default and
will perform a DNS search for both IPv6 and IPv4 records.
|
[
"This",
"function",
"is",
"designed",
"to",
"work",
"in",
"the",
"context",
"of",
"getaddrinfo",
"where",
"family",
"=",
"socket",
".",
"AF_UNSPEC",
"is",
"the",
"default",
"and",
"will",
"perform",
"a",
"DNS",
"search",
"for",
"both",
"IPv6",
"and",
"IPv4",
"records",
"."
] |
def allowed_gai_family():
    """Return the address family to pass to getaddrinfo.

    socket.AF_UNSPEC (DNS search over both IPv6 and IPv4 records) when
    IPv6 is available, otherwise socket.AF_INET.
    """
    if HAS_IPV6:
        return socket.AF_UNSPEC
    return socket.AF_INET
|
[
"def",
"allowed_gai_family",
"(",
")",
":",
"family",
"=",
"socket",
".",
"AF_INET",
"if",
"HAS_IPV6",
":",
"family",
"=",
"socket",
".",
"AF_UNSPEC",
"return",
"family"
] |
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/urllib3/util/connection.py#L92-L100
|
|
Yelp/paasta
|
6c08c04a577359509575c794b973ea84d72accf9
|
paasta_tools/cli/cmds/local_run.py
|
python
|
perform_cmd_healthcheck
|
(docker_client, container_id, command, timeout)
|
Returns true if return code of command is 0 when executed inside container, false otherwise
:param docker_client: Docker client object
:param container_id: Docker container id
:param command: command to execute
:param timeout: timeout in seconds
:returns: True if command exits with return code 0, false otherwise
|
Returns true if return code of command is 0 when executed inside container, false otherwise
|
[
"Returns",
"true",
"if",
"return",
"code",
"of",
"command",
"is",
"0",
"when",
"executed",
"inside",
"container",
"false",
"otherwise"
] |
def perform_cmd_healthcheck(docker_client, container_id, command, timeout):
    """Run a healthcheck command inside a container and report the result.

    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param command: command to execute
    :param timeout: timeout in seconds
    :returns: tuple ``(healthy, output)`` where ``healthy`` is True if the
        command exited with return code 0 and False otherwise, and
        ``output`` is the command's captured output
    """
    # NOTE: the previous docstring claimed a bare boolean return, but the
    # function has always returned (bool, output) tuples.
    (output, return_code) = execute_in_container(
        docker_client, container_id, command, timeout
    )
    return (return_code == 0, output)
|
[
"def",
"perform_cmd_healthcheck",
"(",
"docker_client",
",",
"container_id",
",",
"command",
",",
"timeout",
")",
":",
"(",
"output",
",",
"return_code",
")",
"=",
"execute_in_container",
"(",
"docker_client",
",",
"container_id",
",",
"command",
",",
"timeout",
")",
"if",
"return_code",
"==",
"0",
":",
"return",
"(",
"True",
",",
"output",
")",
"else",
":",
"return",
"(",
"False",
",",
"output",
")"
] |
https://github.com/Yelp/paasta/blob/6c08c04a577359509575c794b973ea84d72accf9/paasta_tools/cli/cmds/local_run.py#L117-L132
|
||
almarklein/visvis
|
766ed97767b44a55a6ff72c742d7385e074d3d55
|
core/light.py
|
python
|
Light.isOn
|
(self)
|
return self._on
|
Get whether the light is on.
|
Get whether the light is on.
|
[
"Get",
"whether",
"the",
"light",
"is",
"on",
"."
] |
def isOn(self):
    """ Return whether the light is currently switched on.
    """
    return self._on
|
[
"def",
"isOn",
"(",
"self",
")",
":",
"return",
"self",
".",
"_on"
] |
https://github.com/almarklein/visvis/blob/766ed97767b44a55a6ff72c742d7385e074d3d55/core/light.py#L249-L252
|
|
Rapptz/discord.py
|
45d498c1b76deaf3b394d17ccf56112fa691d160
|
discord/voice_client.py
|
python
|
VoiceProtocol.cleanup
|
(self)
|
This method *must* be called to ensure proper clean-up during a disconnect.
It is advisable to call this from within :meth:`disconnect` when you are
completely done with the voice protocol instance.
This method removes it from the internal state cache that keeps track of
currently alive voice clients. Failure to clean-up will cause subsequent
connections to report that it's still connected.
|
This method *must* be called to ensure proper clean-up during a disconnect.
|
[
"This",
"method",
"*",
"must",
"*",
"be",
"called",
"to",
"ensure",
"proper",
"clean",
"-",
"up",
"during",
"a",
"disconnect",
"."
] |
def cleanup(self) -> None:
    """Remove this voice protocol from the client's live voice-client cache.

    This *must* be called during a disconnect — ideally from within
    :meth:`disconnect` once you are completely done with the instance.
    Skipping it leaves a stale cache entry, so subsequent connections
    will report that this one is still connected.
    """
    cache_key, _ = self.channel._get_voice_client_key()
    self.client._connection._remove_voice_client(cache_key)
|
[
"def",
"cleanup",
"(",
"self",
")",
"->",
"None",
":",
"key_id",
",",
"_",
"=",
"self",
".",
"channel",
".",
"_get_voice_client_key",
"(",
")",
"self",
".",
"client",
".",
"_connection",
".",
"_remove_voice_client",
"(",
"key_id",
")"
] |
https://github.com/Rapptz/discord.py/blob/45d498c1b76deaf3b394d17ccf56112fa691d160/discord/voice_client.py#L185-L196
|
||
nucleic/enaml
|
65c2a2a2d765e88f2e1103046680571894bb41ed
|
enaml/widgets/file_dialog_ex.py
|
python
|
FileDialogEx._prepare
|
(self)
|
A reimplemented preparation method.
This method resets the selected paths and filters.
|
A reimplemented preparation method.
|
[
"A",
"reimplemented",
"preparation",
"method",
"."
] |
def _prepare(self):
    """ A reimplemented preparation method.

    Runs the base-class preparation, then clears the selected paths
    and the selected name filter.
    """
    super(FileDialogEx, self)._prepare()
    self.selected_name_filter = u''
    self.selected_paths = []
|
[
"def",
"_prepare",
"(",
"self",
")",
":",
"super",
"(",
"FileDialogEx",
",",
"self",
")",
".",
"_prepare",
"(",
")",
"self",
".",
"selected_paths",
"=",
"[",
"]",
"self",
".",
"selected_name_filter",
"=",
"u''"
] |
https://github.com/nucleic/enaml/blob/65c2a2a2d765e88f2e1103046680571894bb41ed/enaml/widgets/file_dialog_ex.py#L218-L226
|
||
meraki/dashboard-api-python
|
aef5e6fe5d23a40d435d5c64ff30580a28af07f1
|
meraki_v0/api/switch_stacks.py
|
python
|
SwitchStacks.createNetworkSwitchStack
|
(self, networkId: str, name: str, serials: list)
|
return self._session.post(metadata, resource, payload)
|
**Create a stack**
https://developer.cisco.com/meraki/api/#!create-network-switch-stack
- networkId (string)
- name (string): The name of the new stack
- serials (array): An array of switch serials to be added into the new stack
|
**Create a stack**
https://developer.cisco.com/meraki/api/#!create-network-switch-stack
- networkId (string)
- name (string): The name of the new stack
- serials (array): An array of switch serials to be added into the new stack
|
[
"**",
"Create",
"a",
"stack",
"**",
"https",
":",
"//",
"developer",
".",
"cisco",
".",
"com",
"/",
"meraki",
"/",
"api",
"/",
"#!create",
"-",
"network",
"-",
"switch",
"-",
"stack",
"-",
"networkId",
"(",
"string",
")",
"-",
"name",
"(",
"string",
")",
":",
"The",
"name",
"of",
"the",
"new",
"stack",
"-",
"serials",
"(",
"array",
")",
":",
"An",
"array",
"of",
"switch",
"serials",
"to",
"be",
"added",
"into",
"the",
"new",
"stack"
] |
def createNetworkSwitchStack(self, networkId: str, name: str, serials: list):
    """
    **Create a stack**
    https://developer.cisco.com/meraki/api/#!create-network-switch-stack
    - networkId (string)
    - name (string): The name of the new stack
    - serials (array): An array of switch serials to be added into the new stack
    """
    # Snapshot the arguments first, before any other locals are defined.
    kwargs = locals()

    metadata = {
        'tags': ['Switch stacks'],
        'operation': 'createNetworkSwitchStack',
    }
    resource = f'/networks/{networkId}/switchStacks'
    body_params = ['name', 'serials']
    payload = {}
    for key, value in kwargs.items():
        if key.strip() in body_params:
            payload[key.strip()] = value
    return self._session.post(metadata, resource, payload)
|
[
"def",
"createNetworkSwitchStack",
"(",
"self",
",",
"networkId",
":",
"str",
",",
"name",
":",
"str",
",",
"serials",
":",
"list",
")",
":",
"kwargs",
"=",
"locals",
"(",
")",
"metadata",
"=",
"{",
"'tags'",
":",
"[",
"'Switch stacks'",
"]",
",",
"'operation'",
":",
"'createNetworkSwitchStack'",
",",
"}",
"resource",
"=",
"f'/networks/{networkId}/switchStacks'",
"body_params",
"=",
"[",
"'name'",
",",
"'serials'",
"]",
"payload",
"=",
"{",
"k",
".",
"strip",
"(",
")",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"k",
".",
"strip",
"(",
")",
"in",
"body_params",
"}",
"return",
"self",
".",
"_session",
".",
"post",
"(",
"metadata",
",",
"resource",
",",
"payload",
")"
] |
https://github.com/meraki/dashboard-api-python/blob/aef5e6fe5d23a40d435d5c64ff30580a28af07f1/meraki_v0/api/switch_stacks.py#L22-L43
|
|
hiveml/tensorflow-grad-cam
|
9f4c9b7a5f9c94a0490b143282abc965ce1cfb0f
|
model/nets/resnet_v2.py
|
python
|
resnet_v2_152
|
(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_152')
|
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
|
ResNet-152 model of [1]. See resnet_v2() for arg and return description.
|
ResNet-152 model of [1]. See resnet_v2() for arg and return description.
|
[
"ResNet",
"-",
"152",
"model",
"of",
"[",
"1",
"]",
".",
"See",
"resnet_v2",
"()",
"for",
"arg",
"and",
"return",
"description",
"."
] |
def resnet_v2_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v2_152'):
    """ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
    # (name, base depth, unit count, stride) for each stage of the 152-layer net.
    block_specs = [
        ('block1', 64, 3, 2),
        ('block2', 128, 8, 2),
        ('block3', 256, 36, 2),
        ('block4', 512, 3, 1),
    ]
    blocks = [resnet_v2_block(block_name, base_depth=depth,
                              num_units=units, stride=stride)
              for block_name, depth, units, stride in block_specs]
    return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                     global_pool=global_pool, output_stride=output_stride,
                     include_root_block=True, spatial_squeeze=spatial_squeeze,
                     reuse=reuse, scope=scope)
|
[
"def",
"resnet_v2_152",
"(",
"inputs",
",",
"num_classes",
"=",
"None",
",",
"is_training",
"=",
"True",
",",
"global_pool",
"=",
"True",
",",
"output_stride",
"=",
"None",
",",
"spatial_squeeze",
"=",
"True",
",",
"reuse",
"=",
"None",
",",
"scope",
"=",
"'resnet_v2_152'",
")",
":",
"blocks",
"=",
"[",
"resnet_v2_block",
"(",
"'block1'",
",",
"base_depth",
"=",
"64",
",",
"num_units",
"=",
"3",
",",
"stride",
"=",
"2",
")",
",",
"resnet_v2_block",
"(",
"'block2'",
",",
"base_depth",
"=",
"128",
",",
"num_units",
"=",
"8",
",",
"stride",
"=",
"2",
")",
",",
"resnet_v2_block",
"(",
"'block3'",
",",
"base_depth",
"=",
"256",
",",
"num_units",
"=",
"36",
",",
"stride",
"=",
"2",
")",
",",
"resnet_v2_block",
"(",
"'block4'",
",",
"base_depth",
"=",
"512",
",",
"num_units",
"=",
"3",
",",
"stride",
"=",
"1",
")",
",",
"]",
"return",
"resnet_v2",
"(",
"inputs",
",",
"blocks",
",",
"num_classes",
",",
"is_training",
"=",
"is_training",
",",
"global_pool",
"=",
"global_pool",
",",
"output_stride",
"=",
"output_stride",
",",
"include_root_block",
"=",
"True",
",",
"spatial_squeeze",
"=",
"spatial_squeeze",
",",
"reuse",
"=",
"reuse",
",",
"scope",
"=",
"scope",
")"
] |
https://github.com/hiveml/tensorflow-grad-cam/blob/9f4c9b7a5f9c94a0490b143282abc965ce1cfb0f/model/nets/resnet_v2.py#L298-L316
|
|
AuHau/toggl-cli
|
f7a12ba821f258189f0b734bc39aca4cb1b7dc15
|
toggl/cli/commands.py
|
python
|
entry_rm
|
(ctx, spec)
|
Deletes a time entry specified by SPEC argument.
SPEC argument can be either ID or Description of the Time Entry.
In case multiple time entries are found, you will be prompted to confirm your deletion.
|
Deletes a time entry specified by SPEC argument.
|
[
"Deletes",
"a",
"time",
"entry",
"specified",
"by",
"SPEC",
"argument",
"."
] |
def entry_rm(ctx, spec):
    """
    Deletes a time entry specified by SPEC argument.

    SPEC can be either the ID or the Description of the Time Entry; when
    several entries match, you are prompted to confirm the deletion.
    """
    lookup_fields = ('id', 'description')
    helpers.entity_remove(api.TimeEntry, spec, lookup_fields, obj=ctx.obj)
|
[
"def",
"entry_rm",
"(",
"ctx",
",",
"spec",
")",
":",
"helpers",
".",
"entity_remove",
"(",
"api",
".",
"TimeEntry",
",",
"spec",
",",
"(",
"'id'",
",",
"'description'",
")",
",",
"obj",
"=",
"ctx",
".",
"obj",
")"
] |
https://github.com/AuHau/toggl-cli/blob/f7a12ba821f258189f0b734bc39aca4cb1b7dc15/toggl/cli/commands.py#L413-L420
|
||
mapbox/mason
|
0296d767a588bab4ca043474c48c0f269ccb8b81
|
scripts/clang-tidy/8.0.0/yaml/__init__.py
|
python
|
safe_load_all
|
(stream)
|
return load_all(stream, SafeLoader)
|
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags.
|
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags.
|
[
"Parse",
"all",
"YAML",
"documents",
"in",
"a",
"stream",
"and",
"produce",
"corresponding",
"Python",
"objects",
".",
"Resolve",
"only",
"basic",
"YAML",
"tags",
"."
] |
def safe_load_all(stream):
    """
    Parse every YAML document in the stream into corresponding Python
    objects, resolving only basic YAML tags (SafeLoader).
    """
    return load_all(stream, SafeLoader)
|
[
"def",
"safe_load_all",
"(",
"stream",
")",
":",
"return",
"load_all",
"(",
"stream",
",",
"SafeLoader",
")"
] |
https://github.com/mapbox/mason/blob/0296d767a588bab4ca043474c48c0f269ccb8b81/scripts/clang-tidy/8.0.0/yaml/__init__.py#L95-L101
|
|
ethereum/web3.py
|
6a90a26ea12e5a789834c9cd6a7ae6d302648f88
|
ethpm/tools/builder.py
|
python
|
normalize_compiler_output
|
(compiler_output: Dict[str, Any])
|
return {
name: normalize_contract_type(compiler_output[path][name], path)
for path, name in paths_and_names
}
|
Return compiler output with normalized fields for each contract type,
as specified in `normalize_contract_type`.
|
Return compiler output with normalized fields for each contract type,
as specified in `normalize_contract_type`.
|
[
"Return",
"compiler",
"output",
"with",
"normalized",
"fields",
"for",
"each",
"contract",
"type",
"as",
"specified",
"in",
"normalize_contract_type",
"."
] |
def normalize_compiler_output(compiler_output: Dict[str, Any]) -> Dict[str, Any]:
    """
    Return compiler output with normalized fields for each contract type,
    as specified in `normalize_contract_type`.
    """
    pairs = []
    for source_path in compiler_output:
        for contract_name in compiler_output[source_path]:
            pairs.append((source_path, contract_name))
    # Note: unpacking zip(*pairs) raises on empty compiler output, matching
    # the original behavior.
    paths, names = zip(*pairs)
    if len(set(names)) != len(names):
        duplicates = {name for name in names if names.count(name) > 1}
        raise ManifestBuildingError(
            f"Duplicate contract types: {duplicates} were found in the compiler output."
        )
    normalized = {}
    for source_path, contract_name in pairs:
        normalized[contract_name] = normalize_contract_type(
            compiler_output[source_path][contract_name], source_path
        )
    return normalized
|
[
"def",
"normalize_compiler_output",
"(",
"compiler_output",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"paths_and_names",
"=",
"[",
"(",
"path",
",",
"contract_name",
")",
"for",
"path",
"in",
"compiler_output",
"for",
"contract_name",
"in",
"compiler_output",
"[",
"path",
"]",
".",
"keys",
"(",
")",
"]",
"paths",
",",
"names",
"=",
"zip",
"(",
"*",
"paths_and_names",
")",
"if",
"len",
"(",
"names",
")",
"!=",
"len",
"(",
"set",
"(",
"names",
")",
")",
":",
"duplicates",
"=",
"set",
"(",
"[",
"name",
"for",
"name",
"in",
"names",
"if",
"names",
".",
"count",
"(",
"name",
")",
">",
"1",
"]",
")",
"raise",
"ManifestBuildingError",
"(",
"f\"Duplicate contract types: {duplicates} were found in the compiler output.\"",
")",
"return",
"{",
"name",
":",
"normalize_contract_type",
"(",
"compiler_output",
"[",
"path",
"]",
"[",
"name",
"]",
",",
"path",
")",
"for",
"path",
",",
"name",
"in",
"paths_and_names",
"}"
] |
https://github.com/ethereum/web3.py/blob/6a90a26ea12e5a789834c9cd6a7ae6d302648f88/ethpm/tools/builder.py#L489-L508
|
|
zyfra/ebonite
|
b01b662c43709d152940f488574d78ff25f89ecf
|
src/ebonite/core/analyzer/base.py
|
python
|
Hook.process
|
(self, obj, **kwargs)
|
Analyzes obj and returns result. Result type is determined by specific Hook class sub-hierarchy
:param obj: object to analyze
:param kwargs: additional information to be used for analysis
:return: analysis result
|
Analyzes obj and returns result. Result type is determined by specific Hook class sub-hierarchy
|
[
"Analyzes",
"obj",
"and",
"returns",
"result",
".",
"Result",
"type",
"is",
"determined",
"by",
"specific",
"Hook",
"class",
"sub",
"-",
"hierarchy"
] |
def process(self, obj, **kwargs):
"""
Analyzes obj and returns result. Result type is determined by specific Hook class sub-hierarchy
:param obj: object to analyze
:param kwargs: additional information to be used for analysis
:return: analysis result
"""
pass
|
[
"def",
"process",
"(",
"self",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"pass"
] |
https://github.com/zyfra/ebonite/blob/b01b662c43709d152940f488574d78ff25f89ecf/src/ebonite/core/analyzer/base.py#L39-L47
|
||
ayoolaolafenwa/PixelLib
|
ae56003c416a98780141a1170c9d888fe9a31317
|
pixellib/torchbackend/instance/utils/memory.py
|
python
|
retry_if_cuda_oom
|
(func)
|
return wrapped
|
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
The return values may become CPU tensors as well and it's user's
responsibility to convert it back to CUDA tensor if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
|
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
|
[
"Makes",
"a",
"function",
"retry",
"itself",
"after",
"encountering",
"pytorch",
"s",
"CUDA",
"OOM",
"error",
".",
"It",
"will",
"first",
"retry",
"after",
"calling",
"torch",
".",
"cuda",
".",
"empty_cache",
"()",
"."
] |
def retry_if_cuda_oom(func):
"""
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
The return values may become CPU tensors as well and it's user's
responsibility to convert it back to CUDA tensor if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
"""
def maybe_to_cpu(x):
try:
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
except AttributeError:
like_gpu_tensor = False
if like_gpu_tensor:
return x.to(device="cpu")
else:
return x
@wraps(func)
def wrapped(*args, **kwargs):
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Try on CPU. This slows down the code significantly, therefore print a notice.
logger = logging.getLogger(__name__)
logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
new_args = (maybe_to_cpu(x) for x in args)
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
return func(*new_args, **new_kwargs)
return wrapped
|
[
"def",
"retry_if_cuda_oom",
"(",
"func",
")",
":",
"def",
"maybe_to_cpu",
"(",
"x",
")",
":",
"try",
":",
"like_gpu_tensor",
"=",
"x",
".",
"device",
".",
"type",
"==",
"\"cuda\"",
"and",
"hasattr",
"(",
"x",
",",
"\"to\"",
")",
"except",
"AttributeError",
":",
"like_gpu_tensor",
"=",
"False",
"if",
"like_gpu_tensor",
":",
"return",
"x",
".",
"to",
"(",
"device",
"=",
"\"cpu\"",
")",
"else",
":",
"return",
"x",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"_ignore_torch_cuda_oom",
"(",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Clear cache and retry",
"torch",
".",
"cuda",
".",
"empty_cache",
"(",
")",
"with",
"_ignore_torch_cuda_oom",
"(",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Try on CPU. This slows down the code significantly, therefore print a notice.",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"info",
"(",
"\"Attempting to copy inputs of {} to CPU due to CUDA OOM\"",
".",
"format",
"(",
"str",
"(",
"func",
")",
")",
")",
"new_args",
"=",
"(",
"maybe_to_cpu",
"(",
"x",
")",
"for",
"x",
"in",
"args",
")",
"new_kwargs",
"=",
"{",
"k",
":",
"maybe_to_cpu",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"func",
"(",
"*",
"new_args",
",",
"*",
"*",
"new_kwargs",
")",
"return",
"wrapped"
] |
https://github.com/ayoolaolafenwa/PixelLib/blob/ae56003c416a98780141a1170c9d888fe9a31317/pixellib/torchbackend/instance/utils/memory.py#L26-L84
|
|
beer-garden/beer-garden
|
7b1b7dd64ab1f19f370451c9438362d11f3a06e4
|
src/app/beer_garden/api/http/handlers/v1/garden.py
|
python
|
GardenListAPI.get
|
(self)
|
---
summary: Retrieve a list of Gardens
responses:
200:
description: Garden with the given garden_name
schema:
type: array
items:
$ref: '#/definitions/Garden'
404:
$ref: '#/definitions/404Error'
50x:
$ref: '#/definitions/50xError'
tags:
- Garden
|
---
summary: Retrieve a list of Gardens
responses:
200:
description: Garden with the given garden_name
schema:
type: array
items:
$ref: '#/definitions/Garden'
404:
$ref: '#/definitions/404Error'
50x:
$ref: '#/definitions/50xError'
tags:
- Garden
|
[
"---",
"summary",
":",
"Retrieve",
"a",
"list",
"of",
"Gardens",
"responses",
":",
"200",
":",
"description",
":",
"Garden",
"with",
"the",
"given",
"garden_name",
"schema",
":",
"type",
":",
"array",
"items",
":",
"$ref",
":",
"#",
"/",
"definitions",
"/",
"Garden",
"404",
":",
"$ref",
":",
"#",
"/",
"definitions",
"/",
"404Error",
"50x",
":",
"$ref",
":",
"#",
"/",
"definitions",
"/",
"50xError",
"tags",
":",
"-",
"Garden"
] |
async def get(self):
"""
---
summary: Retrieve a list of Gardens
responses:
200:
description: Garden with the given garden_name
schema:
type: array
items:
$ref: '#/definitions/Garden'
404:
$ref: '#/definitions/404Error'
50x:
$ref: '#/definitions/50xError'
tags:
- Garden
"""
permitted_gardens = self.permissioned_queryset(Garden, GARDEN_READ)
response = MongoParser.serialize(permitted_gardens, to_string=True)
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.write(response)
|
[
"async",
"def",
"get",
"(",
"self",
")",
":",
"permitted_gardens",
"=",
"self",
".",
"permissioned_queryset",
"(",
"Garden",
",",
"GARDEN_READ",
")",
"response",
"=",
"MongoParser",
".",
"serialize",
"(",
"permitted_gardens",
",",
"to_string",
"=",
"True",
")",
"self",
".",
"set_header",
"(",
"\"Content-Type\"",
",",
"\"application/json; charset=UTF-8\"",
")",
"self",
".",
"write",
"(",
"response",
")"
] |
https://github.com/beer-garden/beer-garden/blob/7b1b7dd64ab1f19f370451c9438362d11f3a06e4/src/app/beer_garden/api/http/handlers/v1/garden.py#L163-L186
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/kombu/pools.py
|
python
|
PoolGroup.__missing__
|
(self, resource)
|
return k
|
[] |
def __missing__(self, resource):
limit = self.limit
if limit is use_global_limit:
limit = get_limit()
k = self[resource] = self.create(resource, limit)
return k
|
[
"def",
"__missing__",
"(",
"self",
",",
"resource",
")",
":",
"limit",
"=",
"self",
".",
"limit",
"if",
"limit",
"is",
"use_global_limit",
":",
"limit",
"=",
"get_limit",
"(",
")",
"k",
"=",
"self",
"[",
"resource",
"]",
"=",
"self",
".",
"create",
"(",
"resource",
",",
"limit",
")",
"return",
"k"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/kombu/pools.py#L91-L96
|
|||
snakemake/snakemake
|
987282dde8a2db5174414988c134a39ae8836a61
|
snakemake/dag.py
|
python
|
DAG.handle_protected
|
(self, job)
|
Write-protect output files that are marked with protected().
|
Write-protect output files that are marked with protected().
|
[
"Write",
"-",
"protect",
"output",
"files",
"that",
"are",
"marked",
"with",
"protected",
"()",
"."
] |
def handle_protected(self, job):
"""Write-protect output files that are marked with protected()."""
for f in job.expanded_output:
if f in job.protected_output:
logger.info("Write-protecting output file {}.".format(f))
f.protect()
|
[
"def",
"handle_protected",
"(",
"self",
",",
"job",
")",
":",
"for",
"f",
"in",
"job",
".",
"expanded_output",
":",
"if",
"f",
"in",
"job",
".",
"protected_output",
":",
"logger",
".",
"info",
"(",
"\"Write-protecting output file {}.\"",
".",
"format",
"(",
"f",
")",
")",
"f",
".",
"protect",
"(",
")"
] |
https://github.com/snakemake/snakemake/blob/987282dde8a2db5174414988c134a39ae8836a61/snakemake/dag.py#L573-L578
|
||
pkrumins/hacker-top
|
ea99ac9919ea412627b7960650b3fef26922226f
|
pyhackerstories.py
|
python
|
print_stories_paragraph
|
(stories)
|
Given a list of Stories, prints them out paragraph by paragraph
|
Given a list of Stories, prints them out paragraph by paragraph
|
[
"Given",
"a",
"list",
"of",
"Stories",
"prints",
"them",
"out",
"paragraph",
"by",
"paragraph"
] |
def print_stories_paragraph(stories):
"""
Given a list of Stories, prints them out paragraph by paragraph
"""
for story in stories:
print 'position:', story.position
print 'id:', story.id
print 'title:', story.title
print 'url:', story.url
print 'score:', story.score
print 'comments:', story.comments
print 'user:', story.user
print 'unix_time:', story.unix_time
print 'human_time:', story.human_time
print
|
[
"def",
"print_stories_paragraph",
"(",
"stories",
")",
":",
"for",
"story",
"in",
"stories",
":",
"print",
"'position:'",
",",
"story",
".",
"position",
"print",
"'id:'",
",",
"story",
".",
"id",
"print",
"'title:'",
",",
"story",
".",
"title",
"print",
"'url:'",
",",
"story",
".",
"url",
"print",
"'score:'",
",",
"story",
".",
"score",
"print",
"'comments:'",
",",
"story",
".",
"comments",
"print",
"'user:'",
",",
"story",
".",
"user",
"print",
"'unix_time:'",
",",
"story",
".",
"unix_time",
"print",
"'human_time:'",
",",
"story",
".",
"human_time",
"print"
] |
https://github.com/pkrumins/hacker-top/blob/ea99ac9919ea412627b7960650b3fef26922226f/pyhackerstories.py#L234-L249
|
||
okfn-brasil/querido-diario
|
294ed27e511d84887aee574922107c99a7ab3b8e
|
data_collection/gazette/spiders/base/fecam.py
|
python
|
FecamGazetteSpider.parse_pagination
|
(self, response)
|
return [
scrapy.Request(
f"{self.URL}?q={self.FECAM_QUERY}&Search_page={i}", callback=self.parse
)
for i in range(1, self.get_last_page(response) + 1)
]
|
This parse function is used to get all the pages available and
return request object for each one
|
This parse function is used to get all the pages available and
return request object for each one
|
[
"This",
"parse",
"function",
"is",
"used",
"to",
"get",
"all",
"the",
"pages",
"available",
"and",
"return",
"request",
"object",
"for",
"each",
"one"
] |
def parse_pagination(self, response):
"""
This parse function is used to get all the pages available and
return request object for each one
"""
return [
scrapy.Request(
f"{self.URL}?q={self.FECAM_QUERY}&Search_page={i}", callback=self.parse
)
for i in range(1, self.get_last_page(response) + 1)
]
|
[
"def",
"parse_pagination",
"(",
"self",
",",
"response",
")",
":",
"return",
"[",
"scrapy",
".",
"Request",
"(",
"f\"{self.URL}?q={self.FECAM_QUERY}&Search_page={i}\"",
",",
"callback",
"=",
"self",
".",
"parse",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"self",
".",
"get_last_page",
"(",
"response",
")",
"+",
"1",
")",
"]"
] |
https://github.com/okfn-brasil/querido-diario/blob/294ed27e511d84887aee574922107c99a7ab3b8e/data_collection/gazette/spiders/base/fecam.py#L20-L30
|
|
python/cpython
|
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
|
Lib/logging/__init__.py
|
python
|
addLevelName
|
(level, levelName)
|
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
|
Associate 'levelName' with 'level'.
|
[
"Associate",
"levelName",
"with",
"level",
"."
] |
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelToName[level] = levelName
_nameToLevel[levelName] = level
finally:
_releaseLock()
|
[
"def",
"addLevelName",
"(",
"level",
",",
"levelName",
")",
":",
"_acquireLock",
"(",
")",
"try",
":",
"#unlikely to cause an exception, but you never know...",
"_levelToName",
"[",
"level",
"]",
"=",
"levelName",
"_nameToLevel",
"[",
"levelName",
"]",
"=",
"level",
"finally",
":",
"_releaseLock",
"(",
")"
] |
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/logging/__init__.py#L149-L160
|
||
jython/frozen-mirror
|
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
|
lib-python/2.7/distutils/cmd.py
|
python
|
Command.copy_tree
|
(self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1)
|
return dir_util.copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
dry_run=self.dry_run)
|
Copy an entire directory tree respecting verbose, dry-run,
and force flags.
|
Copy an entire directory tree respecting verbose, dry-run,
and force flags.
|
[
"Copy",
"an",
"entire",
"directory",
"tree",
"respecting",
"verbose",
"dry",
"-",
"run",
"and",
"force",
"flags",
"."
] |
def copy_tree(self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
"""
return dir_util.copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
dry_run=self.dry_run)
|
[
"def",
"copy_tree",
"(",
"self",
",",
"infile",
",",
"outfile",
",",
"preserve_mode",
"=",
"1",
",",
"preserve_times",
"=",
"1",
",",
"preserve_symlinks",
"=",
"0",
",",
"level",
"=",
"1",
")",
":",
"return",
"dir_util",
".",
"copy_tree",
"(",
"infile",
",",
"outfile",
",",
"preserve_mode",
",",
"preserve_times",
",",
"preserve_symlinks",
",",
"not",
"self",
".",
"force",
",",
"dry_run",
"=",
"self",
".",
"dry_run",
")"
] |
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/distutils/cmd.py#L367-L377
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/django/db/migrations/questioner.py
|
python
|
InteractiveMigrationQuestioner.ask_rename_model
|
(self, old_model_state, new_model_state)
|
return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name,
new_model_state.name), False)
|
Was this model really renamed?
|
Was this model really renamed?
|
[
"Was",
"this",
"model",
"really",
"renamed?"
] |
def ask_rename_model(self, old_model_state, new_model_state):
"Was this model really renamed?"
msg = "Did you rename the %s.%s model to %s? [y/N]"
return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name,
new_model_state.name), False)
|
[
"def",
"ask_rename_model",
"(",
"self",
",",
"old_model_state",
",",
"new_model_state",
")",
":",
"msg",
"=",
"\"Did you rename the %s.%s model to %s? [y/N]\"",
"return",
"self",
".",
"_boolean_input",
"(",
"msg",
"%",
"(",
"old_model_state",
".",
"app_label",
",",
"old_model_state",
".",
"name",
",",
"new_model_state",
".",
"name",
")",
",",
"False",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/db/migrations/questioner.py#L199-L203
|
|
CLUEbenchmark/CLUEPretrainedModels
|
b384fd41665a8261f9c689c940cf750b3bc21fce
|
baselines/models/roberta_wwm_large_ext/run_ner.py
|
python
|
InputExample.__init__
|
(self, guid, text, label=None)
|
Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
|
Constructs a InputExample.
|
[
"Constructs",
"a",
"InputExample",
"."
] |
def __init__(self, guid, text, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text = text
self.label = label
|
[
"def",
"__init__",
"(",
"self",
",",
"guid",
",",
"text",
",",
"label",
"=",
"None",
")",
":",
"self",
".",
"guid",
"=",
"guid",
"self",
".",
"text",
"=",
"text",
"self",
".",
"label",
"=",
"label"
] |
https://github.com/CLUEbenchmark/CLUEPretrainedModels/blob/b384fd41665a8261f9c689c940cf750b3bc21fce/baselines/models/roberta_wwm_large_ext/run_ner.py#L123-L135
|
||
F8LEFT/DecLLVM
|
d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c
|
python/idaapi.py
|
python
|
is_restype_const
|
(*args)
|
return _idaapi.is_restype_const(*args)
|
is_restype_const(til, type) -> bool
|
is_restype_const(til, type) -> bool
|
[
"is_restype_const",
"(",
"til",
"type",
")",
"-",
">",
"bool"
] |
def is_restype_const(*args):
"""
is_restype_const(til, type) -> bool
"""
return _idaapi.is_restype_const(*args)
|
[
"def",
"is_restype_const",
"(",
"*",
"args",
")",
":",
"return",
"_idaapi",
".",
"is_restype_const",
"(",
"*",
"args",
")"
] |
https://github.com/F8LEFT/DecLLVM/blob/d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c/python/idaapi.py#L28668-L28672
|
|
dit/dit
|
2853cb13110c5a5b2fa7ad792e238e2177013da2
|
dit/divergences/kullback_leibler_divergence.py
|
python
|
kullback_leibler_divergence
|
(dist1, dist2, rvs=None, crvs=None, rv_mode=None)
|
return dkl
|
The Kullback-Liebler divergence between `dist1` and `dist2`.
Parameters
----------
dist1 : Distribution
The first distribution in the Kullback-Leibler divergence.
dist2 : Distribution
The second distribution in the Kullback-Leibler divergence.
rvs : list, None
The indexes of the random variable used to calculate the
Kullback-Leibler divergence between. If None, then the Kullback-Leibler
divergence is calculated over all random variables.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If equal
to 'names', the the elements are interpreted as random variable names.
If `None`, then the value of `dist._rv_mode` is consulted, which
defaults to 'indices'.
Returns
-------
dkl : float
The Kullback-Leibler divergence between `dist1` and `dist2`.
Raises
------
ditException
Raised if either `dist1` or `dist2` doesn't have `rvs` or, if `rvs` is
None, if `dist2` has an outcome length different than `dist1`.
|
The Kullback-Liebler divergence between `dist1` and `dist2`.
|
[
"The",
"Kullback",
"-",
"Liebler",
"divergence",
"between",
"dist1",
"and",
"dist2",
"."
] |
def kullback_leibler_divergence(dist1, dist2, rvs=None, crvs=None, rv_mode=None):
"""
The Kullback-Liebler divergence between `dist1` and `dist2`.
Parameters
----------
dist1 : Distribution
The first distribution in the Kullback-Leibler divergence.
dist2 : Distribution
The second distribution in the Kullback-Leibler divergence.
rvs : list, None
The indexes of the random variable used to calculate the
Kullback-Leibler divergence between. If None, then the Kullback-Leibler
divergence is calculated over all random variables.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If equal
to 'names', the the elements are interpreted as random variable names.
If `None`, then the value of `dist._rv_mode` is consulted, which
defaults to 'indices'.
Returns
-------
dkl : float
The Kullback-Leibler divergence between `dist1` and `dist2`.
Raises
------
ditException
Raised if either `dist1` or `dist2` doesn't have `rvs` or, if `rvs` is
None, if `dist2` has an outcome length different than `dist1`.
"""
xh = cross_entropy(dist1, dist2, rvs, crvs, rv_mode)
h = entropy(dist1, rvs, crvs, rv_mode)
dkl = xh - h
return dkl
|
[
"def",
"kullback_leibler_divergence",
"(",
"dist1",
",",
"dist2",
",",
"rvs",
"=",
"None",
",",
"crvs",
"=",
"None",
",",
"rv_mode",
"=",
"None",
")",
":",
"xh",
"=",
"cross_entropy",
"(",
"dist1",
",",
"dist2",
",",
"rvs",
",",
"crvs",
",",
"rv_mode",
")",
"h",
"=",
"entropy",
"(",
"dist1",
",",
"rvs",
",",
"crvs",
",",
"rv_mode",
")",
"dkl",
"=",
"xh",
"-",
"h",
"return",
"dkl"
] |
https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/divergences/kullback_leibler_divergence.py#L15-L51
|
|
wxWidgets/Phoenix
|
b2199e299a6ca6d866aa6f3d0888499136ead9d6
|
wx/py/frame.py
|
python
|
Frame.LoadSettings
|
(self, config)
|
Called by derived classes to load settings specific to the Frame
|
Called by derived classes to load settings specific to the Frame
|
[
"Called",
"by",
"derived",
"classes",
"to",
"load",
"settings",
"specific",
"to",
"the",
"Frame"
] |
def LoadSettings(self, config):
"""Called by derived classes to load settings specific to the Frame"""
pos = wx.Point(config.ReadInt('Window/PosX', -1),
config.ReadInt('Window/PosY', -1))
size = wx.Size(config.ReadInt('Window/Width', -1),
config.ReadInt('Window/Height', -1))
self.SetSize(size)
self.Move(pos)
|
[
"def",
"LoadSettings",
"(",
"self",
",",
"config",
")",
":",
"pos",
"=",
"wx",
".",
"Point",
"(",
"config",
".",
"ReadInt",
"(",
"'Window/PosX'",
",",
"-",
"1",
")",
",",
"config",
".",
"ReadInt",
"(",
"'Window/PosY'",
",",
"-",
"1",
")",
")",
"size",
"=",
"wx",
".",
"Size",
"(",
"config",
".",
"ReadInt",
"(",
"'Window/Width'",
",",
"-",
"1",
")",
",",
"config",
".",
"ReadInt",
"(",
"'Window/Height'",
",",
"-",
"1",
")",
")",
"self",
".",
"SetSize",
"(",
"size",
")",
"self",
".",
"Move",
"(",
"pos",
")"
] |
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/py/frame.py#L715-L724
|
||
1adrianb/face-alignment
|
c49ca6fef8ffa95a0ac7ce698e0b752ac91f6d42
|
face_alignment/detection/sfd/sfd_detector.py
|
python
|
SFDDetector.reference_scale
|
(self)
|
return 195
|
[] |
def reference_scale(self):
return 195
|
[
"def",
"reference_scale",
"(",
"self",
")",
":",
"return",
"195"
] |
https://github.com/1adrianb/face-alignment/blob/c49ca6fef8ffa95a0ac7ce698e0b752ac91f6d42/face_alignment/detection/sfd/sfd_detector.py#L62-L63
|
|||
Yonsm/.homeassistant
|
4d9a0070d0fcd8a5ded46e7884da9a4494bbbf26
|
extras/homeassistant/loader.py
|
python
|
Integration.get_component
|
(self)
|
return cache[self.domain]
|
Return the component.
|
Return the component.
|
[
"Return",
"the",
"component",
"."
] |
def get_component(self) -> ModuleType:
"""Return the component."""
cache = self.hass.data.setdefault(DATA_COMPONENTS, {})
if self.domain not in cache:
cache[self.domain] = importlib.import_module(self.pkg_path)
return cache[self.domain]
|
[
"def",
"get_component",
"(",
"self",
")",
"->",
"ModuleType",
":",
"cache",
"=",
"self",
".",
"hass",
".",
"data",
".",
"setdefault",
"(",
"DATA_COMPONENTS",
",",
"{",
"}",
")",
"if",
"self",
".",
"domain",
"not",
"in",
"cache",
":",
"cache",
"[",
"self",
".",
"domain",
"]",
"=",
"importlib",
".",
"import_module",
"(",
"self",
".",
"pkg_path",
")",
"return",
"cache",
"[",
"self",
".",
"domain",
"]"
] |
https://github.com/Yonsm/.homeassistant/blob/4d9a0070d0fcd8a5ded46e7884da9a4494bbbf26/extras/homeassistant/loader.py#L486-L491
|
|
pjlantz/droidbox
|
519ddd198ccef2e0d27e12929f25702f6a385d94
|
APIMonitor/androguard/core/bytecodes/dvm.py
|
python
|
Instruction.get_length
|
(self)
|
Return the length of the instruction
:rtype: int
|
Return the length of the instruction
|
[
"Return",
"the",
"length",
"of",
"the",
"instruction"
] |
def get_length(self) :
"""
Return the length of the instruction
:rtype: int
"""
raise("not implemented")
|
[
"def",
"get_length",
"(",
"self",
")",
":",
"raise",
"(",
"\"not implemented\"",
")"
] |
https://github.com/pjlantz/droidbox/blob/519ddd198ccef2e0d27e12929f25702f6a385d94/APIMonitor/androguard/core/bytecodes/dvm.py#L3749-L3755
|
||
CGATOxford/cgat
|
326aad4694bdfae8ddc194171bb5d73911243947
|
CGAT/Histogram.py
|
python
|
Calculate
|
(values,
num_bins=None,
min_value=None,
max_value=None,
intervals=None,
increment=None,
combine=None,
no_empty_bins=0,
dynamic_bins=False,
ignore_out_of_range=True)
|
return Convert(scipy.stats.histogram2(values, intervals), intervals, no_empty_bins)
|
calculate a histogram based on a list or tuple of values.
use scipy for calculation.
|
calculate a histogram based on a list or tuple of values.
|
[
"calculate",
"a",
"histogram",
"based",
"on",
"a",
"list",
"or",
"tuple",
"of",
"values",
"."
] |
def Calculate(values,
num_bins=None,
min_value=None,
max_value=None,
intervals=None,
increment=None,
combine=None,
no_empty_bins=0,
dynamic_bins=False,
ignore_out_of_range=True):
"""calculate a histogram based on a list or tuple of values.
use scipy for calculation.
"""
if len(values) == 0:
return []
if not intervals:
if min_value is None:
min_value = min(values)
if max_value is None:
max_value = max(values)
if dynamic_bins:
intervals = list(
set([x for x in values if min_value <= x <= max_value]))
intervals.sort()
else:
if increment:
step_size = increment
elif num_bins and max_value:
step_size = float(max_value - min_value) / float(num_bins)
else:
step_size = 1.0
num_bins = int(
math.ceil((float(max_value) - float(min_value)) / float(step_size)))
intervals = [float(min_value) + float(x) * float(step_size)
for x in range(num_bins + 1)]
if not ignore_out_of_range:
new_values = []
for v in values:
if v < min_value:
v = min_value
elif v > max_value:
v = max_value
new_values.append(v)
values = new_values
return Convert(scipy.stats.histogram2(values, intervals), intervals, no_empty_bins)
|
[
"def",
"Calculate",
"(",
"values",
",",
"num_bins",
"=",
"None",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"intervals",
"=",
"None",
",",
"increment",
"=",
"None",
",",
"combine",
"=",
"None",
",",
"no_empty_bins",
"=",
"0",
",",
"dynamic_bins",
"=",
"False",
",",
"ignore_out_of_range",
"=",
"True",
")",
":",
"if",
"len",
"(",
"values",
")",
"==",
"0",
":",
"return",
"[",
"]",
"if",
"not",
"intervals",
":",
"if",
"min_value",
"is",
"None",
":",
"min_value",
"=",
"min",
"(",
"values",
")",
"if",
"max_value",
"is",
"None",
":",
"max_value",
"=",
"max",
"(",
"values",
")",
"if",
"dynamic_bins",
":",
"intervals",
"=",
"list",
"(",
"set",
"(",
"[",
"x",
"for",
"x",
"in",
"values",
"if",
"min_value",
"<=",
"x",
"<=",
"max_value",
"]",
")",
")",
"intervals",
".",
"sort",
"(",
")",
"else",
":",
"if",
"increment",
":",
"step_size",
"=",
"increment",
"elif",
"num_bins",
"and",
"max_value",
":",
"step_size",
"=",
"float",
"(",
"max_value",
"-",
"min_value",
")",
"/",
"float",
"(",
"num_bins",
")",
"else",
":",
"step_size",
"=",
"1.0",
"num_bins",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"(",
"float",
"(",
"max_value",
")",
"-",
"float",
"(",
"min_value",
")",
")",
"/",
"float",
"(",
"step_size",
")",
")",
")",
"intervals",
"=",
"[",
"float",
"(",
"min_value",
")",
"+",
"float",
"(",
"x",
")",
"*",
"float",
"(",
"step_size",
")",
"for",
"x",
"in",
"range",
"(",
"num_bins",
"+",
"1",
")",
"]",
"if",
"not",
"ignore_out_of_range",
":",
"new_values",
"=",
"[",
"]",
"for",
"v",
"in",
"values",
":",
"if",
"v",
"<",
"min_value",
":",
"v",
"=",
"min_value",
"elif",
"v",
">",
"max_value",
":",
"v",
"=",
"max_value",
"new_values",
".",
"append",
"(",
"v",
")",
"values",
"=",
"new_values",
"return",
"Convert",
"(",
"scipy",
".",
"stats",
".",
"histogram2",
"(",
"values",
",",
"intervals",
")",
",",
"intervals",
",",
"no_empty_bins",
")"
] |
https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/CGAT/Histogram.py#L106-L160
|
|
DingGuodong/LinuxBashShellScriptForOps
|
d5727b985f920292a10698a3c9751d5dff5fc1a3
|
projects/LinuxSystemOps/AutoDevOps/Fabric/Fabric1.x/pyFabricAndStrictCapistrano.py
|
python
|
update_env
|
()
|
Update servers environment on the remote servers
|
Update servers environment on the remote servers
|
[
"Update",
"servers",
"environment",
"on",
"the",
"remote",
"servers"
] |
def update_env():
"""Update servers environment on the remote servers"""
sudo_run("cd %(current_release)s; %(pip_install_command)s" % {'current_release': env.current_release,
'pip_install_command': env.pip_install_command})
permissions()
|
[
"def",
"update_env",
"(",
")",
":",
"sudo_run",
"(",
"\"cd %(current_release)s; %(pip_install_command)s\"",
"%",
"{",
"'current_release'",
":",
"env",
".",
"current_release",
",",
"'pip_install_command'",
":",
"env",
".",
"pip_install_command",
"}",
")",
"permissions",
"(",
")"
] |
https://github.com/DingGuodong/LinuxBashShellScriptForOps/blob/d5727b985f920292a10698a3c9751d5dff5fc1a3/projects/LinuxSystemOps/AutoDevOps/Fabric/Fabric1.x/pyFabricAndStrictCapistrano.py#L231-L235
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/sql/schema.py
|
python
|
Sequence.create
|
(self, bind=None, checkfirst=True)
|
Creates this sequence in the database.
|
Creates this sequence in the database.
|
[
"Creates",
"this",
"sequence",
"in",
"the",
"database",
"."
] |
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
|
[
"def",
"create",
"(",
"self",
",",
"bind",
"=",
"None",
",",
"checkfirst",
"=",
"True",
")",
":",
"if",
"bind",
"is",
"None",
":",
"bind",
"=",
"_bind_or_error",
"(",
"self",
")",
"bind",
".",
"_run_visitor",
"(",
"ddl",
".",
"SchemaGenerator",
",",
"self",
",",
"checkfirst",
"=",
"checkfirst",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/sql/schema.py#L2349-L2356
|
||
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
lib/dns/asyncbackend.py
|
python
|
set_default_backend
|
(name)
|
return _default_backend
|
Set the default backend.
It's not normally necessary to call this method, as
``get_default_backend()`` will initialize the backend
appropriately in many cases. If ``sniffio`` is not installed, or
in testing situations, this function allows the backend to be set
explicitly.
|
Set the default backend.
|
[
"Set",
"the",
"default",
"backend",
"."
] |
def set_default_backend(name):
"""Set the default backend.
It's not normally necessary to call this method, as
``get_default_backend()`` will initialize the backend
appropriately in many cases. If ``sniffio`` is not installed, or
in testing situations, this function allows the backend to be set
explicitly.
"""
global _default_backend
_default_backend = get_backend(name)
return _default_backend
|
[
"def",
"set_default_backend",
"(",
"name",
")",
":",
"global",
"_default_backend",
"_default_backend",
"=",
"get_backend",
"(",
"name",
")",
"return",
"_default_backend"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/dns/asyncbackend.py#L85-L96
|
|
zeroSteiner/mayhem
|
cdcc5dacd787b55016bcffc8e180284088acf29e
|
mayhem/proc/linux.py
|
python
|
get_errno
|
()
|
return get_errno_loc()[0]
|
Get the value of the error from the last function call.
:return: The error number from libc.
:rtype: int
|
Get the value of the error from the last function call.
|
[
"Get",
"the",
"value",
"of",
"the",
"error",
"from",
"the",
"last",
"function",
"call",
"."
] |
def get_errno():
"""
Get the value of the error from the last function call.
:return: The error number from libc.
:rtype: int
"""
get_errno_loc = libc.__errno_location
get_errno_loc.restype = ctypes.POINTER(ctypes.c_int)
return get_errno_loc()[0]
|
[
"def",
"get_errno",
"(",
")",
":",
"get_errno_loc",
"=",
"libc",
".",
"__errno_location",
"get_errno_loc",
".",
"restype",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_int",
")",
"return",
"get_errno_loc",
"(",
")",
"[",
"0",
"]"
] |
https://github.com/zeroSteiner/mayhem/blob/cdcc5dacd787b55016bcffc8e180284088acf29e/mayhem/proc/linux.py#L107-L116
|
|
mcneel/rhinoscriptsyntax
|
c49bd0bf24c2513bdcb84d1bf307144489600fd9
|
Scripts/rhinoscript/curve.py
|
python
|
ExtendCurve
|
(curve_id, extension_type, side, boundary_object_ids)
|
return scriptcontext.errorhandler()
|
Extends a non-closed curve object by a line, arc, or smooth extension
until it intersects a collection of objects.
Parameters:
curve_id (guid): identifier of curve to extend
extension_type (number):
0 = line
1 = arc
2 = smooth
side (number):
0=extend from the start of the curve
1=extend from the end of the curve
2=extend from both the start and the end of the curve
boundary_object_ids (guid): curve, surface, and polysurface objects to extend to
Returns:
guid: The identifier of the new object if successful.
None: if not successful
Example:
import rhinoscriptsyntax as rs
filter = rs.filter.curve | rs.filter.surface | rs.filter.polysurface
objects = rs.GetObjects("Select boundary objects", filter)
if objects:
curve = rs.GetObject("Select curve to extend", rs.filter.curve)
if curve: rs.ExtendCurve( curve, 2, 1, objects )
See Also:
ExtendCurveLength
ExtendCurvePoint
|
Extends a non-closed curve object by a line, arc, or smooth extension
until it intersects a collection of objects.
Parameters:
curve_id (guid): identifier of curve to extend
extension_type (number):
0 = line
1 = arc
2 = smooth
side (number):
0=extend from the start of the curve
1=extend from the end of the curve
2=extend from both the start and the end of the curve
boundary_object_ids (guid): curve, surface, and polysurface objects to extend to
Returns:
guid: The identifier of the new object if successful.
None: if not successful
Example:
import rhinoscriptsyntax as rs
filter = rs.filter.curve | rs.filter.surface | rs.filter.polysurface
objects = rs.GetObjects("Select boundary objects", filter)
if objects:
curve = rs.GetObject("Select curve to extend", rs.filter.curve)
if curve: rs.ExtendCurve( curve, 2, 1, objects )
See Also:
ExtendCurveLength
ExtendCurvePoint
|
[
"Extends",
"a",
"non",
"-",
"closed",
"curve",
"object",
"by",
"a",
"line",
"arc",
"or",
"smooth",
"extension",
"until",
"it",
"intersects",
"a",
"collection",
"of",
"objects",
".",
"Parameters",
":",
"curve_id",
"(",
"guid",
")",
":",
"identifier",
"of",
"curve",
"to",
"extend",
"extension_type",
"(",
"number",
")",
":",
"0",
"=",
"line",
"1",
"=",
"arc",
"2",
"=",
"smooth",
"side",
"(",
"number",
")",
":",
"0",
"=",
"extend",
"from",
"the",
"start",
"of",
"the",
"curve",
"1",
"=",
"extend",
"from",
"the",
"end",
"of",
"the",
"curve",
"2",
"=",
"extend",
"from",
"both",
"the",
"start",
"and",
"the",
"end",
"of",
"the",
"curve",
"boundary_object_ids",
"(",
"guid",
")",
":",
"curve",
"surface",
"and",
"polysurface",
"objects",
"to",
"extend",
"to",
"Returns",
":",
"guid",
":",
"The",
"identifier",
"of",
"the",
"new",
"object",
"if",
"successful",
".",
"None",
":",
"if",
"not",
"successful",
"Example",
":",
"import",
"rhinoscriptsyntax",
"as",
"rs",
"filter",
"=",
"rs",
".",
"filter",
".",
"curve",
"|",
"rs",
".",
"filter",
".",
"surface",
"|",
"rs",
".",
"filter",
".",
"polysurface",
"objects",
"=",
"rs",
".",
"GetObjects",
"(",
"Select",
"boundary",
"objects",
"filter",
")",
"if",
"objects",
":",
"curve",
"=",
"rs",
".",
"GetObject",
"(",
"Select",
"curve",
"to",
"extend",
"rs",
".",
"filter",
".",
"curve",
")",
"if",
"curve",
":",
"rs",
".",
"ExtendCurve",
"(",
"curve",
"2",
"1",
"objects",
")",
"See",
"Also",
":",
"ExtendCurveLength",
"ExtendCurvePoint"
] |
def ExtendCurve(curve_id, extension_type, side, boundary_object_ids):
"""Extends a non-closed curve object by a line, arc, or smooth extension
until it intersects a collection of objects.
Parameters:
curve_id (guid): identifier of curve to extend
extension_type (number):
0 = line
1 = arc
2 = smooth
side (number):
0=extend from the start of the curve
1=extend from the end of the curve
2=extend from both the start and the end of the curve
boundary_object_ids (guid): curve, surface, and polysurface objects to extend to
Returns:
guid: The identifier of the new object if successful.
None: if not successful
Example:
import rhinoscriptsyntax as rs
filter = rs.filter.curve | rs.filter.surface | rs.filter.polysurface
objects = rs.GetObjects("Select boundary objects", filter)
if objects:
curve = rs.GetObject("Select curve to extend", rs.filter.curve)
if curve: rs.ExtendCurve( curve, 2, 1, objects )
See Also:
ExtendCurveLength
ExtendCurvePoint
"""
curve = rhutil.coercecurve(curve_id, -1, True)
if extension_type==0: extension_type = Rhino.Geometry.CurveExtensionStyle.Line
elif extension_type==1: extension_type = Rhino.Geometry.CurveExtensionStyle.Arc
elif extension_type==2: extension_type = Rhino.Geometry.CurveExtensionStyle.Smooth
else: raise ValueError("extension_type must be 0, 1, or 2")
if side==0: side = Rhino.Geometry.CurveEnd.Start
elif side==1: side = Rhino.Geometry.CurveEnd.End
elif side==2: side = Rhino.Geometry.CurveEnd.Both
else: raise ValueError("side must be 0, 1, or 2")
rhobjs = [rhutil.coercerhinoobject(id) for id in boundary_object_ids]
if not rhobjs: raise ValueError("boundary_object_ids must contain at least one item")
geometry = [obj.Geometry for obj in rhobjs]
newcurve = curve.Extend(side, extension_type, geometry)
if newcurve and newcurve.IsValid:
curve_id = rhutil.coerceguid(curve_id, True)
if scriptcontext.doc.Objects.Replace(curve_id, newcurve):
scriptcontext.doc.Views.Redraw()
return curve_id
return scriptcontext.errorhandler()
|
[
"def",
"ExtendCurve",
"(",
"curve_id",
",",
"extension_type",
",",
"side",
",",
"boundary_object_ids",
")",
":",
"curve",
"=",
"rhutil",
".",
"coercecurve",
"(",
"curve_id",
",",
"-",
"1",
",",
"True",
")",
"if",
"extension_type",
"==",
"0",
":",
"extension_type",
"=",
"Rhino",
".",
"Geometry",
".",
"CurveExtensionStyle",
".",
"Line",
"elif",
"extension_type",
"==",
"1",
":",
"extension_type",
"=",
"Rhino",
".",
"Geometry",
".",
"CurveExtensionStyle",
".",
"Arc",
"elif",
"extension_type",
"==",
"2",
":",
"extension_type",
"=",
"Rhino",
".",
"Geometry",
".",
"CurveExtensionStyle",
".",
"Smooth",
"else",
":",
"raise",
"ValueError",
"(",
"\"extension_type must be 0, 1, or 2\"",
")",
"if",
"side",
"==",
"0",
":",
"side",
"=",
"Rhino",
".",
"Geometry",
".",
"CurveEnd",
".",
"Start",
"elif",
"side",
"==",
"1",
":",
"side",
"=",
"Rhino",
".",
"Geometry",
".",
"CurveEnd",
".",
"End",
"elif",
"side",
"==",
"2",
":",
"side",
"=",
"Rhino",
".",
"Geometry",
".",
"CurveEnd",
".",
"Both",
"else",
":",
"raise",
"ValueError",
"(",
"\"side must be 0, 1, or 2\"",
")",
"rhobjs",
"=",
"[",
"rhutil",
".",
"coercerhinoobject",
"(",
"id",
")",
"for",
"id",
"in",
"boundary_object_ids",
"]",
"if",
"not",
"rhobjs",
":",
"raise",
"ValueError",
"(",
"\"boundary_object_ids must contain at least one item\"",
")",
"geometry",
"=",
"[",
"obj",
".",
"Geometry",
"for",
"obj",
"in",
"rhobjs",
"]",
"newcurve",
"=",
"curve",
".",
"Extend",
"(",
"side",
",",
"extension_type",
",",
"geometry",
")",
"if",
"newcurve",
"and",
"newcurve",
".",
"IsValid",
":",
"curve_id",
"=",
"rhutil",
".",
"coerceguid",
"(",
"curve_id",
",",
"True",
")",
"if",
"scriptcontext",
".",
"doc",
".",
"Objects",
".",
"Replace",
"(",
"curve_id",
",",
"newcurve",
")",
":",
"scriptcontext",
".",
"doc",
".",
"Views",
".",
"Redraw",
"(",
")",
"return",
"curve_id",
"return",
"scriptcontext",
".",
"errorhandler",
"(",
")"
] |
https://github.com/mcneel/rhinoscriptsyntax/blob/c49bd0bf24c2513bdcb84d1bf307144489600fd9/Scripts/rhinoscript/curve.py#L2530-L2578
|
|
MegviiDetection/video_analyst
|
f4d1bccb1c698961fed3cb70808f1177fab13bdd
|
videoanalyst/data/sampler/sampler_base.py
|
python
|
SamplerBase.update_params
|
(self)
|
r"""
an interface for update params
|
r"""
an interface for update params
|
[
"r",
"an",
"interface",
"for",
"update",
"params"
] |
def update_params(self) -> None:
r"""
an interface for update params
"""
|
[
"def",
"update_params",
"(",
"self",
")",
"->",
"None",
":"
] |
https://github.com/MegviiDetection/video_analyst/blob/f4d1bccb1c698961fed3cb70808f1177fab13bdd/videoanalyst/data/sampler/sampler_base.py#L79-L82
|
||
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
corehq/apps/reports/formdetails/readable.py
|
python
|
form_key_filter
|
(key)
|
return True
|
[] |
def form_key_filter(key):
if key in SYSTEM_FIELD_NAMES:
return False
if key.startswith(('#', '@', '_')):
return False
return True
|
[
"def",
"form_key_filter",
"(",
"key",
")",
":",
"if",
"key",
"in",
"SYSTEM_FIELD_NAMES",
":",
"return",
"False",
"if",
"key",
".",
"startswith",
"(",
"(",
"'#'",
",",
"'@'",
",",
"'_'",
")",
")",
":",
"return",
"False",
"return",
"True"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/reports/formdetails/readable.py#L25-L32
|
|||
ifwe/digsby
|
f5fe00244744aa131e07f09348d10563f3d8fa99
|
digsby/src/gui/imwin/imhub.py
|
python
|
pop_all_hidden
|
()
|
Display all hidden conversations.
|
Display all hidden conversations.
|
[
"Display",
"all",
"hidden",
"conversations",
"."
] |
def pop_all_hidden():
'Display all hidden conversations.'
for contact in list(hidden_windows.keys()):
pop_any_hidden(contact)
|
[
"def",
"pop_all_hidden",
"(",
")",
":",
"for",
"contact",
"in",
"list",
"(",
"hidden_windows",
".",
"keys",
"(",
")",
")",
":",
"pop_any_hidden",
"(",
"contact",
")"
] |
https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/src/gui/imwin/imhub.py#L647-L651
|
||
shirgur/PointerNet
|
c7ed05066fb40d4f1832dd24de28da259293983c
|
Data_Generator.py
|
python
|
TSPDataset._generate_data
|
(self)
|
return {'Points_List':points_list, 'Solutions':solutions}
|
:return: Set of points_list ans their One-Hot vector solutions
|
:return: Set of points_list ans their One-Hot vector solutions
|
[
":",
"return",
":",
"Set",
"of",
"points_list",
"ans",
"their",
"One",
"-",
"Hot",
"vector",
"solutions"
] |
def _generate_data(self):
"""
:return: Set of points_list ans their One-Hot vector solutions
"""
points_list = []
solutions = []
data_iter = tqdm(range(self.data_size), unit='data')
for i, _ in enumerate(data_iter):
data_iter.set_description('Data points %i/%i' % (i+1, self.data_size))
points_list.append(np.random.random((self.seq_len, 2)))
solutions_iter = tqdm(points_list, unit='solve')
if self.solve:
for i, points in enumerate(solutions_iter):
solutions_iter.set_description('Solved %i/%i' % (i+1, len(points_list)))
solutions.append(self.solver(points))
else:
solutions = None
return {'Points_List':points_list, 'Solutions':solutions}
|
[
"def",
"_generate_data",
"(",
"self",
")",
":",
"points_list",
"=",
"[",
"]",
"solutions",
"=",
"[",
"]",
"data_iter",
"=",
"tqdm",
"(",
"range",
"(",
"self",
".",
"data_size",
")",
",",
"unit",
"=",
"'data'",
")",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"data_iter",
")",
":",
"data_iter",
".",
"set_description",
"(",
"'Data points %i/%i'",
"%",
"(",
"i",
"+",
"1",
",",
"self",
".",
"data_size",
")",
")",
"points_list",
".",
"append",
"(",
"np",
".",
"random",
".",
"random",
"(",
"(",
"self",
".",
"seq_len",
",",
"2",
")",
")",
")",
"solutions_iter",
"=",
"tqdm",
"(",
"points_list",
",",
"unit",
"=",
"'solve'",
")",
"if",
"self",
".",
"solve",
":",
"for",
"i",
",",
"points",
"in",
"enumerate",
"(",
"solutions_iter",
")",
":",
"solutions_iter",
".",
"set_description",
"(",
"'Solved %i/%i'",
"%",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"points_list",
")",
")",
")",
"solutions",
".",
"append",
"(",
"self",
".",
"solver",
"(",
"points",
")",
")",
"else",
":",
"solutions",
"=",
"None",
"return",
"{",
"'Points_List'",
":",
"points_list",
",",
"'Solutions'",
":",
"solutions",
"}"
] |
https://github.com/shirgur/PointerNet/blob/c7ed05066fb40d4f1832dd24de28da259293983c/Data_Generator.py#L61-L79
|
|
aws-samples/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
src/kubernetes/client/models/v1_status.py
|
python
|
V1Status.message
|
(self, message)
|
Sets the message of this V1Status.
A human-readable description of the status of this operation.
:param message: The message of this V1Status.
:type: str
|
Sets the message of this V1Status.
A human-readable description of the status of this operation.
|
[
"Sets",
"the",
"message",
"of",
"this",
"V1Status",
".",
"A",
"human",
"-",
"readable",
"description",
"of",
"the",
"status",
"of",
"this",
"operation",
"."
] |
def message(self, message):
"""
Sets the message of this V1Status.
A human-readable description of the status of this operation.
:param message: The message of this V1Status.
:type: str
"""
self._message = message
|
[
"def",
"message",
"(",
"self",
",",
"message",
")",
":",
"self",
".",
"_message",
"=",
"message"
] |
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_status.py#L168-L177
|
||
nerdvegas/rez
|
d392c65bf63b4bca8106f938cec49144ba54e770
|
src/rez/package_filter.py
|
python
|
Rule._parse
|
(cls, txt)
|
Create a rule from a string.
Returns:
`Rule` instance, or None if the string does not represent an instance
of this rule type.
|
Create a rule from a string.
|
[
"Create",
"a",
"rule",
"from",
"a",
"string",
"."
] |
def _parse(cls, txt):
"""Create a rule from a string.
Returns:
`Rule` instance, or None if the string does not represent an instance
of this rule type.
"""
raise NotImplementedError
|
[
"def",
"_parse",
"(",
"cls",
",",
"txt",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/nerdvegas/rez/blob/d392c65bf63b4bca8106f938cec49144ba54e770/src/rez/package_filter.py#L377-L384
|
||
playframework/play1
|
0ecac3bc2421ae2dbec27a368bf671eda1c9cba5
|
python/Lib/imaplib.py
|
python
|
IMAP4.proxyauth
|
(self, user)
|
return self._simple_command('PROXYAUTH', user)
|
Assume authentication as "user".
Allows an authorised administrator to proxy into any user's
mailbox.
(typ, [data]) = <instance>.proxyauth(user)
|
Assume authentication as "user".
|
[
"Assume",
"authentication",
"as",
"user",
"."
] |
def proxyauth(self, user):
"""Assume authentication as "user".
Allows an authorised administrator to proxy into any user's
mailbox.
(typ, [data]) = <instance>.proxyauth(user)
"""
name = 'PROXYAUTH'
return self._simple_command('PROXYAUTH', user)
|
[
"def",
"proxyauth",
"(",
"self",
",",
"user",
")",
":",
"name",
"=",
"'PROXYAUTH'",
"return",
"self",
".",
"_simple_command",
"(",
"'PROXYAUTH'",
",",
"user",
")"
] |
https://github.com/playframework/play1/blob/0ecac3bc2421ae2dbec27a368bf671eda1c9cba5/python/Lib/imaplib.py#L608-L618
|
|
pculture/miro
|
d8e4594441939514dd2ac29812bf37087bb3aea5
|
tv/lib/databaseupgrade.py
|
python
|
run_on_both
|
(func)
|
return func
|
decorator to run an upgrade function for both device databases and the
main database
|
decorator to run an upgrade function for both device databases and the
main database
|
[
"decorator",
"to",
"run",
"an",
"upgrade",
"function",
"for",
"both",
"device",
"databases",
"and",
"the",
"main",
"database"
] |
def run_on_both(func):
"""decorator to run an upgrade function for both device databases and the
main database
"""
func._contexts = set(['device', 'main'])
return func
|
[
"def",
"run_on_both",
"(",
"func",
")",
":",
"func",
".",
"_contexts",
"=",
"set",
"(",
"[",
"'device'",
",",
"'main'",
"]",
")",
"return",
"func"
] |
https://github.com/pculture/miro/blob/d8e4594441939514dd2ac29812bf37087bb3aea5/tv/lib/databaseupgrade.py#L215-L220
|
|
sahana/eden
|
1696fa50e90ce967df69f66b571af45356cc18da
|
modules/templates/locations/DO/config.py
|
python
|
config
|
(settings)
|
Template settings for Dominican Republic
- designed to be used in a Cascade with an application template
|
Template settings for Dominican Republic
- designed to be used in a Cascade with an application template
|
[
"Template",
"settings",
"for",
"Dominican",
"Republic",
"-",
"designed",
"to",
"be",
"used",
"in",
"a",
"Cascade",
"with",
"an",
"application",
"template"
] |
def config(settings):
"""
Template settings for Dominican Republic
- designed to be used in a Cascade with an application template
"""
#T = current.T
# Pre-Populate
settings.base.prepopulate.append("locations/DO")
# Restrict to specific country/countries
settings.gis.countries.append("DO")
# Dosable the Postcode selector in the LocationSelector
#settings.gis.postcode_selector = False
# L10n (Localization) settings
settings.L10n.languages["es"] = "Spanish"
# Default Language (put this in custom template if-required)
#settings.L10n.default_language = "es"
# Default timezone for users
settings.L10n.timezone = "America/Santo_Domingo"
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
settings.fin.currencies["DOP"] = "Pesos"
settings.fin.currency_default = "DOP"
|
[
"def",
"config",
"(",
"settings",
")",
":",
"#T = current.T",
"# Pre-Populate",
"settings",
".",
"base",
".",
"prepopulate",
".",
"append",
"(",
"\"locations/DO\"",
")",
"# Restrict to specific country/countries",
"settings",
".",
"gis",
".",
"countries",
".",
"append",
"(",
"\"DO\"",
")",
"# Dosable the Postcode selector in the LocationSelector",
"#settings.gis.postcode_selector = False",
"# L10n (Localization) settings",
"settings",
".",
"L10n",
".",
"languages",
"[",
"\"es\"",
"]",
"=",
"\"Spanish\"",
"# Default Language (put this in custom template if-required)",
"#settings.L10n.default_language = \"es\"",
"# Default timezone for users",
"settings",
".",
"L10n",
".",
"timezone",
"=",
"\"America/Santo_Domingo\"",
"# Default Country Code for telephone numbers",
"settings",
".",
"L10n",
".",
"default_country_code",
"=",
"1",
"settings",
".",
"fin",
".",
"currencies",
"[",
"\"DOP\"",
"]",
"=",
"\"Pesos\"",
"settings",
".",
"fin",
".",
"currency_default",
"=",
"\"DOP\""
] |
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/templates/locations/DO/config.py#L5-L31
|
||
theotherp/nzbhydra
|
4b03d7f769384b97dfc60dade4806c0fc987514e
|
libs/cherrypy/lib/cpstats.py
|
python
|
StatsTool.record_start
|
(self)
|
Record the beginning of a request.
|
Record the beginning of a request.
|
[
"Record",
"the",
"beginning",
"of",
"a",
"request",
"."
] |
def record_start(self):
"""Record the beginning of a request."""
request = cherrypy.serving.request
if not hasattr(request.rfile, 'bytes_read'):
request.rfile = ByteCountWrapper(request.rfile)
request.body.fp = request.rfile
r = request.remote
appstats['Current Requests'] += 1
appstats['Total Requests'] += 1
appstats['Requests'][threading._get_ident()] = {
'Bytes Read': None,
'Bytes Written': None,
# Use a lambda so the ip gets updated by tools.proxy later
'Client': lambda s: '%s:%s' % (r.ip, r.port),
'End Time': None,
'Processing Time': proc_time,
'Request-Line': request.request_line,
'Response Status': None,
'Start Time': time.time(),
}
|
[
"def",
"record_start",
"(",
"self",
")",
":",
"request",
"=",
"cherrypy",
".",
"serving",
".",
"request",
"if",
"not",
"hasattr",
"(",
"request",
".",
"rfile",
",",
"'bytes_read'",
")",
":",
"request",
".",
"rfile",
"=",
"ByteCountWrapper",
"(",
"request",
".",
"rfile",
")",
"request",
".",
"body",
".",
"fp",
"=",
"request",
".",
"rfile",
"r",
"=",
"request",
".",
"remote",
"appstats",
"[",
"'Current Requests'",
"]",
"+=",
"1",
"appstats",
"[",
"'Total Requests'",
"]",
"+=",
"1",
"appstats",
"[",
"'Requests'",
"]",
"[",
"threading",
".",
"_get_ident",
"(",
")",
"]",
"=",
"{",
"'Bytes Read'",
":",
"None",
",",
"'Bytes Written'",
":",
"None",
",",
"# Use a lambda so the ip gets updated by tools.proxy later",
"'Client'",
":",
"lambda",
"s",
":",
"'%s:%s'",
"%",
"(",
"r",
".",
"ip",
",",
"r",
".",
"port",
")",
",",
"'End Time'",
":",
"None",
",",
"'Processing Time'",
":",
"proc_time",
",",
"'Request-Line'",
":",
"request",
".",
"request_line",
",",
"'Response Status'",
":",
"None",
",",
"'Start Time'",
":",
"time",
".",
"time",
"(",
")",
",",
"}"
] |
https://github.com/theotherp/nzbhydra/blob/4b03d7f769384b97dfc60dade4806c0fc987514e/libs/cherrypy/lib/cpstats.py#L314-L335
|
||
virtuald/pyhcl
|
133d69d80266100b091e8d8cf3a61023659ea947
|
src/hcl/parser.py
|
python
|
HclParser.p_listitems_4
|
(self, p)
|
listitems : objectkey list
|
listitems : objectkey list
|
[
"listitems",
":",
"objectkey",
"list"
] |
def p_listitems_4(self, p):
'''
listitems : objectkey list
'''
if DEBUG:
self.print_p(p)
p[2].insert(0, p[1])
p[0] = p[2]
|
[
"def",
"p_listitems_4",
"(",
"self",
",",
"p",
")",
":",
"if",
"DEBUG",
":",
"self",
".",
"print_p",
"(",
"p",
")",
"p",
"[",
"2",
"]",
".",
"insert",
"(",
"0",
",",
"p",
"[",
"1",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"2",
"]"
] |
https://github.com/virtuald/pyhcl/blob/133d69d80266100b091e8d8cf3a61023659ea947/src/hcl/parser.py#L468-L475
|
||
devAmoghS/Machine-Learning-with-Python
|
4bd52836f48e1e52b878d64a39e89c31dc47bfb1
|
natural_language_processing/utils.py
|
python
|
topic_weight
|
(d, word, k)
|
return p_word_given_topic(word, k) * p_topic_given_document(k, d)
|
given a document and a word in that document,
return the weight for the k-th topic
|
given a document and a word in that document,
return the weight for the k-th topic
|
[
"given",
"a",
"document",
"and",
"a",
"word",
"in",
"that",
"document",
"return",
"the",
"weight",
"for",
"the",
"k",
"-",
"th",
"topic"
] |
def topic_weight(d, word, k):
"""given a document and a word in that document,
return the weight for the k-th topic"""
return p_word_given_topic(word, k) * p_topic_given_document(k, d)
|
[
"def",
"topic_weight",
"(",
"d",
",",
"word",
",",
"k",
")",
":",
"return",
"p_word_given_topic",
"(",
"word",
",",
"k",
")",
"*",
"p_topic_given_document",
"(",
"k",
",",
"d",
")"
] |
https://github.com/devAmoghS/Machine-Learning-with-Python/blob/4bd52836f48e1e52b878d64a39e89c31dc47bfb1/natural_language_processing/utils.py#L194-L197
|
|
Yelp/mrjob
|
091572e87bc24cc64be40278dd0f5c3617c98d4b
|
mrjob/protocol.py
|
python
|
_KeyCachingProtocol._loads
|
(self, value)
|
Decode a single key/value, and return it.
|
Decode a single key/value, and return it.
|
[
"Decode",
"a",
"single",
"key",
"/",
"value",
"and",
"return",
"it",
"."
] |
def _loads(self, value):
"""Decode a single key/value, and return it."""
raise NotImplementedError
|
[
"def",
"_loads",
"(",
"self",
",",
"value",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/Yelp/mrjob/blob/091572e87bc24cc64be40278dd0f5c3617c98d4b/mrjob/protocol.py#L75-L77
|
||
yzhao062/pyod
|
13b0cd5f50d5ea5c5321da88c46232ae6f24dff7
|
pyod/models/pca.py
|
python
|
PCA.fit
|
(self, X, y=None)
|
return self
|
Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
|
Fit detector. y is ignored in unsupervised methods.
|
[
"Fit",
"detector",
".",
"y",
"is",
"ignored",
"in",
"unsupervised",
"methods",
"."
] |
def fit(self, X, y=None):
"""Fit detector. y is ignored in unsupervised methods.
Parameters
----------
X : numpy array of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
# validate inputs X and y (optional)
X = check_array(X)
self._set_n_classes(y)
# PCA is recommended to use on the standardized data (zero mean and
# unit variance).
if self.standardization:
X, self.scaler_ = standardizer(X, keep_scalar=True)
self.detector_ = sklearn_PCA(n_components=self.n_components,
copy=self.copy,
whiten=self.whiten,
svd_solver=self.svd_solver,
tol=self.tol,
iterated_power=self.iterated_power,
random_state=self.random_state)
self.detector_.fit(X=X, y=y)
# copy the attributes from the sklearn PCA object
self.n_components_ = self.detector_.n_components_
self.components_ = self.detector_.components_
# validate the number of components to be used for outlier detection
if self.n_selected_components is None:
self.n_selected_components_ = self.n_components_
else:
self.n_selected_components_ = self.n_selected_components
check_parameter(self.n_selected_components_, 1, self.n_components_,
include_left=True, include_right=True,
param_name='n_selected_components_')
# use eigenvalues as the weights of eigenvectors
self.w_components_ = np.ones([self.n_components_, ])
if self.weighted:
self.w_components_ = self.detector_.explained_variance_ratio_
# outlier scores is the sum of the weighted distances between each
# sample to the eigenvectors. The eigenvectors with smaller
# eigenvalues have more influence
# Not all eigenvectors are used, only n_selected_components_ smallest
# are used since they better reflect the variance change
self.selected_components_ = self.components_[
-1 * self.n_selected_components_:, :]
self.selected_w_components_ = self.w_components_[
-1 * self.n_selected_components_:]
self.decision_scores_ = np.sum(
cdist(X, self.selected_components_) / self.selected_w_components_,
axis=1).ravel()
self._process_decision_scores()
return self
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"# validate inputs X and y (optional)",
"X",
"=",
"check_array",
"(",
"X",
")",
"self",
".",
"_set_n_classes",
"(",
"y",
")",
"# PCA is recommended to use on the standardized data (zero mean and",
"# unit variance).",
"if",
"self",
".",
"standardization",
":",
"X",
",",
"self",
".",
"scaler_",
"=",
"standardizer",
"(",
"X",
",",
"keep_scalar",
"=",
"True",
")",
"self",
".",
"detector_",
"=",
"sklearn_PCA",
"(",
"n_components",
"=",
"self",
".",
"n_components",
",",
"copy",
"=",
"self",
".",
"copy",
",",
"whiten",
"=",
"self",
".",
"whiten",
",",
"svd_solver",
"=",
"self",
".",
"svd_solver",
",",
"tol",
"=",
"self",
".",
"tol",
",",
"iterated_power",
"=",
"self",
".",
"iterated_power",
",",
"random_state",
"=",
"self",
".",
"random_state",
")",
"self",
".",
"detector_",
".",
"fit",
"(",
"X",
"=",
"X",
",",
"y",
"=",
"y",
")",
"# copy the attributes from the sklearn PCA object",
"self",
".",
"n_components_",
"=",
"self",
".",
"detector_",
".",
"n_components_",
"self",
".",
"components_",
"=",
"self",
".",
"detector_",
".",
"components_",
"# validate the number of components to be used for outlier detection",
"if",
"self",
".",
"n_selected_components",
"is",
"None",
":",
"self",
".",
"n_selected_components_",
"=",
"self",
".",
"n_components_",
"else",
":",
"self",
".",
"n_selected_components_",
"=",
"self",
".",
"n_selected_components",
"check_parameter",
"(",
"self",
".",
"n_selected_components_",
",",
"1",
",",
"self",
".",
"n_components_",
",",
"include_left",
"=",
"True",
",",
"include_right",
"=",
"True",
",",
"param_name",
"=",
"'n_selected_components_'",
")",
"# use eigenvalues as the weights of eigenvectors",
"self",
".",
"w_components_",
"=",
"np",
".",
"ones",
"(",
"[",
"self",
".",
"n_components_",
",",
"]",
")",
"if",
"self",
".",
"weighted",
":",
"self",
".",
"w_components_",
"=",
"self",
".",
"detector_",
".",
"explained_variance_ratio_",
"# outlier scores is the sum of the weighted distances between each",
"# sample to the eigenvectors. The eigenvectors with smaller",
"# eigenvalues have more influence",
"# Not all eigenvectors are used, only n_selected_components_ smallest",
"# are used since they better reflect the variance change",
"self",
".",
"selected_components_",
"=",
"self",
".",
"components_",
"[",
"-",
"1",
"*",
"self",
".",
"n_selected_components_",
":",
",",
":",
"]",
"self",
".",
"selected_w_components_",
"=",
"self",
".",
"w_components_",
"[",
"-",
"1",
"*",
"self",
".",
"n_selected_components_",
":",
"]",
"self",
".",
"decision_scores_",
"=",
"np",
".",
"sum",
"(",
"cdist",
"(",
"X",
",",
"self",
".",
"selected_components_",
")",
"/",
"self",
".",
"selected_w_components_",
",",
"axis",
"=",
"1",
")",
".",
"ravel",
"(",
")",
"self",
".",
"_process_decision_scores",
"(",
")",
"return",
"self"
] |
https://github.com/yzhao062/pyod/blob/13b0cd5f50d5ea5c5321da88c46232ae6f24dff7/pyod/models/pca.py#L205-L273
|
|
sharppy/SHARPpy
|
19175269ab11fe06c917b5d10376862a4716e1db
|
sharppy/sharptab/params.py
|
python
|
dcp
|
(prof)
|
return dcp
|
Derecho Composite Parameter (*)
This parameter is based on a data set of 113 derecho events compiled by Evans and Doswell (2001).
The DCP was developed to identify environments considered favorable for cold pool "driven" wind
events through four primary mechanisms:
1) Cold pool production [DCAPE]
2) Ability to sustain strong storms along the leading edge of a gust front [MUCAPE]
3) Organization potential for any ensuing convection [0-6 km shear]
4) Sufficient flow within the ambient environment to favor development along downstream portion of the gust front [0-6 km mean wind].
This index is fomulated as follows:
DCP = (DCAPE/980)*(MUCAPE/2000)*(0-6 km shear/20 kt)*(0-6 km mean wind/16 kt)
Reference:
Evans, J.S., and C.A. Doswell, 2001: Examination of derecho environments using proximity soundings. Wea. Forecasting, 16, 329-342.
Parameters
----------
prof : profile object
Profile object
Returns
-------
dcp : number
Derecho Composite Parameter (unitless)
|
Derecho Composite Parameter (*)
|
[
"Derecho",
"Composite",
"Parameter",
"(",
"*",
")"
] |
def dcp(prof):
'''
Derecho Composite Parameter (*)
This parameter is based on a data set of 113 derecho events compiled by Evans and Doswell (2001).
The DCP was developed to identify environments considered favorable for cold pool "driven" wind
events through four primary mechanisms:
1) Cold pool production [DCAPE]
2) Ability to sustain strong storms along the leading edge of a gust front [MUCAPE]
3) Organization potential for any ensuing convection [0-6 km shear]
4) Sufficient flow within the ambient environment to favor development along downstream portion of the gust front [0-6 km mean wind].
This index is fomulated as follows:
DCP = (DCAPE/980)*(MUCAPE/2000)*(0-6 km shear/20 kt)*(0-6 km mean wind/16 kt)
Reference:
Evans, J.S., and C.A. Doswell, 2001: Examination of derecho environments using proximity soundings. Wea. Forecasting, 16, 329-342.
Parameters
----------
prof : profile object
Profile object
Returns
-------
dcp : number
Derecho Composite Parameter (unitless)
'''
sfc = prof.pres[prof.sfc]
p6km = interp.pres(prof, interp.to_msl(prof, 6000.))
dcape_val = getattr(prof, 'dcape', dcape( prof )[0])
mupcl = getattr(prof, 'mupcl', None)
if mupcl is None:
mupcl = parcelx(prof, flag=1)
sfc_6km_shear = getattr(prof, 'sfc_6km_shear', winds.wind_shear(prof, pbot=sfc, ptop=p6km))
mean_6km = getattr(prof, 'mean_6km', utils.comp2vec(*winds.mean_wind(prof, pbot=sfc, ptop=p6km)))
mag_shear = utils.mag(sfc_6km_shear[0], sfc_6km_shear[1])
mag_mean_wind = mean_6km[1]
dcp = (dcape_val/980.) * (mupcl.bplus/2000.) * (mag_shear / 20. ) * (mag_mean_wind / 16.)
return dcp
|
[
"def",
"dcp",
"(",
"prof",
")",
":",
"sfc",
"=",
"prof",
".",
"pres",
"[",
"prof",
".",
"sfc",
"]",
"p6km",
"=",
"interp",
".",
"pres",
"(",
"prof",
",",
"interp",
".",
"to_msl",
"(",
"prof",
",",
"6000.",
")",
")",
"dcape_val",
"=",
"getattr",
"(",
"prof",
",",
"'dcape'",
",",
"dcape",
"(",
"prof",
")",
"[",
"0",
"]",
")",
"mupcl",
"=",
"getattr",
"(",
"prof",
",",
"'mupcl'",
",",
"None",
")",
"if",
"mupcl",
"is",
"None",
":",
"mupcl",
"=",
"parcelx",
"(",
"prof",
",",
"flag",
"=",
"1",
")",
"sfc_6km_shear",
"=",
"getattr",
"(",
"prof",
",",
"'sfc_6km_shear'",
",",
"winds",
".",
"wind_shear",
"(",
"prof",
",",
"pbot",
"=",
"sfc",
",",
"ptop",
"=",
"p6km",
")",
")",
"mean_6km",
"=",
"getattr",
"(",
"prof",
",",
"'mean_6km'",
",",
"utils",
".",
"comp2vec",
"(",
"*",
"winds",
".",
"mean_wind",
"(",
"prof",
",",
"pbot",
"=",
"sfc",
",",
"ptop",
"=",
"p6km",
")",
")",
")",
"mag_shear",
"=",
"utils",
".",
"mag",
"(",
"sfc_6km_shear",
"[",
"0",
"]",
",",
"sfc_6km_shear",
"[",
"1",
"]",
")",
"mag_mean_wind",
"=",
"mean_6km",
"[",
"1",
"]",
"dcp",
"=",
"(",
"dcape_val",
"/",
"980.",
")",
"*",
"(",
"mupcl",
".",
"bplus",
"/",
"2000.",
")",
"*",
"(",
"mag_shear",
"/",
"20.",
")",
"*",
"(",
"mag_mean_wind",
"/",
"16.",
")",
"return",
"dcp"
] |
https://github.com/sharppy/SHARPpy/blob/19175269ab11fe06c917b5d10376862a4716e1db/sharppy/sharptab/params.py#L3029-L3073
|
|
Cadene/tensorflow-model-zoo.torch
|
990b10ffc22d4c8eacb2a502f20415b4f70c74c2
|
models/research/real_nvp/real_nvp_multiscale_dataset.py
|
python
|
RealNVP.eval_epoch
|
(self, hps)
|
return float(numpy.mean(eval_costs))
|
Evaluate bits/dim.
|
Evaluate bits/dim.
|
[
"Evaluate",
"bits",
"/",
"dim",
"."
] |
def eval_epoch(self, hps):
"""Evaluate bits/dim."""
n_eval_dict = {
"imnet": 50000,
"lsun": 300,
"celeba": 19962,
"svhn": 26032,
}
if FLAGS.eval_set_size == 0:
num_examples_eval = n_eval_dict[FLAGS.dataset]
else:
num_examples_eval = FLAGS.eval_set_size
n_epoch = num_examples_eval / hps.batch_size
eval_costs = []
bar_len = 70
for epoch_idx in xrange(n_epoch):
n_equal = epoch_idx * bar_len * 1. / n_epoch
n_equal = numpy.ceil(n_equal)
n_equal = int(n_equal)
n_dash = bar_len - n_equal
progress_bar = "[" + "=" * n_equal + "-" * n_dash + "]\r"
print progress_bar,
cost = self.bit_per_dim.eval()
eval_costs.append(cost)
print ""
return float(numpy.mean(eval_costs))
|
[
"def",
"eval_epoch",
"(",
"self",
",",
"hps",
")",
":",
"n_eval_dict",
"=",
"{",
"\"imnet\"",
":",
"50000",
",",
"\"lsun\"",
":",
"300",
",",
"\"celeba\"",
":",
"19962",
",",
"\"svhn\"",
":",
"26032",
",",
"}",
"if",
"FLAGS",
".",
"eval_set_size",
"==",
"0",
":",
"num_examples_eval",
"=",
"n_eval_dict",
"[",
"FLAGS",
".",
"dataset",
"]",
"else",
":",
"num_examples_eval",
"=",
"FLAGS",
".",
"eval_set_size",
"n_epoch",
"=",
"num_examples_eval",
"/",
"hps",
".",
"batch_size",
"eval_costs",
"=",
"[",
"]",
"bar_len",
"=",
"70",
"for",
"epoch_idx",
"in",
"xrange",
"(",
"n_epoch",
")",
":",
"n_equal",
"=",
"epoch_idx",
"*",
"bar_len",
"*",
"1.",
"/",
"n_epoch",
"n_equal",
"=",
"numpy",
".",
"ceil",
"(",
"n_equal",
")",
"n_equal",
"=",
"int",
"(",
"n_equal",
")",
"n_dash",
"=",
"bar_len",
"-",
"n_equal",
"progress_bar",
"=",
"\"[\"",
"+",
"\"=\"",
"*",
"n_equal",
"+",
"\"-\"",
"*",
"n_dash",
"+",
"\"]\\r\"",
"print",
"progress_bar",
",",
"cost",
"=",
"self",
".",
"bit_per_dim",
".",
"eval",
"(",
")",
"eval_costs",
".",
"append",
"(",
"cost",
")",
"print",
"\"\"",
"return",
"float",
"(",
"numpy",
".",
"mean",
"(",
"eval_costs",
")",
")"
] |
https://github.com/Cadene/tensorflow-model-zoo.torch/blob/990b10ffc22d4c8eacb2a502f20415b4f70c74c2/models/research/real_nvp/real_nvp_multiscale_dataset.py#L1417-L1442
|
|
antonylesuisse/qweb
|
2d6964a3e5cae90414c4f873eb770591f569dfe0
|
qweb_python/qweb/fcgi.py
|
python
|
Record._recvall
|
(sock, length)
|
return ''.join(dataList), recvLen
|
Attempts to receive length bytes from a socket, blocking if necessary.
(Socket may be blocking or non-blocking.)
|
Attempts to receive length bytes from a socket, blocking if necessary.
(Socket may be blocking or non-blocking.)
|
[
"Attempts",
"to",
"receive",
"length",
"bytes",
"from",
"a",
"socket",
"blocking",
"if",
"necessary",
".",
"(",
"Socket",
"may",
"be",
"blocking",
"or",
"non",
"-",
"blocking",
".",
")"
] |
def _recvall(sock, length):
"""
Attempts to receive length bytes from a socket, blocking if necessary.
(Socket may be blocking or non-blocking.)
"""
dataList = []
recvLen = 0
while length:
try:
data = sock.recv(length)
except socket.error, e:
if e[0] == errno.EAGAIN:
select.select([sock], [], [])
continue
else:
raise
if not data: # EOF
break
dataList.append(data)
dataLen = len(data)
recvLen += dataLen
length -= dataLen
return ''.join(dataList), recvLen
|
[
"def",
"_recvall",
"(",
"sock",
",",
"length",
")",
":",
"dataList",
"=",
"[",
"]",
"recvLen",
"=",
"0",
"while",
"length",
":",
"try",
":",
"data",
"=",
"sock",
".",
"recv",
"(",
"length",
")",
"except",
"socket",
".",
"error",
",",
"e",
":",
"if",
"e",
"[",
"0",
"]",
"==",
"errno",
".",
"EAGAIN",
":",
"select",
".",
"select",
"(",
"[",
"sock",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"continue",
"else",
":",
"raise",
"if",
"not",
"data",
":",
"# EOF",
"break",
"dataList",
".",
"append",
"(",
"data",
")",
"dataLen",
"=",
"len",
"(",
"data",
")",
"recvLen",
"+=",
"dataLen",
"length",
"-=",
"dataLen",
"return",
"''",
".",
"join",
"(",
"dataList",
")",
",",
"recvLen"
] |
https://github.com/antonylesuisse/qweb/blob/2d6964a3e5cae90414c4f873eb770591f569dfe0/qweb_python/qweb/fcgi.py#L464-L486
|
|
flopp/GpxTrackPoster
|
d7ff0ba61f6396938efd075d841a7c4a226a6f5d
|
gpxtrackposter/poster.py
|
python
|
Poster._draw_footer
|
(self, d: svgwrite.Drawing)
|
[] |
def _draw_footer(self, d: svgwrite.Drawing) -> None:
g = d.g(id="footer")
d.add(g)
text_color = self.colors["text"]
header_style = "font-size:4px; font-family:Arial"
value_style = "font-size:9px; font-family:Arial"
small_value_style = "font-size:3px; font-family:Arial"
(
total_length,
average_length,
length_range,
weeks,
) = self._compute_track_statistics()
g.add(
d.text(
self.translate("ATHLETE"),
insert=(10, self.height - 20),
fill=text_color,
style=header_style,
)
)
g.add(
d.text(
self._athlete,
insert=(10, self.height - 10),
fill=text_color,
style=value_style,
)
)
g.add(
d.text(
self.translate("STATISTICS"),
insert=(120, self.height - 20),
fill=text_color,
style=header_style,
)
)
g.add(
d.text(
self.translate("Number") + f": {len(self.tracks)}",
insert=(120, self.height - 15),
fill=text_color,
style=small_value_style,
)
)
g.add(
d.text(
self.translate("Weekly") + ": " + format_float(len(self.tracks) / weeks),
insert=(120, self.height - 10),
fill=text_color,
style=small_value_style,
)
)
g.add(
d.text(
self.translate("Total") + ": " + self.format_distance(total_length),
insert=(141, self.height - 15),
fill=text_color,
style=small_value_style,
)
)
g.add(
d.text(
self.translate("Avg") + ": " + self.format_distance(average_length),
insert=(141, self.height - 10),
fill=text_color,
style=small_value_style,
)
)
if length_range.is_valid():
min_length = length_range.lower()
max_length = length_range.upper()
assert min_length is not None
assert max_length is not None
else:
min_length = pint.quantity.Quantity(0.0)
max_length = pint.quantity.Quantity(0.0)
g.add(
d.text(
self.translate("Min") + ": " + self.format_distance(min_length),
insert=(167, self.height - 15),
fill=text_color,
style=small_value_style,
)
)
g.add(
d.text(
self.translate("Max") + ": " + self.format_distance(max_length),
insert=(167, self.height - 10),
fill=text_color,
style=small_value_style,
)
)
|
[
"def",
"_draw_footer",
"(",
"self",
",",
"d",
":",
"svgwrite",
".",
"Drawing",
")",
"->",
"None",
":",
"g",
"=",
"d",
".",
"g",
"(",
"id",
"=",
"\"footer\"",
")",
"d",
".",
"add",
"(",
"g",
")",
"text_color",
"=",
"self",
".",
"colors",
"[",
"\"text\"",
"]",
"header_style",
"=",
"\"font-size:4px; font-family:Arial\"",
"value_style",
"=",
"\"font-size:9px; font-family:Arial\"",
"small_value_style",
"=",
"\"font-size:3px; font-family:Arial\"",
"(",
"total_length",
",",
"average_length",
",",
"length_range",
",",
"weeks",
",",
")",
"=",
"self",
".",
"_compute_track_statistics",
"(",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"ATHLETE\"",
")",
",",
"insert",
"=",
"(",
"10",
",",
"self",
".",
"height",
"-",
"20",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"header_style",
",",
")",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"_athlete",
",",
"insert",
"=",
"(",
"10",
",",
"self",
".",
"height",
"-",
"10",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"value_style",
",",
")",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"STATISTICS\"",
")",
",",
"insert",
"=",
"(",
"120",
",",
"self",
".",
"height",
"-",
"20",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"header_style",
",",
")",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"Number\"",
")",
"+",
"f\": {len(self.tracks)}\"",
",",
"insert",
"=",
"(",
"120",
",",
"self",
".",
"height",
"-",
"15",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"small_value_style",
",",
")",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"Weekly\"",
")",
"+",
"\": \"",
"+",
"format_float",
"(",
"len",
"(",
"self",
".",
"tracks",
")",
"/",
"weeks",
")",
",",
"insert",
"=",
"(",
"120",
",",
"self",
".",
"height",
"-",
"10",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"small_value_style",
",",
")",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"Total\"",
")",
"+",
"\": \"",
"+",
"self",
".",
"format_distance",
"(",
"total_length",
")",
",",
"insert",
"=",
"(",
"141",
",",
"self",
".",
"height",
"-",
"15",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"small_value_style",
",",
")",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"Avg\"",
")",
"+",
"\": \"",
"+",
"self",
".",
"format_distance",
"(",
"average_length",
")",
",",
"insert",
"=",
"(",
"141",
",",
"self",
".",
"height",
"-",
"10",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"small_value_style",
",",
")",
")",
"if",
"length_range",
".",
"is_valid",
"(",
")",
":",
"min_length",
"=",
"length_range",
".",
"lower",
"(",
")",
"max_length",
"=",
"length_range",
".",
"upper",
"(",
")",
"assert",
"min_length",
"is",
"not",
"None",
"assert",
"max_length",
"is",
"not",
"None",
"else",
":",
"min_length",
"=",
"pint",
".",
"quantity",
".",
"Quantity",
"(",
"0.0",
")",
"max_length",
"=",
"pint",
".",
"quantity",
".",
"Quantity",
"(",
"0.0",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"Min\"",
")",
"+",
"\": \"",
"+",
"self",
".",
"format_distance",
"(",
"min_length",
")",
",",
"insert",
"=",
"(",
"167",
",",
"self",
".",
"height",
"-",
"15",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"small_value_style",
",",
")",
")",
"g",
".",
"add",
"(",
"d",
".",
"text",
"(",
"self",
".",
"translate",
"(",
"\"Max\"",
")",
"+",
"\": \"",
"+",
"self",
".",
"format_distance",
"(",
"max_length",
")",
",",
"insert",
"=",
"(",
"167",
",",
"self",
".",
"height",
"-",
"10",
")",
",",
"fill",
"=",
"text_color",
",",
"style",
"=",
"small_value_style",
",",
")",
")"
] |
https://github.com/flopp/GpxTrackPoster/blob/d7ff0ba61f6396938efd075d841a7c4a226a6f5d/gpxtrackposter/poster.py#L205-L300
|
||||
facebookincubator/FCR
|
d5a629d062060cfa859b848ff54867e7450c04fd
|
fbnet/command_runner/device_db.py
|
python
|
BaseDeviceDB.wait_for_data
|
(self)
|
Wait for the data to be fetched
|
Wait for the data to be fetched
|
[
"Wait",
"for",
"the",
"data",
"to",
"be",
"fetched"
] |
async def wait_for_data(self):
"""Wait for the data to be fetched"""
while not self._data_valid:
self.logger.info("Waiting for data")
await self.wait()
self.logger.info("Device data valid")
|
[
"async",
"def",
"wait_for_data",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"_data_valid",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Waiting for data\"",
")",
"await",
"self",
".",
"wait",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Device data valid\"",
")"
] |
https://github.com/facebookincubator/FCR/blob/d5a629d062060cfa859b848ff54867e7450c04fd/fbnet/command_runner/device_db.py#L89-L94
|
||
dtmilano/AndroidViewClient
|
421b86e3f1a57683557fc0173951cd0332ab43f4
|
src/com/dtmilano/android/viewclient.py
|
python
|
ViewClient.connectToDeviceOrExit
|
(timeout=60, verbose=False, ignoresecuredevice=False, ignoreversioncheck=False,
serialno=None, adbhostname=adbclient.HOSTNAME, adbport=adbclient.PORT,
connect=adbclient.connect)
|
return device, serialno
|
Connects to a device which serial number is obtained from the script arguments if available
or using the default regex C{.*}.
If the connection is not successful the script exits.
History
-------
In MonkeyRunner times, this method was a way of overcoming one of its limitations.
L{MonkeyRunner.waitForConnection()} returns a L{MonkeyDevice} even if the connection failed.
Then, to detect this situation, C{device.wake()} is attempted and if it fails then it is
assumed the previous connection failed.
@type timeout: int
@param timeout: timeout for the connection
@type verbose: bool
@param verbose: Verbose output
@type ignoresecuredevice: bool
@param ignoresecuredevice: Ignores the check for a secure device
@type ignoreversioncheck: bool
@param ignoreversioncheck: Ignores the check for a supported ADB version
@type serialno: str
@param serialno: The device or emulator serial number
@type connect: function
@param connect: Connect method to use to connect to ADB
@return: the device and serialno used for the connection
|
Connects to a device which serial number is obtained from the script arguments if available
or using the default regex C{.*}.
|
[
"Connects",
"to",
"a",
"device",
"which",
"serial",
"number",
"is",
"obtained",
"from",
"the",
"script",
"arguments",
"if",
"available",
"or",
"using",
"the",
"default",
"regex",
"C",
"{",
".",
"*",
"}",
"."
] |
def connectToDeviceOrExit(timeout=60, verbose=False, ignoresecuredevice=False, ignoreversioncheck=False,
serialno=None, adbhostname=adbclient.HOSTNAME, adbport=adbclient.PORT,
connect=adbclient.connect):
"""
Connects to a device which serial number is obtained from the script arguments if available
or using the default regex C{.*}.
If the connection is not successful the script exits.
History
-------
In MonkeyRunner times, this method was a way of overcoming one of its limitations.
L{MonkeyRunner.waitForConnection()} returns a L{MonkeyDevice} even if the connection failed.
Then, to detect this situation, C{device.wake()} is attempted and if it fails then it is
assumed the previous connection failed.
@type timeout: int
@param timeout: timeout for the connection
@type verbose: bool
@param verbose: Verbose output
@type ignoresecuredevice: bool
@param ignoresecuredevice: Ignores the check for a secure device
@type ignoreversioncheck: bool
@param ignoreversioncheck: Ignores the check for a supported ADB version
@type serialno: str
@param serialno: The device or emulator serial number
@type connect: function
@param connect: Connect method to use to connect to ADB
@return: the device and serialno used for the connection
"""
progname = os.path.basename(sys.argv[0])
if serialno is None:
# eat all the extra options the invoking script may have added
args = sys.argv
while len(args) > 1 and args[1][0] == '-':
args.pop(1)
serialno = args[1] if len(args) > 1 else \
os.environ['ANDROID_SERIAL'] if 'ANDROID_SERIAL' in os.environ \
else '.*'
if IP_RE.match(serialno):
# If matches an IP address format and port was not specified add the default
serialno += ':%d' % ADB_DEFAULT_PORT
if verbose:
print('Connecting to a device with serialno=%s with a timeout of %d secs...' % \
(serialno, timeout), file=sys.stderr)
ViewClient.setAlarm(timeout + 5)
# NOTE: timeout is used for 2 different timeouts, the one to set the alarm to timeout the connection with
# adb and the timeout used by adb (once connected) for the sockets
device = adbclient.AdbClient(
serialno,
hostname=adbhostname,
port=adbport,
ignoreversioncheck=ignoreversioncheck,
timeout=timeout,
connect=connect)
ViewClient.setAlarm(0)
if verbose:
print('Connected to device with serialno=%s' % serialno, file=sys.stderr)
secure = device.getSystemProperty('ro.secure')
debuggable = device.getSystemProperty('ro.debuggable')
versionProperty = device.getProperty(VERSION_SDK_PROPERTY)
if versionProperty:
version = int(versionProperty)
else:
if verbose:
print("Couldn't obtain device SDK version")
version = -1
# we are going to use UiAutomator for versions >= 16 that's why we ignore if the device
# is secure if this is true
if secure == '1' and debuggable == '0' and not ignoresecuredevice and version < 16:
print("%s: ERROR: Device is secure, AndroidViewClient won't work." % progname, file=sys.stderr)
if verbose:
print(" secure=%s debuggable=%s version=%d ignoresecuredevice=%s" % \
(secure, debuggable, version, ignoresecuredevice), file=sys.stderr)
sys.exit(2)
if device.serialno:
# If we know the serialno because it was set by AdbClient, use it
serialno = device.serialno
ipPortRE = re.compile(IP_DOMAIN_NAME_PORT_REGEX, re.IGNORECASE)
if re.search("[.*()+]", serialno) and not ipPortRE.match(serialno):
# if a regex was used we have to determine the serialno used
serialno = ViewClient.__obtainDeviceSerialNumber(device)
if verbose:
print('Actual device serialno=%s' % serialno, file=sys.stderr)
return device, serialno
|
[
"def",
"connectToDeviceOrExit",
"(",
"timeout",
"=",
"60",
",",
"verbose",
"=",
"False",
",",
"ignoresecuredevice",
"=",
"False",
",",
"ignoreversioncheck",
"=",
"False",
",",
"serialno",
"=",
"None",
",",
"adbhostname",
"=",
"adbclient",
".",
"HOSTNAME",
",",
"adbport",
"=",
"adbclient",
".",
"PORT",
",",
"connect",
"=",
"adbclient",
".",
"connect",
")",
":",
"progname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"if",
"serialno",
"is",
"None",
":",
"# eat all the extra options the invoking script may have added",
"args",
"=",
"sys",
".",
"argv",
"while",
"len",
"(",
"args",
")",
">",
"1",
"and",
"args",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"'-'",
":",
"args",
".",
"pop",
"(",
"1",
")",
"serialno",
"=",
"args",
"[",
"1",
"]",
"if",
"len",
"(",
"args",
")",
">",
"1",
"else",
"os",
".",
"environ",
"[",
"'ANDROID_SERIAL'",
"]",
"if",
"'ANDROID_SERIAL'",
"in",
"os",
".",
"environ",
"else",
"'.*'",
"if",
"IP_RE",
".",
"match",
"(",
"serialno",
")",
":",
"# If matches an IP address format and port was not specified add the default",
"serialno",
"+=",
"':%d'",
"%",
"ADB_DEFAULT_PORT",
"if",
"verbose",
":",
"print",
"(",
"'Connecting to a device with serialno=%s with a timeout of %d secs...'",
"%",
"(",
"serialno",
",",
"timeout",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"ViewClient",
".",
"setAlarm",
"(",
"timeout",
"+",
"5",
")",
"# NOTE: timeout is used for 2 different timeouts, the one to set the alarm to timeout the connection with",
"# adb and the timeout used by adb (once connected) for the sockets",
"device",
"=",
"adbclient",
".",
"AdbClient",
"(",
"serialno",
",",
"hostname",
"=",
"adbhostname",
",",
"port",
"=",
"adbport",
",",
"ignoreversioncheck",
"=",
"ignoreversioncheck",
",",
"timeout",
"=",
"timeout",
",",
"connect",
"=",
"connect",
")",
"ViewClient",
".",
"setAlarm",
"(",
"0",
")",
"if",
"verbose",
":",
"print",
"(",
"'Connected to device with serialno=%s'",
"%",
"serialno",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"secure",
"=",
"device",
".",
"getSystemProperty",
"(",
"'ro.secure'",
")",
"debuggable",
"=",
"device",
".",
"getSystemProperty",
"(",
"'ro.debuggable'",
")",
"versionProperty",
"=",
"device",
".",
"getProperty",
"(",
"VERSION_SDK_PROPERTY",
")",
"if",
"versionProperty",
":",
"version",
"=",
"int",
"(",
"versionProperty",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Couldn't obtain device SDK version\"",
")",
"version",
"=",
"-",
"1",
"# we are going to use UiAutomator for versions >= 16 that's why we ignore if the device",
"# is secure if this is true",
"if",
"secure",
"==",
"'1'",
"and",
"debuggable",
"==",
"'0'",
"and",
"not",
"ignoresecuredevice",
"and",
"version",
"<",
"16",
":",
"print",
"(",
"\"%s: ERROR: Device is secure, AndroidViewClient won't work.\"",
"%",
"progname",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"verbose",
":",
"print",
"(",
"\" secure=%s debuggable=%s version=%d ignoresecuredevice=%s\"",
"%",
"(",
"secure",
",",
"debuggable",
",",
"version",
",",
"ignoresecuredevice",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"if",
"device",
".",
"serialno",
":",
"# If we know the serialno because it was set by AdbClient, use it",
"serialno",
"=",
"device",
".",
"serialno",
"ipPortRE",
"=",
"re",
".",
"compile",
"(",
"IP_DOMAIN_NAME_PORT_REGEX",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"re",
".",
"search",
"(",
"\"[.*()+]\"",
",",
"serialno",
")",
"and",
"not",
"ipPortRE",
".",
"match",
"(",
"serialno",
")",
":",
"# if a regex was used we have to determine the serialno used",
"serialno",
"=",
"ViewClient",
".",
"__obtainDeviceSerialNumber",
"(",
"device",
")",
"if",
"verbose",
":",
"print",
"(",
"'Actual device serialno=%s'",
"%",
"serialno",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"device",
",",
"serialno"
] |
https://github.com/dtmilano/AndroidViewClient/blob/421b86e3f1a57683557fc0173951cd0332ab43f4/src/com/dtmilano/android/viewclient.py#L2785-L2873
|
|
RidersDiscountCom/HypChat
|
d38e1a15031fdb11694c9ed971eb3e7e67e70907
|
hypchat/restobject.py
|
python
|
Room.latest
|
(self, not_before=None, maxResults=200)
|
return Linker._obj_from_text(resp.text, self._requests)
|
Return the latest room history.
If ``not_before`` is provided, messages that precede the message id will not be returned
|
Return the latest room history.
|
[
"Return",
"the",
"latest",
"room",
"history",
"."
] |
def latest(self, not_before=None, maxResults=200):
"""
Return the latest room history.
If ``not_before`` is provided, messages that precede the message id will not be returned
"""
params = {
"max-results": maxResults
}
if not_before is not None:
params["not-before"] = not_before
resp = self._requests.get(self.url + '/history/latest', params=params)
return Linker._obj_from_text(resp.text, self._requests)
|
[
"def",
"latest",
"(",
"self",
",",
"not_before",
"=",
"None",
",",
"maxResults",
"=",
"200",
")",
":",
"params",
"=",
"{",
"\"max-results\"",
":",
"maxResults",
"}",
"if",
"not_before",
"is",
"not",
"None",
":",
"params",
"[",
"\"not-before\"",
"]",
"=",
"not_before",
"resp",
"=",
"self",
".",
"_requests",
".",
"get",
"(",
"self",
".",
"url",
"+",
"'/history/latest'",
",",
"params",
"=",
"params",
")",
"return",
"Linker",
".",
"_obj_from_text",
"(",
"resp",
".",
"text",
",",
"self",
".",
"_requests",
")"
] |
https://github.com/RidersDiscountCom/HypChat/blob/d38e1a15031fdb11694c9ed971eb3e7e67e70907/hypchat/restobject.py#L189-L202
|
|
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/werkzeug/http.py
|
python
|
is_entity_header
|
(header)
|
return header.lower() in _entity_headers
|
Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
|
Check if a header is an entity header.
|
[
"Check",
"if",
"a",
"header",
"is",
"an",
"entity",
"header",
"."
] |
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
|
[
"def",
"is_entity_header",
"(",
"header",
")",
":",
"return",
"header",
".",
"lower",
"(",
")",
"in",
"_entity_headers"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/werkzeug/http.py#L800-L808
|
|
debian-calibre/calibre
|
020fc81d3936a64b2ac51459ecb796666ab6a051
|
src/calibre/gui2/device.py
|
python
|
DeviceManager.slow_driveinfo
|
(self)
|
Update the stored device information with the driveinfo if the
device indicates that getting driveinfo is slow
|
Update the stored device information with the driveinfo if the
device indicates that getting driveinfo is slow
|
[
"Update",
"the",
"stored",
"device",
"information",
"with",
"the",
"driveinfo",
"if",
"the",
"device",
"indicates",
"that",
"getting",
"driveinfo",
"is",
"slow"
] |
def slow_driveinfo(self):
''' Update the stored device information with the driveinfo if the
device indicates that getting driveinfo is slow '''
info = self._device_information['info']
if (not info[4] and self.device.SLOW_DRIVEINFO):
info = list(info)
info[4] = self.device.get_driveinfo()
self._device_information['info'] = tuple(info)
|
[
"def",
"slow_driveinfo",
"(",
"self",
")",
":",
"info",
"=",
"self",
".",
"_device_information",
"[",
"'info'",
"]",
"if",
"(",
"not",
"info",
"[",
"4",
"]",
"and",
"self",
".",
"device",
".",
"SLOW_DRIVEINFO",
")",
":",
"info",
"=",
"list",
"(",
"info",
")",
"info",
"[",
"4",
"]",
"=",
"self",
".",
"device",
".",
"get_driveinfo",
"(",
")",
"self",
".",
"_device_information",
"[",
"'info'",
"]",
"=",
"tuple",
"(",
"info",
")"
] |
https://github.com/debian-calibre/calibre/blob/020fc81d3936a64b2ac51459ecb796666ab6a051/src/calibre/gui2/device.py#L498-L505
|
||
oracle/graalpython
|
577e02da9755d916056184ec441c26e00b70145c
|
graalpython/lib-python/3/asyncio/futures.py
|
python
|
Future._log_traceback
|
(self)
|
return self.__log_traceback
|
[] |
def _log_traceback(self):
return self.__log_traceback
|
[
"def",
"_log_traceback",
"(",
"self",
")",
":",
"return",
"self",
".",
"__log_traceback"
] |
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/asyncio/futures.py#L107-L108
|
|||
hivesolutions/netius
|
c6fa76292be0367557518462e0b2bccd852b0d3d
|
src/netius/base/common.py
|
python
|
AbstractBase.expand
|
(self, value, encoding = "utf-8", force = False)
|
return file_path
|
Expands the provided string/bytes value into a file in the
current file system so that it may be correctly used by interfaces
that require certain values to be file system based.
The generated file is going to be removed on the cleanup operation
so that no temporary file leaking occurs (garbage collection).
In case the force value is provided the the file is created even
for situations where the provided value is invalid/unset.
:type value: String
:param value: The string/bytes based value that is going to be
expanded into a proper file system based (temporary) file.
:type encoding: String
:param encoding: The encoding that is going to be used to convert
the value into a bytes based one in case the provided value is not
bytes compliant (and must be converted).
:type force: bool
:param force: If the expansion operation should be performed even
for situations where the value is considered invalid/unset.
:rtype: String
:return: The path to the temporary file that has just been generated
for the expansion of the provided value.
|
Expands the provided string/bytes value into a file in the
current file system so that it may be correctly used by interfaces
that require certain values to be file system based.
|
[
"Expands",
"the",
"provided",
"string",
"/",
"bytes",
"value",
"into",
"a",
"file",
"in",
"the",
"current",
"file",
"system",
"so",
"that",
"it",
"may",
"be",
"correctly",
"used",
"by",
"interfaces",
"that",
"require",
"certain",
"values",
"to",
"be",
"file",
"system",
"based",
"."
] |
def expand(self, value, encoding = "utf-8", force = False):
"""
Expands the provided string/bytes value into a file in the
current file system so that it may be correctly used by interfaces
that require certain values to be file system based.
The generated file is going to be removed on the cleanup operation
so that no temporary file leaking occurs (garbage collection).
In case the force value is provided the the file is created even
for situations where the provided value is invalid/unset.
:type value: String
:param value: The string/bytes based value that is going to be
expanded into a proper file system based (temporary) file.
:type encoding: String
:param encoding: The encoding that is going to be used to convert
the value into a bytes based one in case the provided value is not
bytes compliant (and must be converted).
:type force: bool
:param force: If the expansion operation should be performed even
for situations where the value is considered invalid/unset.
:rtype: String
:return: The path to the temporary file that has just been generated
for the expansion of the provided value.
"""
if not value and not force: return value
is_bytes = legacy.is_bytes(value)
if not is_bytes: value = value.encode(encoding)
value = value.replace(b"\\n", b"\n")
fd, file_path = tempfile.mkstemp()
os.close(fd)
file = open(file_path, "wb")
try: file.write(value)
finally: file.close()
self._expanded.append(file_path)
return file_path
|
[
"def",
"expand",
"(",
"self",
",",
"value",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"value",
"and",
"not",
"force",
":",
"return",
"value",
"is_bytes",
"=",
"legacy",
".",
"is_bytes",
"(",
"value",
")",
"if",
"not",
"is_bytes",
":",
"value",
"=",
"value",
".",
"encode",
"(",
"encoding",
")",
"value",
"=",
"value",
".",
"replace",
"(",
"b\"\\\\n\"",
",",
"b\"\\n\"",
")",
"fd",
",",
"file_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
")",
"os",
".",
"close",
"(",
"fd",
")",
"file",
"=",
"open",
"(",
"file_path",
",",
"\"wb\"",
")",
"try",
":",
"file",
".",
"write",
"(",
"value",
")",
"finally",
":",
"file",
".",
"close",
"(",
")",
"self",
".",
"_expanded",
".",
"append",
"(",
"file_path",
")",
"return",
"file_path"
] |
https://github.com/hivesolutions/netius/blob/c6fa76292be0367557518462e0b2bccd852b0d3d/src/netius/base/common.py#L3083-L3120
|
|
LiDan456/MAD-GANs
|
3139a73a4112d3f3f18182c9a6cdc2c671e7cfe8
|
mod_core_rnn_cell_impl.py
|
python
|
_linear
|
(args, output_size, bias, bias_start=0.0, scope=None)
|
Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
|
Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
|
[
"Linear",
"map",
":",
"sum_i",
"(",
"args",
"[",
"i",
"]",
"*",
"W",
"[",
"i",
"]",
")",
"where",
"W",
"[",
"i",
"]",
"is",
"a",
"variable",
"."
] |
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=init_ops.constant_initializer(bias_start, dtype=dtype))
return nn_ops.bias_add(res, biases)
|
[
"def",
"_linear",
"(",
"args",
",",
"output_size",
",",
"bias",
",",
"bias_start",
"=",
"0.0",
",",
"scope",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
"or",
"(",
"nest",
".",
"is_sequence",
"(",
"args",
")",
"and",
"not",
"args",
")",
":",
"raise",
"ValueError",
"(",
"\"`args` must be specified\"",
")",
"if",
"not",
"nest",
".",
"is_sequence",
"(",
"args",
")",
":",
"args",
"=",
"[",
"args",
"]",
"# Calculate the total size of arguments on dimension 1.",
"total_arg_size",
"=",
"0",
"shapes",
"=",
"[",
"a",
".",
"get_shape",
"(",
")",
"for",
"a",
"in",
"args",
"]",
"for",
"shape",
"in",
"shapes",
":",
"if",
"shape",
".",
"ndims",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"linear is expecting 2D arguments: %s\"",
"%",
"shapes",
")",
"if",
"shape",
"[",
"1",
"]",
".",
"value",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"linear expects shape[1] to be provided for shape %s, \"",
"\"but saw %s\"",
"%",
"(",
"shape",
",",
"shape",
"[",
"1",
"]",
")",
")",
"else",
":",
"total_arg_size",
"+=",
"shape",
"[",
"1",
"]",
".",
"value",
"dtype",
"=",
"[",
"a",
".",
"dtype",
"for",
"a",
"in",
"args",
"]",
"[",
"0",
"]",
"# Now the computation.",
"scope",
"=",
"vs",
".",
"get_variable_scope",
"(",
")",
"with",
"vs",
".",
"variable_scope",
"(",
"scope",
")",
"as",
"outer_scope",
":",
"weights",
"=",
"vs",
".",
"get_variable",
"(",
"_WEIGHTS_VARIABLE_NAME",
",",
"[",
"total_arg_size",
",",
"output_size",
"]",
",",
"dtype",
"=",
"dtype",
")",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"res",
"=",
"math_ops",
".",
"matmul",
"(",
"args",
"[",
"0",
"]",
",",
"weights",
")",
"else",
":",
"res",
"=",
"math_ops",
".",
"matmul",
"(",
"array_ops",
".",
"concat",
"(",
"args",
",",
"1",
")",
",",
"weights",
")",
"if",
"not",
"bias",
":",
"return",
"res",
"with",
"vs",
".",
"variable_scope",
"(",
"outer_scope",
")",
"as",
"inner_scope",
":",
"inner_scope",
".",
"set_partitioner",
"(",
"None",
")",
"biases",
"=",
"vs",
".",
"get_variable",
"(",
"_BIAS_VARIABLE_NAME",
",",
"[",
"output_size",
"]",
",",
"dtype",
"=",
"dtype",
",",
"initializer",
"=",
"init_ops",
".",
"constant_initializer",
"(",
"bias_start",
",",
"dtype",
"=",
"dtype",
")",
")",
"return",
"nn_ops",
".",
"bias_add",
"(",
"res",
",",
"biases",
")"
] |
https://github.com/LiDan456/MAD-GANs/blob/3139a73a4112d3f3f18182c9a6cdc2c671e7cfe8/mod_core_rnn_cell_impl.py#L1011-L1063
|
||
GeospatialPython/pyshp
|
05e18f5171a6346a675646cb8bc7a8f84ed8d5d6
|
shapefile.py
|
python
|
Reader.iterRecords
|
(self, fields=None)
|
Returns a generator of records in a dbf file.
Useful for large shapefiles or dbf files.
To only read some of the fields, specify the 'fields' arg as a
list of one or more fieldnames.
|
Returns a generator of records in a dbf file.
Useful for large shapefiles or dbf files.
To only read some of the fields, specify the 'fields' arg as a
list of one or more fieldnames.
|
[
"Returns",
"a",
"generator",
"of",
"records",
"in",
"a",
"dbf",
"file",
".",
"Useful",
"for",
"large",
"shapefiles",
"or",
"dbf",
"files",
".",
"To",
"only",
"read",
"some",
"of",
"the",
"fields",
"specify",
"the",
"fields",
"arg",
"as",
"a",
"list",
"of",
"one",
"or",
"more",
"fieldnames",
"."
] |
def iterRecords(self, fields=None):
"""Returns a generator of records in a dbf file.
Useful for large shapefiles or dbf files.
To only read some of the fields, specify the 'fields' arg as a
list of one or more fieldnames.
"""
if self.numRecords is None:
self.__dbfHeader()
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHdrLength)
fieldTuples,recLookup,recStruct = self.__recordFields(fields)
for i in xrange(self.numRecords):
r = self.__record(oid=i, fieldTuples=fieldTuples, recLookup=recLookup, recStruct=recStruct)
if r:
yield r
|
[
"def",
"iterRecords",
"(",
"self",
",",
"fields",
"=",
"None",
")",
":",
"if",
"self",
".",
"numRecords",
"is",
"None",
":",
"self",
".",
"__dbfHeader",
"(",
")",
"f",
"=",
"self",
".",
"__getFileObj",
"(",
"self",
".",
"dbf",
")",
"f",
".",
"seek",
"(",
"self",
".",
"__dbfHdrLength",
")",
"fieldTuples",
",",
"recLookup",
",",
"recStruct",
"=",
"self",
".",
"__recordFields",
"(",
"fields",
")",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"numRecords",
")",
":",
"r",
"=",
"self",
".",
"__record",
"(",
"oid",
"=",
"i",
",",
"fieldTuples",
"=",
"fieldTuples",
",",
"recLookup",
"=",
"recLookup",
",",
"recStruct",
"=",
"recStruct",
")",
"if",
"r",
":",
"yield",
"r"
] |
https://github.com/GeospatialPython/pyshp/blob/05e18f5171a6346a675646cb8bc7a8f84ed8d5d6/shapefile.py#L1617-L1631
|
||
leancloud/satori
|
701caccbd4fe45765001ca60435c0cb499477c03
|
satori-rules/plugin/libs/urllib3/contrib/pyopenssl.py
|
python
|
WrappedSocket._decref_socketios
|
(self)
|
[] |
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
|
[
"def",
"_decref_socketios",
"(",
"self",
")",
":",
"if",
"self",
".",
"_makefile_refs",
">",
"0",
":",
"self",
".",
"_makefile_refs",
"-=",
"1",
"if",
"self",
".",
"_closed",
":",
"self",
".",
"close",
"(",
")"
] |
https://github.com/leancloud/satori/blob/701caccbd4fe45765001ca60435c0cb499477c03/satori-rules/plugin/libs/urllib3/contrib/pyopenssl.py#L271-L275
|
||||
tensorflow/model-analysis
|
e38c23ce76eff039548ce69e3160ed4d7984f2fc
|
tensorflow_model_analysis/eval_saved_model/example_trainers/fake_multi_examples_per_input_estimator.py
|
python
|
_parse_csv
|
(rows_string_tensor)
|
return {
'example_count': tf.gather(example_count, input_index),
'input_index': input_index,
'intra_input_index': intra_input_index,
'annotation': annotation,
}
|
Takes the string input tensor and returns a dict of rank-2 tensors.
|
Takes the string input tensor and returns a dict of rank-2 tensors.
|
[
"Takes",
"the",
"string",
"input",
"tensor",
"and",
"returns",
"a",
"dict",
"of",
"rank",
"-",
"2",
"tensors",
"."
] |
def _parse_csv(rows_string_tensor):
"""Takes the string input tensor and returns a dict of rank-2 tensors."""
example_count = tf.io.decode_csv(
records=rows_string_tensor,
record_defaults=[tf.constant([0], dtype=tf.int32, shape=None)])[0]
input_index, intra_input_index = _indices_from_example_count(example_count)
annotation = tf.strings.join([
'raw_input: ',
tf.gather(rows_string_tensor, input_index), '; index: ',
tf.as_string(intra_input_index)
])
return {
'example_count': tf.gather(example_count, input_index),
'input_index': input_index,
'intra_input_index': intra_input_index,
'annotation': annotation,
}
|
[
"def",
"_parse_csv",
"(",
"rows_string_tensor",
")",
":",
"example_count",
"=",
"tf",
".",
"io",
".",
"decode_csv",
"(",
"records",
"=",
"rows_string_tensor",
",",
"record_defaults",
"=",
"[",
"tf",
".",
"constant",
"(",
"[",
"0",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"None",
")",
"]",
")",
"[",
"0",
"]",
"input_index",
",",
"intra_input_index",
"=",
"_indices_from_example_count",
"(",
"example_count",
")",
"annotation",
"=",
"tf",
".",
"strings",
".",
"join",
"(",
"[",
"'raw_input: '",
",",
"tf",
".",
"gather",
"(",
"rows_string_tensor",
",",
"input_index",
")",
",",
"'; index: '",
",",
"tf",
".",
"as_string",
"(",
"intra_input_index",
")",
"]",
")",
"return",
"{",
"'example_count'",
":",
"tf",
".",
"gather",
"(",
"example_count",
",",
"input_index",
")",
",",
"'input_index'",
":",
"input_index",
",",
"'intra_input_index'",
":",
"intra_input_index",
",",
"'annotation'",
":",
"annotation",
",",
"}"
] |
https://github.com/tensorflow/model-analysis/blob/e38c23ce76eff039548ce69e3160ed4d7984f2fc/tensorflow_model_analysis/eval_saved_model/example_trainers/fake_multi_examples_per_input_estimator.py#L65-L83
|
|
pyscf/pyscf
|
0adfb464333f5ceee07b664f291d4084801bae64
|
pyscf/dft/libxc.py
|
python
|
parse_xc_name
|
(xc_name='LDA,VWN')
|
return fn_facs[0][0], fn_facs[1][0]
|
Convert the XC functional name to libxc library internal ID.
|
Convert the XC functional name to libxc library internal ID.
|
[
"Convert",
"the",
"XC",
"functional",
"name",
"to",
"libxc",
"library",
"internal",
"ID",
"."
] |
def parse_xc_name(xc_name='LDA,VWN'):
'''Convert the XC functional name to libxc library internal ID.
'''
fn_facs = parse_xc(xc_name)[1]
return fn_facs[0][0], fn_facs[1][0]
|
[
"def",
"parse_xc_name",
"(",
"xc_name",
"=",
"'LDA,VWN'",
")",
":",
"fn_facs",
"=",
"parse_xc",
"(",
"xc_name",
")",
"[",
"1",
"]",
"return",
"fn_facs",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"fn_facs",
"[",
"1",
"]",
"[",
"0",
"]"
] |
https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/dft/libxc.py#L1036-L1040
|
|
Zeta36/tensorflow-tex-wavenet
|
f252ad805e20d2498755f0aafd18549f6f9823d4
|
wavenet/text_reader.py
|
python
|
load_generic_text
|
(directory)
|
Generator that yields text raw from the directory.
|
Generator that yields text raw from the directory.
|
[
"Generator",
"that",
"yields",
"text",
"raw",
"from",
"the",
"directory",
"."
] |
def load_generic_text(directory):
'''Generator that yields text raw from the directory.'''
files = find_files(directory)
for filename in files:
text = _read_text(filename)
for index, item in enumerate(text):
text[index] = ord(text[index])
text = np.array(text, dtype='float32')
text = text.reshape(-1, 1)
yield text, filename
|
[
"def",
"load_generic_text",
"(",
"directory",
")",
":",
"files",
"=",
"find_files",
"(",
"directory",
")",
"for",
"filename",
"in",
"files",
":",
"text",
"=",
"_read_text",
"(",
"filename",
")",
"for",
"index",
",",
"item",
"in",
"enumerate",
"(",
"text",
")",
":",
"text",
"[",
"index",
"]",
"=",
"ord",
"(",
"text",
"[",
"index",
"]",
")",
"text",
"=",
"np",
".",
"array",
"(",
"text",
",",
"dtype",
"=",
"'float32'",
")",
"text",
"=",
"text",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"yield",
"text",
",",
"filename"
] |
https://github.com/Zeta36/tensorflow-tex-wavenet/blob/f252ad805e20d2498755f0aafd18549f6f9823d4/wavenet/text_reader.py#L21-L30
|
||
ansible/ansibullbot
|
c0a777dba16411db2ab0ce1a13eb166de2bed063
|
ansibullbot/plugins/notifications.py
|
python
|
get_notification_facts
|
(issuewrapper, meta, botmeta=None)
|
return nfacts
|
Build facts about mentions/pings
|
Build facts about mentions/pings
|
[
"Build",
"facts",
"about",
"mentions",
"/",
"pings"
] |
def get_notification_facts(issuewrapper, meta, botmeta=None):
'''Build facts about mentions/pings'''
iw = issuewrapper
nfacts = {
'to_notify': [],
'to_assign': []
}
if botmeta and not botmeta.get('notifications', False):
return nfacts
if iw.is_pullrequest() and iw.merge_commits:
return nfacts
# who is assigned?
current_assignees = iw.assignees
# add people from files and from matches
if iw.is_pullrequest() or meta.get('guessed_components') or meta.get('component_matches') or meta.get('module_match'):
fassign = sorted(set(meta['component_maintainers'][:]))
fnotify = sorted(set(meta['component_notifiers'][:]))
if 'ansible' in fassign:
fassign.remove('ansible')
if 'ansible' in fnotify:
fnotify.remove('ansible')
for user in fnotify:
if user == iw.submitter:
continue
if not iw.history.last_notified(user) and \
not iw.history.was_assigned(user) and \
not iw.history.was_subscribed(user) and \
not iw.history.last_comment(user):
nfacts['to_notify'].append(user)
else:
logging.info(f'{user} already notified')
for user in fassign:
if user == iw.submitter:
continue
if user in nfacts['to_assign']:
continue
#if user not in current_assignees and iw.repo.repo.has_in_assignees(user):
if user not in current_assignees and iw.repo.has_in_assignees(user):
nfacts['to_assign'].append(user)
# prevent duplication
nfacts['to_assign'] = sorted(set(nfacts['to_assign']))
nfacts['to_notify'] = sorted(
set(nfacts['to_notify']) # + nfacts[u'to_assign'])
)
return nfacts
|
[
"def",
"get_notification_facts",
"(",
"issuewrapper",
",",
"meta",
",",
"botmeta",
"=",
"None",
")",
":",
"iw",
"=",
"issuewrapper",
"nfacts",
"=",
"{",
"'to_notify'",
":",
"[",
"]",
",",
"'to_assign'",
":",
"[",
"]",
"}",
"if",
"botmeta",
"and",
"not",
"botmeta",
".",
"get",
"(",
"'notifications'",
",",
"False",
")",
":",
"return",
"nfacts",
"if",
"iw",
".",
"is_pullrequest",
"(",
")",
"and",
"iw",
".",
"merge_commits",
":",
"return",
"nfacts",
"# who is assigned?",
"current_assignees",
"=",
"iw",
".",
"assignees",
"# add people from files and from matches",
"if",
"iw",
".",
"is_pullrequest",
"(",
")",
"or",
"meta",
".",
"get",
"(",
"'guessed_components'",
")",
"or",
"meta",
".",
"get",
"(",
"'component_matches'",
")",
"or",
"meta",
".",
"get",
"(",
"'module_match'",
")",
":",
"fassign",
"=",
"sorted",
"(",
"set",
"(",
"meta",
"[",
"'component_maintainers'",
"]",
"[",
":",
"]",
")",
")",
"fnotify",
"=",
"sorted",
"(",
"set",
"(",
"meta",
"[",
"'component_notifiers'",
"]",
"[",
":",
"]",
")",
")",
"if",
"'ansible'",
"in",
"fassign",
":",
"fassign",
".",
"remove",
"(",
"'ansible'",
")",
"if",
"'ansible'",
"in",
"fnotify",
":",
"fnotify",
".",
"remove",
"(",
"'ansible'",
")",
"for",
"user",
"in",
"fnotify",
":",
"if",
"user",
"==",
"iw",
".",
"submitter",
":",
"continue",
"if",
"not",
"iw",
".",
"history",
".",
"last_notified",
"(",
"user",
")",
"and",
"not",
"iw",
".",
"history",
".",
"was_assigned",
"(",
"user",
")",
"and",
"not",
"iw",
".",
"history",
".",
"was_subscribed",
"(",
"user",
")",
"and",
"not",
"iw",
".",
"history",
".",
"last_comment",
"(",
"user",
")",
":",
"nfacts",
"[",
"'to_notify'",
"]",
".",
"append",
"(",
"user",
")",
"else",
":",
"logging",
".",
"info",
"(",
"f'{user} already notified'",
")",
"for",
"user",
"in",
"fassign",
":",
"if",
"user",
"==",
"iw",
".",
"submitter",
":",
"continue",
"if",
"user",
"in",
"nfacts",
"[",
"'to_assign'",
"]",
":",
"continue",
"#if user not in current_assignees and iw.repo.repo.has_in_assignees(user):",
"if",
"user",
"not",
"in",
"current_assignees",
"and",
"iw",
".",
"repo",
".",
"has_in_assignees",
"(",
"user",
")",
":",
"nfacts",
"[",
"'to_assign'",
"]",
".",
"append",
"(",
"user",
")",
"# prevent duplication",
"nfacts",
"[",
"'to_assign'",
"]",
"=",
"sorted",
"(",
"set",
"(",
"nfacts",
"[",
"'to_assign'",
"]",
")",
")",
"nfacts",
"[",
"'to_notify'",
"]",
"=",
"sorted",
"(",
"set",
"(",
"nfacts",
"[",
"'to_notify'",
"]",
")",
"# + nfacts[u'to_assign'])",
")",
"return",
"nfacts"
] |
https://github.com/ansible/ansibullbot/blob/c0a777dba16411db2ab0ce1a13eb166de2bed063/ansibullbot/plugins/notifications.py#L4-L61
|
|
PythonJS/PythonJS
|
591a80afd8233fb715493591db2b68f1748558d9
|
pythonjs/lib/python2.7/codecs.py
|
python
|
StreamRecoder.next
|
(self)
|
return data
|
Return the next decoded line from the input stream.
|
Return the next decoded line from the input stream.
|
[
"Return",
"the",
"next",
"decoded",
"line",
"from",
"the",
"input",
"stream",
"."
] |
def next(self):
""" Return the next decoded line from the input stream."""
data = self.reader.next()
data, bytesencoded = self.encode(data, self.errors)
return data
|
[
"def",
"next",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"reader",
".",
"next",
"(",
")",
"data",
",",
"bytesencoded",
"=",
"self",
".",
"encode",
"(",
"data",
",",
"self",
".",
"errors",
")",
"return",
"data"
] |
https://github.com/PythonJS/PythonJS/blob/591a80afd8233fb715493591db2b68f1748558d9/pythonjs/lib/python2.7/codecs.py#L800-L805
|
|
open-cogsci/OpenSesame
|
c4a3641b097a80a76937edbd8c365f036bcc9705
|
opensesameandroid.py
|
python
|
main
|
()
|
The main routine, which is called automatically by pgs4a
|
The main routine, which is called automatically by pgs4a
|
[
"The",
"main",
"routine",
"which",
"is",
"called",
"automatically",
"by",
"pgs4a"
] |
def main():
"""The main routine, which is called automatically by pgs4a"""
if android is not None:
sys.stdout = stdout_file(sys.stdout)
# First check if an autorun file has been specified. This is a yaml file
# with the experiment path, subject nr, and logfile in it.
for folder in sdcard_folders:
path = os.path.join(folder, 'opensesame-autorun.yml')
print(path)
if os.path.exists(path):
d = yaml.load(open(path))
experiment_path = d['experiment']
subject_nr = d['subject_nr']
logfile = d['logfile']
break
# If no autorun file has been specified, we launch the menu experiment.
else:
src = 'opensesame_resources/android/menu.osexp'
print('Launching %s' % src)
menu = experiment('Experiment', src)
menu.experiment_path = None
menu.run()
menu.end()
clean_up(menu.debug)
experiment_path = menu._experiment
subject_nr = menu._subject_nr
logfile = menu._logfile
# Next run the actual experiment!
try:
exp = experiment(name='Experiment', string=experiment_path,
experiment_path=os.path.dirname(experiment_path))
except Exception as e:
for s in traceback.format_exc(e).split("\n"):
print(s)
print('Launching %s' % experiment_path)
exp.set_subject(subject_nr)
exp.logfile = logfile
# Capture exceptions and write them to the standard output so they can be
# inspected
try:
exp.run()
except Exception as e:
for s in traceback.format_exc(e).split("\n"):
print(s)
try:
exp.end()
except Exception as e:
for s in traceback.format_exc(e).split("\n"):
print(s)
clean_up(exp.debug)
pygame.display.quit()
|
[
"def",
"main",
"(",
")",
":",
"if",
"android",
"is",
"not",
"None",
":",
"sys",
".",
"stdout",
"=",
"stdout_file",
"(",
"sys",
".",
"stdout",
")",
"# First check if an autorun file has been specified. This is a yaml file",
"# with the experiment path, subject nr, and logfile in it.",
"for",
"folder",
"in",
"sdcard_folders",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'opensesame-autorun.yml'",
")",
"print",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"d",
"=",
"yaml",
".",
"load",
"(",
"open",
"(",
"path",
")",
")",
"experiment_path",
"=",
"d",
"[",
"'experiment'",
"]",
"subject_nr",
"=",
"d",
"[",
"'subject_nr'",
"]",
"logfile",
"=",
"d",
"[",
"'logfile'",
"]",
"break",
"# If no autorun file has been specified, we launch the menu experiment.",
"else",
":",
"src",
"=",
"'opensesame_resources/android/menu.osexp'",
"print",
"(",
"'Launching %s'",
"%",
"src",
")",
"menu",
"=",
"experiment",
"(",
"'Experiment'",
",",
"src",
")",
"menu",
".",
"experiment_path",
"=",
"None",
"menu",
".",
"run",
"(",
")",
"menu",
".",
"end",
"(",
")",
"clean_up",
"(",
"menu",
".",
"debug",
")",
"experiment_path",
"=",
"menu",
".",
"_experiment",
"subject_nr",
"=",
"menu",
".",
"_subject_nr",
"logfile",
"=",
"menu",
".",
"_logfile",
"# Next run the actual experiment!",
"try",
":",
"exp",
"=",
"experiment",
"(",
"name",
"=",
"'Experiment'",
",",
"string",
"=",
"experiment_path",
",",
"experiment_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"experiment_path",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"for",
"s",
"in",
"traceback",
".",
"format_exc",
"(",
"e",
")",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"print",
"(",
"s",
")",
"print",
"(",
"'Launching %s'",
"%",
"experiment_path",
")",
"exp",
".",
"set_subject",
"(",
"subject_nr",
")",
"exp",
".",
"logfile",
"=",
"logfile",
"# Capture exceptions and write them to the standard output so they can be",
"# inspected",
"try",
":",
"exp",
".",
"run",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"for",
"s",
"in",
"traceback",
".",
"format_exc",
"(",
"e",
")",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"print",
"(",
"s",
")",
"try",
":",
"exp",
".",
"end",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"for",
"s",
"in",
"traceback",
".",
"format_exc",
"(",
"e",
")",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"print",
"(",
"s",
")",
"clean_up",
"(",
"exp",
".",
"debug",
")",
"pygame",
".",
"display",
".",
"quit",
"(",
")"
] |
https://github.com/open-cogsci/OpenSesame/blob/c4a3641b097a80a76937edbd8c365f036bcc9705/opensesameandroid.py#L77-L133
|
||
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/mod_random.py
|
python
|
seed
|
(range=10, hash=None)
|
return random.randrange(range)
|
Returns a random number within a range. Optional hash argument can
be any hashable object. If hash is omitted or None, the id of the minion is used.
.. versionadded:: 2015.8.0
hash: None
Any hashable object.
range: 10
Any valid integer number
CLI Example:
.. code-block:: bash
salt '*' random.seed 10 hash=None
|
Returns a random number within a range. Optional hash argument can
be any hashable object. If hash is omitted or None, the id of the minion is used.
|
[
"Returns",
"a",
"random",
"number",
"within",
"a",
"range",
".",
"Optional",
"hash",
"argument",
"can",
"be",
"any",
"hashable",
"object",
".",
"If",
"hash",
"is",
"omitted",
"or",
"None",
"the",
"id",
"of",
"the",
"minion",
"is",
"used",
"."
] |
def seed(range=10, hash=None):
"""
Returns a random number within a range. Optional hash argument can
be any hashable object. If hash is omitted or None, the id of the minion is used.
.. versionadded:: 2015.8.0
hash: None
Any hashable object.
range: 10
Any valid integer number
CLI Example:
.. code-block:: bash
salt '*' random.seed 10 hash=None
"""
if hash is None:
hash = __grains__["id"]
random.seed(hash)
return random.randrange(range)
|
[
"def",
"seed",
"(",
"range",
"=",
"10",
",",
"hash",
"=",
"None",
")",
":",
"if",
"hash",
"is",
"None",
":",
"hash",
"=",
"__grains__",
"[",
"\"id\"",
"]",
"random",
".",
"seed",
"(",
"hash",
")",
"return",
"random",
".",
"randrange",
"(",
"range",
")"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/mod_random.py#L256-L279
|
|
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/email/_header_value_parser.py
|
python
|
_validate_xtext
|
(xtext)
|
If input token contains ASCII non-printables, register a defect.
|
If input token contains ASCII non-printables, register a defect.
|
[
"If",
"input",
"token",
"contains",
"ASCII",
"non",
"-",
"printables",
"register",
"a",
"defect",
"."
] |
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
|
[
"def",
"_validate_xtext",
"(",
"xtext",
")",
":",
"non_printables",
"=",
"_non_printable_finder",
"(",
"xtext",
")",
"if",
"non_printables",
":",
"xtext",
".",
"defects",
".",
"append",
"(",
"errors",
".",
"NonPrintableDefect",
"(",
"non_printables",
")",
")",
"if",
"utils",
".",
"_has_surrogates",
"(",
"xtext",
")",
":",
"xtext",
".",
"defects",
".",
"append",
"(",
"errors",
".",
"UndecodableBytesDefect",
"(",
"\"Non-ASCII characters found in header token\"",
")",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/email/_header_value_parser.py#L1349-L1357
|
||
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
lib/pkg_resources/_vendor/pyparsing.py
|
python
|
ParseResults.haskeys
|
( self )
|
return bool(self.__tokdict)
|
Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names.
|
Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names.
|
[
"Since",
"keys",
"()",
"returns",
"an",
"iterator",
"this",
"method",
"is",
"helpful",
"in",
"bypassing",
"code",
"that",
"looks",
"for",
"the",
"existence",
"of",
"any",
"defined",
"results",
"names",
"."
] |
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
|
[
"def",
"haskeys",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"__tokdict",
")"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/lib/pkg_resources/_vendor/pyparsing.py#L506-L509
|
|
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
corehq/apps/accounting/forms.py
|
python
|
SubscriptionForm.clean_domain
|
(self)
|
return domain
|
[] |
def clean_domain(self):
domain = self.cleaned_data['domain']
if self.fields['domain'].required:
domain_obj = Domain.get_by_name(domain)
if domain_obj is None:
raise forms.ValidationError(_("A valid project space is required."))
return domain
|
[
"def",
"clean_domain",
"(",
"self",
")",
":",
"domain",
"=",
"self",
".",
"cleaned_data",
"[",
"'domain'",
"]",
"if",
"self",
".",
"fields",
"[",
"'domain'",
"]",
".",
"required",
":",
"domain_obj",
"=",
"Domain",
".",
"get_by_name",
"(",
"domain",
")",
"if",
"domain_obj",
"is",
"None",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"\"A valid project space is required.\"",
")",
")",
"return",
"domain"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/accounting/forms.py#L792-L798
|
|||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/whoosh/qparser/plugins.py
|
python
|
MultifieldPlugin.filters
|
(self, parser)
|
return [(self.do_multifield, 110)]
|
[] |
def filters(self, parser):
# Run after the fields filter applies explicit fieldnames (at priority
# 100)
return [(self.do_multifield, 110)]
|
[
"def",
"filters",
"(",
"self",
",",
"parser",
")",
":",
"# Run after the fields filter applies explicit fieldnames (at priority",
"# 100)",
"return",
"[",
"(",
"self",
".",
"do_multifield",
",",
"110",
")",
"]"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/qparser/plugins.py#L1164-L1167
|
|||
jensl/critic
|
c2d962b909ff7ef2f09bccbeb636333920b3659e
|
src/api/comment.py
|
python
|
Comment.text
|
(self)
|
return self._impl.text
|
The comment's text
|
The comment's text
|
[
"The",
"comment",
"s",
"text"
] |
def text(self):
"""The comment's text"""
return self._impl.text
|
[
"def",
"text",
"(",
"self",
")",
":",
"return",
"self",
".",
"_impl",
".",
"text"
] |
https://github.com/jensl/critic/blob/c2d962b909ff7ef2f09bccbeb636333920b3659e/src/api/comment.py#L101-L103
|
|
brython-dev/brython
|
9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3
|
www/src/Lib/site-packages/simpleaio/futures.py
|
python
|
Future._schedule_callbacks
|
(self)
|
Internal: Ask the event loop to call all callbacks.
The callbacks are scheduled to be called as soon as possible. Also
clears the callback list.
|
Internal: Ask the event loop to call all callbacks.
|
[
"Internal",
":",
"Ask",
"the",
"event",
"loop",
"to",
"call",
"all",
"callbacks",
"."
] |
def _schedule_callbacks(self):
"""Internal: Ask the event loop to call all callbacks.
The callbacks are scheduled to be called as soon as possible. Also
clears the callback list.
"""
callbacks = self._callbacks[:]
if not callbacks:
return
self._callbacks[:] = []
for callback in callbacks:
self._loop.call_soon(callback, self)
|
[
"def",
"_schedule_callbacks",
"(",
"self",
")",
":",
"callbacks",
"=",
"self",
".",
"_callbacks",
"[",
":",
"]",
"if",
"not",
"callbacks",
":",
"return",
"self",
".",
"_callbacks",
"[",
":",
"]",
"=",
"[",
"]",
"for",
"callback",
"in",
"callbacks",
":",
"self",
".",
"_loop",
".",
"call_soon",
"(",
"callback",
",",
"self",
")"
] |
https://github.com/brython-dev/brython/blob/9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3/www/src/Lib/site-packages/simpleaio/futures.py#L221-L233
|
||
shiva-spampot/shiva
|
7d7208e96289dc684ebd69b7ed5818d73bbfaffe
|
analyzer/core/server.py
|
python
|
QueueReceiver.process_message
|
(self, msg)
|
Exactly the same as SMTPReceiver.process_message but just designed for the queue's
quirks.
|
Exactly the same as SMTPReceiver.process_message but just designed for the queue's
quirks.
|
[
"Exactly",
"the",
"same",
"as",
"SMTPReceiver",
".",
"process_message",
"but",
"just",
"designed",
"for",
"the",
"queue",
"s",
"quirks",
"."
] |
def process_message(self, msg):
"""
Exactly the same as SMTPReceiver.process_message but just designed for the queue's
quirks.
"""
#self.msg = self.start.msg
try:
Peer = self.queue_dir
From = msg['from']
To = [msg['to']]
logging.debug("Message received from Peer: %r, From: %r, to To %r." % (Peer, From, To))
routing.Router.deliver(msg)
except SMTPError, err:
# looks like they want to return an error, so send it out
logging.exception("Raising SMTPError when running in a QueueReceiver is unsupported.")
undeliverable_message(msg.original, err.message)
except:
logging.exception("Exception while processing message from Peer: "
"%r, From: %r, to To %r." % (Peer, From, To))
undeliverable_message(msg.original, "Router failed to catch exception.")
|
[
"def",
"process_message",
"(",
"self",
",",
"msg",
")",
":",
"#self.msg = self.start.msg",
"try",
":",
"Peer",
"=",
"self",
".",
"queue_dir",
"From",
"=",
"msg",
"[",
"'from'",
"]",
"To",
"=",
"[",
"msg",
"[",
"'to'",
"]",
"]",
"logging",
".",
"debug",
"(",
"\"Message received from Peer: %r, From: %r, to To %r.\"",
"%",
"(",
"Peer",
",",
"From",
",",
"To",
")",
")",
"routing",
".",
"Router",
".",
"deliver",
"(",
"msg",
")",
"except",
"SMTPError",
",",
"err",
":",
"# looks like they want to return an error, so send it out",
"logging",
".",
"exception",
"(",
"\"Raising SMTPError when running in a QueueReceiver is unsupported.\"",
")",
"undeliverable_message",
"(",
"msg",
".",
"original",
",",
"err",
".",
"message",
")",
"except",
":",
"logging",
".",
"exception",
"(",
"\"Exception while processing message from Peer: \"",
"\"%r, From: %r, to To %r.\"",
"%",
"(",
"Peer",
",",
"From",
",",
"To",
")",
")",
"undeliverable_message",
"(",
"msg",
".",
"original",
",",
"\"Router failed to catch exception.\"",
")"
] |
https://github.com/shiva-spampot/shiva/blob/7d7208e96289dc684ebd69b7ed5818d73bbfaffe/analyzer/core/server.py#L313-L334
|
||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/rings/polynomial/polynomial_quotient_ring.py
|
python
|
PolynomialQuotientRing_generic._coerce_map_from_
|
(self, R)
|
r"""
Return a coerce map from ``R``.
Anything coercing into the underlying polynomial ring coerces into this
quotient. Furthermore, for quotients `R=A[x]/(f)` and `S=B[x]/(g)` with
a coercion `R\to S` there is a coercion iff `f` divides `g`.
AUTHOR:
- Simon King (2010-12): :trac:`8800`
TESTS::
sage: P5.<x> = GF(5)[]
sage: Q = P5.quo([(x^2+1)^2])
sage: P.<x> = ZZ[]
sage: Q1 = P.quo([(x^2+1)^2*(x^2-3)])
sage: Q2 = P.quo([(x^2+1)^2*(x^5+3)])
sage: Q.has_coerce_map_from(Q1) #indirect doctest
True
sage: Q1.has_coerce_map_from(Q)
False
sage: Q1.has_coerce_map_from(Q2)
False
The following tests against a bug fixed in :trac:`8992`::
sage: P.<x> = QQ[]
sage: Q1 = P.quo([(x^2+1)^2*(x^2-3)])
sage: R.<y> = P[]
sage: Q2 = R.quo([(y^2+1)])
sage: Q2.has_coerce_map_from(Q1)
False
|
r"""
Return a coerce map from ``R``.
|
[
"r",
"Return",
"a",
"coerce",
"map",
"from",
"R",
"."
] |
def _coerce_map_from_(self, R):
r"""
Return a coerce map from ``R``.
Anything coercing into the underlying polynomial ring coerces into this
quotient. Furthermore, for quotients `R=A[x]/(f)` and `S=B[x]/(g)` with
a coercion `R\to S` there is a coercion iff `f` divides `g`.
AUTHOR:
- Simon King (2010-12): :trac:`8800`
TESTS::
sage: P5.<x> = GF(5)[]
sage: Q = P5.quo([(x^2+1)^2])
sage: P.<x> = ZZ[]
sage: Q1 = P.quo([(x^2+1)^2*(x^2-3)])
sage: Q2 = P.quo([(x^2+1)^2*(x^5+3)])
sage: Q.has_coerce_map_from(Q1) #indirect doctest
True
sage: Q1.has_coerce_map_from(Q)
False
sage: Q1.has_coerce_map_from(Q2)
False
The following tests against a bug fixed in :trac:`8992`::
sage: P.<x> = QQ[]
sage: Q1 = P.quo([(x^2+1)^2*(x^2-3)])
sage: R.<y> = P[]
sage: Q2 = R.quo([(y^2+1)])
sage: Q2.has_coerce_map_from(Q1)
False
"""
if self.__ring.has_coerce_map_from(R):
return True
if isinstance(R, PolynomialQuotientRing_generic):
try:
if not self.__polynomial.divides(R.modulus()):
return False
except (ZeroDivisionError,ArithmeticError):
return False
from sage.all import Hom
parent = Hom(R, self, category=self.category()._meet_(R.category()))
return parent.__make_element_class__(PolynomialQuotientRing_coercion)(R, self, category=parent.homset_category())
|
[
"def",
"_coerce_map_from_",
"(",
"self",
",",
"R",
")",
":",
"if",
"self",
".",
"__ring",
".",
"has_coerce_map_from",
"(",
"R",
")",
":",
"return",
"True",
"if",
"isinstance",
"(",
"R",
",",
"PolynomialQuotientRing_generic",
")",
":",
"try",
":",
"if",
"not",
"self",
".",
"__polynomial",
".",
"divides",
"(",
"R",
".",
"modulus",
"(",
")",
")",
":",
"return",
"False",
"except",
"(",
"ZeroDivisionError",
",",
"ArithmeticError",
")",
":",
"return",
"False",
"from",
"sage",
".",
"all",
"import",
"Hom",
"parent",
"=",
"Hom",
"(",
"R",
",",
"self",
",",
"category",
"=",
"self",
".",
"category",
"(",
")",
".",
"_meet_",
"(",
"R",
".",
"category",
"(",
")",
")",
")",
"return",
"parent",
".",
"__make_element_class__",
"(",
"PolynomialQuotientRing_coercion",
")",
"(",
"R",
",",
"self",
",",
"category",
"=",
"parent",
".",
"homset_category",
"(",
")",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/polynomial/polynomial_quotient_ring.py#L530-L576
|
||
rossant/galry
|
6201fa32fb5c9ef3cea700cc22caf52fb69ebe31
|
galry/glrenderer.py
|
python
|
GLVisualRenderer.initialize_index
|
(self, name)
|
[] |
def initialize_index(self, name):
variable = self.get_variable(name)
variable['buffer'] = Attribute.create()
|
[
"def",
"initialize_index",
"(",
"self",
",",
"name",
")",
":",
"variable",
"=",
"self",
".",
"get_variable",
"(",
"name",
")",
"variable",
"[",
"'buffer'",
"]",
"=",
"Attribute",
".",
"create",
"(",
")"
] |
https://github.com/rossant/galry/blob/6201fa32fb5c9ef3cea700cc22caf52fb69ebe31/galry/glrenderer.py#L860-L862
|
||||
pantsbuild/pex
|
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
|
pex/vendor/_vendored/packaging/pyparsing.py
|
python
|
ParserElement.__req__
|
(self, other)
|
return self == other
|
[] |
def __req__(self, other):
return self == other
|
[
"def",
"__req__",
"(",
"self",
",",
"other",
")",
":",
"return",
"self",
"==",
"other"
] |
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/packaging/pyparsing.py#L2602-L2603
|
|||
jceb/vim-orgmode
|
7882e202a3115a07be5300fd596194c94d622911
|
ftplugin/orgmode/plugins/Export.py
|
python
|
Export.__init__
|
(self)
|
u""" Initialize plugin
|
u""" Initialize plugin
|
[
"u",
"Initialize",
"plugin"
] |
def __init__(self):
u""" Initialize plugin """
object.__init__(self)
# menu entries this plugin should create
self.menu = ORGMODE.orgmenu + Submenu(u'Export')
# key bindings for this plugin
# key bindings are also registered through the menu so only additional
# bindings should be put in this variable
self.keybindings = []
# commands for this plugin
self.commands = []
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"object",
".",
"__init__",
"(",
"self",
")",
"# menu entries this plugin should create",
"self",
".",
"menu",
"=",
"ORGMODE",
".",
"orgmenu",
"+",
"Submenu",
"(",
"u'Export'",
")",
"# key bindings for this plugin",
"# key bindings are also registered through the menu so only additional",
"# bindings should be put in this variable",
"self",
".",
"keybindings",
"=",
"[",
"]",
"# commands for this plugin",
"self",
".",
"commands",
"=",
"[",
"]"
] |
https://github.com/jceb/vim-orgmode/blob/7882e202a3115a07be5300fd596194c94d622911/ftplugin/orgmode/plugins/Export.py#L27-L39
|
||
pylast/pylast
|
a204055798e05364965e321e8d8755c1e223a464
|
src/pylast/__init__.py
|
python
|
User.get_image
|
(self, size=SIZE_EXTRA_LARGE)
|
return _extract_all(doc, "image")[size]
|
Returns the user's avatar
size can be one of:
SIZE_EXTRA_LARGE
SIZE_LARGE
SIZE_MEDIUM
SIZE_SMALL
|
Returns the user's avatar
size can be one of:
SIZE_EXTRA_LARGE
SIZE_LARGE
SIZE_MEDIUM
SIZE_SMALL
|
[
"Returns",
"the",
"user",
"s",
"avatar",
"size",
"can",
"be",
"one",
"of",
":",
"SIZE_EXTRA_LARGE",
"SIZE_LARGE",
"SIZE_MEDIUM",
"SIZE_SMALL"
] |
def get_image(self, size=SIZE_EXTRA_LARGE):
"""
Returns the user's avatar
size can be one of:
SIZE_EXTRA_LARGE
SIZE_LARGE
SIZE_MEDIUM
SIZE_SMALL
"""
doc = self._request(self.ws_prefix + ".getInfo", True)
return _extract_all(doc, "image")[size]
|
[
"def",
"get_image",
"(",
"self",
",",
"size",
"=",
"SIZE_EXTRA_LARGE",
")",
":",
"doc",
"=",
"self",
".",
"_request",
"(",
"self",
".",
"ws_prefix",
"+",
"\".getInfo\"",
",",
"True",
")",
"return",
"_extract_all",
"(",
"doc",
",",
"\"image\"",
")",
"[",
"size",
"]"
] |
https://github.com/pylast/pylast/blob/a204055798e05364965e321e8d8755c1e223a464/src/pylast/__init__.py#L2580-L2592
|
|
JaniceWuo/MovieRecommend
|
4c86db64ca45598917d304f535413df3bc9fea65
|
movierecommend/venv1/Lib/site-packages/django/template/defaulttags.py
|
python
|
now
|
(parser, token)
|
return NowNode(format_string, asvar)
|
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
|
Displays the date, formatted according to the given string.
|
[
"Displays",
"the",
"date",
"formatted",
"according",
"to",
"the",
"given",
"string",
"."
] |
def now(parser, token):
"""
Displays the date, formatted according to the given string.
Uses the same format as PHP's ``date()`` function; see http://php.net/date
for all the possible values.
Sample usage::
It is {% now "jS F Y H:i" %}
"""
bits = token.split_contents()
asvar = None
if len(bits) == 4 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits) != 2:
raise TemplateSyntaxError("'now' statement takes one argument")
format_string = bits[1][1:-1]
return NowNode(format_string, asvar)
|
[
"def",
"now",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"asvar",
"=",
"None",
"if",
"len",
"(",
"bits",
")",
"==",
"4",
"and",
"bits",
"[",
"-",
"2",
"]",
"==",
"'as'",
":",
"asvar",
"=",
"bits",
"[",
"-",
"1",
"]",
"bits",
"=",
"bits",
"[",
":",
"-",
"2",
"]",
"if",
"len",
"(",
"bits",
")",
"!=",
"2",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"'now' statement takes one argument\"",
")",
"format_string",
"=",
"bits",
"[",
"1",
"]",
"[",
"1",
":",
"-",
"1",
"]",
"return",
"NowNode",
"(",
"format_string",
",",
"asvar",
")"
] |
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/template/defaulttags.py#L1148-L1167
|
|
google-research/motion_imitation
|
d0e7b963c5a301984352d25a3ee0820266fa4218
|
motion_imitation/envs/env_wrappers/imitation_task.py
|
python
|
ImitationTask._get_joint_vel_size
|
(self, j)
|
return vel_size
|
Get the size of the velocity data for a give joint in a velocity array.
|
Get the size of the velocity data for a give joint in a velocity array.
|
[
"Get",
"the",
"size",
"of",
"the",
"velocity",
"data",
"for",
"a",
"give",
"joint",
"in",
"a",
"velocity",
"array",
"."
] |
def _get_joint_vel_size(self, j):
"""Get the size of the velocity data for a give joint in a velocity array."""
vel_size = self._joint_vel_size[j]
assert (vel_size == 1 or
vel_size == 0), "Only support 1D and 0D joints at the moment."
return vel_size
|
[
"def",
"_get_joint_vel_size",
"(",
"self",
",",
"j",
")",
":",
"vel_size",
"=",
"self",
".",
"_joint_vel_size",
"[",
"j",
"]",
"assert",
"(",
"vel_size",
"==",
"1",
"or",
"vel_size",
"==",
"0",
")",
",",
"\"Only support 1D and 0D joints at the moment.\"",
"return",
"vel_size"
] |
https://github.com/google-research/motion_imitation/blob/d0e7b963c5a301984352d25a3ee0820266fa4218/motion_imitation/envs/env_wrappers/imitation_task.py#L831-L836
|
|
fancompute/neuroptica
|
7bc3c152f2713780b88e701744b0541175b12111
|
neuroptica/nonlinearities.py
|
python
|
ComplexNonlinearity.dIm_dIm
|
(self, a: np.ndarray, b: np.ndarray)
|
Gives the derivative of the imaginary part of the nonlienarity w.r.t. the imaginary part of the input
|
Gives the derivative of the imaginary part of the nonlienarity w.r.t. the imaginary part of the input
|
[
"Gives",
"the",
"derivative",
"of",
"the",
"imaginary",
"part",
"of",
"the",
"nonlienarity",
"w",
".",
"r",
".",
"t",
".",
"the",
"imaginary",
"part",
"of",
"the",
"input"
] |
def dIm_dIm(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
'''Gives the derivative of the imaginary part of the nonlienarity w.r.t. the imaginary part of the input'''
raise NotImplementedError
|
[
"def",
"dIm_dIm",
"(",
"self",
",",
"a",
":",
"np",
".",
"ndarray",
",",
"b",
":",
"np",
".",
"ndarray",
")",
"->",
"np",
".",
"ndarray",
":",
"raise",
"NotImplementedError"
] |
https://github.com/fancompute/neuroptica/blob/7bc3c152f2713780b88e701744b0541175b12111/neuroptica/nonlinearities.py#L116-L118
|
||
kkuette/TradzQAI
|
1f46cb2536b24cb3716250f1e9705daa76af4f60
|
TradzQAI/API/cbpro/authenticated_client.py
|
python
|
AuthenticatedClient.get_order
|
(self, order_id)
|
return self._send_message('get', '/orders/' + order_id)
|
Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
|
Get a single order by order id.
|
[
"Get",
"a",
"single",
"order",
"by",
"order",
"id",
"."
] |
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
|
[
"def",
"get_order",
"(",
"self",
",",
"order_id",
")",
":",
"return",
"self",
".",
"_send_message",
"(",
"'get'",
",",
"'/orders/'",
"+",
"order_id",
")"
] |
https://github.com/kkuette/TradzQAI/blob/1f46cb2536b24cb3716250f1e9705daa76af4f60/TradzQAI/API/cbpro/authenticated_client.py#L485-L518
|
|
ybabakhin/kaggle_salt_bes_phalanx
|
2f81d4dd8d50a01579e5f7650259dde92c5c3b8d
|
bes/losses.py
|
python
|
flatten_binary_scores
|
(scores, labels, ignore=None)
|
return vscores, vlabels
|
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
|
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
|
[
"Flattens",
"predictions",
"in",
"the",
"batch",
"(",
"binary",
"case",
")",
"Remove",
"labels",
"equal",
"to",
"ignore"
] |
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = tf.reshape(scores, (-1,))
labels = tf.reshape(labels, (-1,))
if ignore is None:
return scores, labels
valid = tf.not_equal(labels, ignore)
vscores = tf.boolean_mask(scores, valid, name='valid_scores')
vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
return vscores, vlabels
|
[
"def",
"flatten_binary_scores",
"(",
"scores",
",",
"labels",
",",
"ignore",
"=",
"None",
")",
":",
"scores",
"=",
"tf",
".",
"reshape",
"(",
"scores",
",",
"(",
"-",
"1",
",",
")",
")",
"labels",
"=",
"tf",
".",
"reshape",
"(",
"labels",
",",
"(",
"-",
"1",
",",
")",
")",
"if",
"ignore",
"is",
"None",
":",
"return",
"scores",
",",
"labels",
"valid",
"=",
"tf",
".",
"not_equal",
"(",
"labels",
",",
"ignore",
")",
"vscores",
"=",
"tf",
".",
"boolean_mask",
"(",
"scores",
",",
"valid",
",",
"name",
"=",
"'valid_scores'",
")",
"vlabels",
"=",
"tf",
".",
"boolean_mask",
"(",
"labels",
",",
"valid",
",",
"name",
"=",
"'valid_labels'",
")",
"return",
"vscores",
",",
"vlabels"
] |
https://github.com/ybabakhin/kaggle_salt_bes_phalanx/blob/2f81d4dd8d50a01579e5f7650259dde92c5c3b8d/bes/losses.py#L181-L193
|
|
i3visio/osrframework
|
e02a6e9b1346ab5a01244c0d19bcec8232bf1a37
|
osrframework/wrappers/mstdn_jp.py
|
python
|
MstdnJP.__init__
|
(self)
|
Constructor with parameters
This method permits the developer to instantiate dinamically Platform
objects.
|
Constructor with parameters
|
[
"Constructor",
"with",
"parameters"
] |
def __init__(self):
"""Constructor with parameters
This method permits the developer to instantiate dinamically Platform
objects."""
self.platformName = "MstdnJP"
self.tags = ["social", "mastodon"]
self.modes = {
"usufy": {
"debug": False,
"extra_fields": {
"com.i3visio.Name": "<strong class='display-name__html p-name emojify'>([^<]+)</strong>", # Regular expresion to extract the alias
},
"needs_credentials": False,
"not_found_text": "<img alt='Mastodon' src='/oops.png'>", # Text that indicates a missing profile
"query_validator": "[a-z0-9A-Z_]+", # Regular expression that the alias SHOULD match
"url": "https://mstdn.jp/@{placeholder}", # Target URL where {placeholder} would be modified by the alias
}
}
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"platformName",
"=",
"\"MstdnJP\"",
"self",
".",
"tags",
"=",
"[",
"\"social\"",
",",
"\"mastodon\"",
"]",
"self",
".",
"modes",
"=",
"{",
"\"usufy\"",
":",
"{",
"\"debug\"",
":",
"False",
",",
"\"extra_fields\"",
":",
"{",
"\"com.i3visio.Name\"",
":",
"\"<strong class='display-name__html p-name emojify'>([^<]+)</strong>\"",
",",
"# Regular expresion to extract the alias",
"}",
",",
"\"needs_credentials\"",
":",
"False",
",",
"\"not_found_text\"",
":",
"\"<img alt='Mastodon' src='/oops.png'>\"",
",",
"# Text that indicates a missing profile",
"\"query_validator\"",
":",
"\"[a-z0-9A-Z_]+\"",
",",
"# Regular expression that the alias SHOULD match",
"\"url\"",
":",
"\"https://mstdn.jp/@{placeholder}\"",
",",
"# Target URL where {placeholder} would be modified by the alias",
"}",
"}"
] |
https://github.com/i3visio/osrframework/blob/e02a6e9b1346ab5a01244c0d19bcec8232bf1a37/osrframework/wrappers/mstdn_jp.py#L29-L47
|
||
nschaetti/EchoTorch
|
cba209c49e0fda73172d2e853b85c747f9f5117e
|
echotorch/data/datasets/CopyTaskDataset.py
|
python
|
CopyTaskDataset.data
|
(self)
|
return self.samples
|
Get the whole dataset as a list
@return: A tuple of list
|
Get the whole dataset as a list
|
[
"Get",
"the",
"whole",
"dataset",
"as",
"a",
"list"
] |
def data(self) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Get the whole dataset as a list
@return: A tuple of list
"""
return self.samples
|
[
"def",
"data",
"(",
"self",
")",
"->",
"Tuple",
"[",
"List",
"[",
"torch",
".",
"Tensor",
"]",
",",
"List",
"[",
"torch",
".",
"Tensor",
"]",
"]",
":",
"return",
"self",
".",
"samples"
] |
https://github.com/nschaetti/EchoTorch/blob/cba209c49e0fda73172d2e853b85c747f9f5117e/echotorch/data/datasets/CopyTaskDataset.py#L71-L76
|
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/whoosh/matching/wrappers.py
|
python
|
FilterMatcher.copy
|
(self)
|
return self.__class__(self.child.copy(), self._ids, self._exclude,
boost=self.boost)
|
[] |
def copy(self):
return self.__class__(self.child.copy(), self._ids, self._exclude,
boost=self.boost)
|
[
"def",
"copy",
"(",
"self",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"child",
".",
"copy",
"(",
")",
",",
"self",
".",
"_ids",
",",
"self",
".",
"_exclude",
",",
"boost",
"=",
"self",
".",
"boost",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/matching/wrappers.py#L288-L290
|
|||
pypa/pipenv
|
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
|
pipenv/patched/notpip/_vendor/distlib/database.py
|
python
|
InstalledDistribution.get_distinfo_resource
|
(self, path)
|
return finder.find(path)
|
[] |
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
|
[
"def",
"get_distinfo_resource",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
"not",
"in",
"DIST_FILES",
":",
"raise",
"DistlibException",
"(",
"'invalid path for a dist-info file: '",
"'%r at %r'",
"%",
"(",
"path",
",",
"self",
".",
"path",
")",
")",
"finder",
"=",
"resources",
".",
"finder_for_path",
"(",
"self",
".",
"path",
")",
"if",
"finder",
"is",
"None",
":",
"raise",
"DistlibException",
"(",
"'Unable to get a finder for %s'",
"%",
"self",
".",
"path",
")",
"return",
"finder",
".",
"find",
"(",
"path",
")"
] |
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_vendor/distlib/database.py#L795-L802
|
|||
MaslowCNC/GroundControl
|
294a05dea5b9753383e24b07ea47d78e76e49422
|
UIElements/viewMenu.py
|
python
|
ViewMenu.load
|
(self, instance)
|
Load A File (Any Type)
Takes in a file path (from pop-up) and handles the file appropriately for the given file-type.
|
Load A File (Any Type)
Takes in a file path (from pop-up) and handles the file appropriately for the given file-type.
|
[
"Load",
"A",
"File",
"(",
"Any",
"Type",
")",
"Takes",
"in",
"a",
"file",
"path",
"(",
"from",
"pop",
"-",
"up",
")",
"and",
"handles",
"the",
"file",
"appropriately",
"for",
"the",
"given",
"file",
"-",
"type",
"."
] |
def load(self, instance):
'''
Load A File (Any Type)
Takes in a file path (from pop-up) and handles the file appropriately for the given file-type.
'''
try:
filename = instance.selection[0]
except IndexError:
print("must choose a file...")
else:
print(filename)
#close the open file popup
self.dismiss_popup()
#locate the file
self.data.gcodeFile = filename
self.data.config.set('Maslow Settings', 'openFile', str(self.data.gcodeFile))
#close the parent popup
self.parentWidget.close()
|
[
"def",
"load",
"(",
"self",
",",
"instance",
")",
":",
"try",
":",
"filename",
"=",
"instance",
".",
"selection",
"[",
"0",
"]",
"except",
"IndexError",
":",
"print",
"(",
"\"must choose a file...\"",
")",
"else",
":",
"print",
"(",
"filename",
")",
"#close the open file popup",
"self",
".",
"dismiss_popup",
"(",
")",
"#locate the file",
"self",
".",
"data",
".",
"gcodeFile",
"=",
"filename",
"self",
".",
"data",
".",
"config",
".",
"set",
"(",
"'Maslow Settings'",
",",
"'openFile'",
",",
"str",
"(",
"self",
".",
"data",
".",
"gcodeFile",
")",
")",
"#close the parent popup",
"self",
".",
"parentWidget",
".",
"close",
"(",
")"
] |
https://github.com/MaslowCNC/GroundControl/blob/294a05dea5b9753383e24b07ea47d78e76e49422/UIElements/viewMenu.py#L75-L99
|
||
quodlibet/mutagen
|
399513b167ed00c4b7a9ef98dfe591a276efb701
|
mutagen/id3/_tags.py
|
python
|
ID3Tags.loaded_frame
|
(self, tag)
|
Deprecated; use the add method.
|
Deprecated; use the add method.
|
[
"Deprecated",
";",
"use",
"the",
"add",
"method",
"."
] |
def loaded_frame(self, tag):
"""Deprecated; use the add method."""
self._add(tag, True)
|
[
"def",
"loaded_frame",
"(",
"self",
",",
"tag",
")",
":",
"self",
".",
"_add",
"(",
"tag",
",",
"True",
")"
] |
https://github.com/quodlibet/mutagen/blob/399513b167ed00c4b7a9ef98dfe591a276efb701/mutagen/id3/_tags.py#L323-L326
|
||
ales-tsurko/cells
|
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
|
packaging/macos/python/lib/python3.7/mailbox.py
|
python
|
MH.__setitem__
|
(self, key, message)
|
Replace the keyed message; raise KeyError if it doesn't exist.
|
Replace the keyed message; raise KeyError if it doesn't exist.
|
[
"Replace",
"the",
"keyed",
"message",
";",
"raise",
"KeyError",
"if",
"it",
"doesn",
"t",
"exist",
"."
] |
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
|
[
"def",
"__setitem__",
"(",
"self",
",",
"key",
",",
"message",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_path",
",",
"str",
"(",
"key",
")",
")",
"try",
":",
"f",
"=",
"open",
"(",
"path",
",",
"'rb+'",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"raise",
"KeyError",
"(",
"'No message with key: %s'",
"%",
"key",
")",
"else",
":",
"raise",
"try",
":",
"if",
"self",
".",
"_locked",
":",
"_lock_file",
"(",
"f",
")",
"try",
":",
"os",
".",
"close",
"(",
"os",
".",
"open",
"(",
"path",
",",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_TRUNC",
")",
")",
"self",
".",
"_dump_message",
"(",
"message",
",",
"f",
")",
"if",
"isinstance",
"(",
"message",
",",
"MHMessage",
")",
":",
"self",
".",
"_dump_sequences",
"(",
"message",
",",
"key",
")",
"finally",
":",
"if",
"self",
".",
"_locked",
":",
"_unlock_file",
"(",
"f",
")",
"finally",
":",
"_sync_close",
"(",
"f",
")"
] |
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/mailbox.py#L995-L1017
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.