repo (string, 7–55 chars) | path (string, 4–223 chars) | url (string, 87–315 chars) | code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | language (string, 1 class) | partition (string, 3 values) | avg_line_len (float64, 7.91–980)
---|---|---|---|---|---|---|---
gwastro/pycbc-glue
|
pycbc_glue/ligolw/lsctables.py
|
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/lsctables.py#L2121-L2128
|
def get_slide_number(self):
"""
Return the slide-number for this trigger
"""
a, slide_number, b = self.get_id_parts()
if slide_number > 5000:
slide_number = 5000 - slide_number
return slide_number
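# Hypothetical usage (not from the original file); assumes `trigger` is a table
# row exposing get_id_parts(), as elsewhere in this lsctables module:
#   n = trigger.get_slide_number()
#   # ids encoding slides above 5000 come back negative, e.g. 5003 -> -3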
|
Return the slide-number for this trigger
|
python
|
train
| 25.5 |
saltstack/salt
|
salt/modules/freebsdkmod.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/freebsdkmod.py#L83-L91
|
def _set_persistent_module(mod):
'''
Add a module to loader.conf to make it persistent.
'''
if not mod or mod in mod_list(True) or mod not in \
available():
return set()
__salt__['file.append'](_LOADER_CONF, _LOAD_MODULE.format(mod))
return set([mod])
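# Illustrative call (not from the original module); the module name is a
# made-up example and must be available but not yet persistent:
#   _set_persistent_module('zfs')   # -> set(['zfs']) if appended, else set()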
|
Add a module to loader.conf to make it persistent.
|
python
|
train
| 31.888889 |
AdvancedClimateSystems/uModbus
|
umodbus/utils.py
|
https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/utils.py#L10-L23
|
def log_to_stream(stream=sys.stderr, level=logging.NOTSET,
fmt=logging.BASIC_FORMAT):
""" Add :class:`logging.StreamHandler` to logger which logs to a stream.
:param stream. Stream to log to, default STDERR.
:param level: Log level, default NOTSET.
:param fmt: String with log format, default is BASIC_FORMAT.
"""
fmt = Formatter(fmt)
handler = StreamHandler()
handler.setFormatter(fmt)
handler.setLevel(level)
log.addHandler(handler)
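# Usage sketch (added for illustration; `log` is the module-level logger this
# helper attaches to):
#   log_to_stream(level=logging.DEBUG)
#   log.debug('stream handler attached')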
|
Add :class:`logging.StreamHandler` to logger which logs to a stream.
:param stream. Stream to log to, default STDERR.
:param level: Log level, default NOTSET.
:param fmt: String with log format, default is BASIC_FORMAT.
|
python
|
train
| 34.357143 |
GNS3/gns3-server
|
gns3server/utils/images.py
|
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/images.py#L188-L195
|
def remove_checksum(path):
"""
Remove the checksum of an image from cache if exists
"""
path = '{}.md5sum'.format(path)
if os.path.exists(path):
os.remove(path)
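# Illustrative call (the image path is a made-up example):
#   remove_checksum('/opt/gns3/images/router.qcow2')
#   # removes '/opt/gns3/images/router.qcow2.md5sum' if the cache file exists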
|
Remove the checksum of an image from cache if exists
|
python
|
train
| 22.75 |
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L264-L271
|
def addSourceGroup(self, sourceGroupUri, weight):
"""
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sourceGroups"].append({"uri": sourceGroupUri, "wgt": weight})
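# Usage sketch (illustrative; the source-group URI is hypothetical):
#   page.addSourceGroup('sources/quality-media', weight=25)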
|
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
|
python
|
train
| 63.125 |
valohai/valohai-yaml
|
valohai_yaml/objs/step.py
|
https://github.com/valohai/valohai-yaml/blob/3d2e92381633d84cdba039f6905df34c9633a2e1/valohai_yaml/objs/step.py#L92-L118
|
def build_command(self, parameter_values, command=None):
"""
Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str]
"""
command = (command or self.command)
# merge defaults with passed values
# ignore flag default values as they are special
# undefined flag will remain undefined regardless of default value
values = dict(self.get_parameter_defaults(include_flags=False), **parameter_values)
parameter_map = ParameterMap(parameters=self.parameters, values=values)
return build_command(command, parameter_map)
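# Usage sketch (illustrative; the parameter name is hypothetical):
#   cmds = step.build_command({'learning-rate': 0.001})
#   shell = ' && '.join(cmds)   # the caller concatenates the returned list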
|
Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str]
|
python
|
train
| 44.481481 |
ckoepp/TwitterSearch
|
TwitterSearch/TwitterSearch.py
|
https://github.com/ckoepp/TwitterSearch/blob/627b9f519d49faf6b83859717f9082b3b2622aaf/TwitterSearch/TwitterSearch.py#L151-L170
|
def authenticate(self, verify=True):
""" Creates an authenticated and internal oauth2 handler needed for \
queries to Twitter and verifies credentials if needed. If ``verify`` \
is true, it also checks if the user credentials are valid. \
The **default** value is *True*
:param verify: boolean variable to \
directly check. Default value is ``True``
"""
self.__oauth = OAuth1(self.__consumer_key,
client_secret=self.__consumer_secret,
resource_owner_key=self.__access_token,
resource_owner_secret=self.__access_token_secret)
if verify:
r = requests.get(self._base_url + self._verify_url,
auth=self.__oauth,
proxies={"https": self.__proxy})
self.check_http_status(r.status_code)
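# Usage sketch (illustrative; assumes `ts` was built with consumer and access
# credentials):
#   ts.authenticate()              # also verifies credentials via the API
#   ts.authenticate(verify=False)  # only builds the internal OAuth1 handler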
|
Creates an authenticated and internal oauth2 handler needed for \
queries to Twitter and verifies credentials if needed. If ``verify`` \
is true, it also checks if the user credentials are valid. \
The **default** value is *True*
:param verify: boolean variable to \
directly check. Default value is ``True``
|
python
|
train
| 45.35 |
tensorflow/tensor2tensor
|
tensor2tensor/layers/common_layers.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L2322-L2386
|
def sru(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
if is_xla_compiled(): # On TPU the XLA does a good job with while.
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
try:
from tensorflow.contrib.recurrent.python.ops import functional_rnn # pylint: disable=g-import-not-at-top
except ImportError:
tf.logging.info("functional_rnn not found, using sru_with_scan instead")
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
cell = CumsumprodCell(initial_state)
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
concat = tf.concat([x_times_one_minus_f, f], axis=-1)
c_states, _ = functional_rnn.functional_rnn(
cell, concat, time_major=False)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
return tf.reshape(x, x_shape)
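# Usage sketch (illustrative shapes, matching the docstring above):
#   x = tf.zeros([8, 128, 64])   # [batch, time, channels]
#   y = sru(x, num_layers=2)     # same shape as x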
|
SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
|
python
|
train
| 38.384615 |
learningequality/morango
|
morango/signals.py
|
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/signals.py#L8-L14
|
def add_to_deleted_models(sender, instance=None, *args, **kwargs):
"""
Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark
the model as deleted in the store.
"""
if issubclass(sender, SyncableModel):
instance._update_deleted_models()
|
Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark
the model as deleted in the store.
|
python
|
valid
| 46.714286 |
inveniosoftware/invenio-access
|
invenio_access/ext.py
|
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/ext.py#L82-L93
|
def delete_action_cache(self, action_key):
"""Delete action needs and excludes from cache.
.. note:: It returns the action if a cache system is defined.
:param action_key: The unique action name.
"""
if self.cache:
self.cache.delete(
self.app.config['ACCESS_ACTION_CACHE_PREFIX'] +
action_key
)
|
Delete action needs and excludes from cache.
.. note:: It returns the action if a cache system is defined.
:param action_key: The unique action name.
|
python
|
train
| 31.75 |
saltstack/salt
|
salt/modules/zookeeper.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zookeeper.py#L428-L474
|
def set_acls(path, acls, version=-1, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
'''
Set acls on a znode
path
path to znode
acls
list of acl dictionaries to set on the znode
version
only set acls if version matches (Default: -1 (always matches))
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.set_acls /test/name acls='[{"username": "gtmanfred", "password": "test", "all": True}]' profile=prod
'''
if acls is None:
acls = []
acls = [make_digest_acl(**acl) for acl in acls]
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return conn.set_acls(path, acls, version)
|
Set acls on a znode
path
path to znode
acls
list of acl dictionaries to set on the znode
version
only set acls if version matches (Default: -1 (always matches))
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.set_acls /test/name acls='[{"username": "gtmanfred", "password": "test", "all": True}]' profile=prod
|
python
|
train
| 29.659574 |
pypa/pipenv
|
pipenv/patched/notpip/_vendor/lockfile/pidlockfile.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/lockfile/pidlockfile.py#L176-L190
|
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
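# Illustrative call (the path is a made-up example):
#   remove_existing_pidfile('/var/run/myapp.pid')  # silent if already absent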
|
Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
|
python
|
train
| 26.733333 |
python-diamond/Diamond
|
src/diamond/collector.py
|
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/collector.py#L544-L553
|
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ProcessCollector, self).get_default_config()
config.update({
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
return config
|
Returns the default collector settings
|
python
|
train
| 31.4 |
hannes-brt/hebel
|
hebel/layers/hidden_layer.py
|
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/layers/hidden_layer.py#L264-L323
|
def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
# Get cache if it wasn't provided
if cache is None:
cache = self.feed_forward(input_data,
prediction=False)
if len(cache) == 2:
activations, dropout_mask = cache
else:
            activations = cache[0]
            dropout_mask = None  # no mask cached; keeps the dropout check below defined
# Multiply the binary mask with the incoming gradients
if self.dropout > 0 and dropout_mask is not None:
apply_dropout_mask(df_output, dropout_mask)
# Get gradient wrt activation function
df_activations = self.df(activations)
delta = mult_matrix(df_activations, df_output)
# Gradient wrt weights
df_W = linalg.dot(input_data, delta, transa='T')
# Gradient wrt bias
df_b = matrix_sum_out_axis(delta, 0)
# Gradient wrt inputs
df_input = linalg.dot(delta, self.W, transb='T')
# L1 weight decay
if self.l1_penalty_weight:
df_W += self.l1_penalty_weight * sign(self.W)
# L2 weight decay
if self.l2_penalty_weight:
df_W += self.l2_penalty_weight * self.W
return (df_W, df_b), df_input
|
Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
|
python
|
train
| 31.5 |
Microsoft/nni
|
src/sdk/pynni/nni/networkmorphism_tuner/layer_transformer.py
|
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/layer_transformer.py#L142-L166
|
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
'''wider next conv layer.
'''
n_dim = get_n_dim(layer)
if not weighted:
return get_conv_class(n_dim)(layer.input_channel + n_add,
layer.filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
n_filters = layer.filters
teacher_w, teacher_b = layer.get_weights()
new_weight_shape = list(teacher_w.shape)
new_weight_shape[1] = n_add
new_weight = np.zeros(tuple(new_weight_shape))
student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
add_noise(new_weight, teacher_w),
teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
n_filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
new_layer.set_weights((student_w, teacher_b))
return new_layer
|
wider next conv layer.
|
python
|
train
| 45 |
siemens/django-dingos
|
dingos/models.py
|
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/models.py#L1496-L1508
|
def set_name(self,name=None):
"""
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
"""
if name:
self.name = name[:254]
else:
self.name = self.extract_name()[:254]
self.save()
return self.name
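# Usage sketch (illustrative; `obj` is a saved model instance):
#   obj.set_name('some very long name ...')  # stored truncated to 254 chars
#   obj.set_name()                           # falls back to extract_name()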
|
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
|
python
|
train
| 24.615385 |
aliyun/aliyun-odps-python-sdk
|
odps/df/expr/strings.py
|
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/strings.py#L576-L597
|
def _pad(expr, width, side='left', fillchar=' '):
"""
Pad strings in the sequence or scalar with an additional character to specified side.
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with spaces
:param side: {‘left’, ‘right’, ‘both’}, default ‘left’
:param fillchar: Additional character for filling, default is whitespace
:return: sequence or scalar
"""
if not isinstance(fillchar, six.string_types):
msg = 'fillchar must be a character, not {0}'
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if side not in ('left', 'right', 'both'):
raise ValueError('Invalid side')
return _string_op(expr, Pad, _width=width, _side=side, _fillchar=fillchar)
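# Usage sketch (illustrative; `expr` would be a string sequence or scalar
# expression from this DataFrame DSL):
#   _pad(expr, width=10, side='both', fillchar='*')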
|
Pad strings in the sequence or scalar with an additional character to specified side.
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with spaces
:param side: {‘left’, ‘right’, ‘both’}, default ‘left’
:param fillchar: Additional character for filling, default is whitespace
:return: sequence or scalar
|
python
|
train
| 38.636364 |
raphaelm/django-hierarkey
|
hierarkey/proxy.py
|
https://github.com/raphaelm/django-hierarkey/blob/3ca822f94fa633c9a6d5abe9c80cb1551299ae46/hierarkey/proxy.py#L191-L207
|
def set(self, key: str, value: Any) -> None:
"""
Stores a setting to the database of its object.
The write to the database is performed immediately and the cache in the cache backend is flushed.
The cache within this object will be updated correctly.
"""
wc = self._write_cache()
if key in wc:
s = wc[key]
else:
s = self._type(object=self._obj, key=key)
s.value = self._serialize(value)
s.save()
self._cache()[key] = s.value
wc[key] = s
self._flush_external_cache()
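# Usage sketch (illustrative key and value; `settings` is the hierarkey proxy
# on a model instance):
#   obj.settings.set('theme', 'dark')  # written to the DB at once, caches updated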
|
Stores a setting to the database of its object.
The write to the database is performed immediately and the cache in the cache backend is flushed.
The cache within this object will be updated correctly.
|
python
|
train
| 34.529412 |
ska-sa/katcp-python
|
katcp/client.py
|
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L1199-L1240
|
def future_request(self, msg, timeout=None, use_mid=None):
"""Send a request messsage, with future replies.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
            How long to wait for a reply. The default is
            the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
A tornado.concurrent.Future that resolves with:
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received.
"""
if timeout is None:
timeout = self._request_timeout
f = tornado_Future()
informs = []
def reply_cb(msg):
f.set_result((msg, informs))
def inform_cb(msg):
informs.append(msg)
try:
self.callback_request(msg, reply_cb=reply_cb, inform_cb=inform_cb,
timeout=timeout, use_mid=use_mid)
except Exception:
f.set_exc_info(sys.exc_info())
return f
|
Send a request message, with future replies.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
    How long to wait for a reply. The default is
    the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
A tornado.concurrent.Future that resolves with:
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received.
|
python
|
train
| 29.833333 |
ldo/dbussy
|
dbussy.py
|
https://github.com/ldo/dbussy/blob/59e4fbe8b8111ceead884e50d1973901a0a2d240/dbussy.py#L5539-L5545
|
def signature_validate(signature, error = None) :
"is signature a valid sequence of zero or more complete types."
error, my_error = _get_error(error)
result = dbus.dbus_signature_validate(signature.encode(), error._dbobj) != 0
my_error.raise_if_set()
return \
result
|
is signature a valid sequence of zero or more complete types.
|
python
|
train
| 41.142857 |
JdeRobot/base
|
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py
|
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L170-L178
|
def average(var, key, N):
'''average over N points'''
global average_data
if not key in average_data:
average_data[key] = [var]*N
return var
average_data[key].pop(0)
average_data[key].append(var)
return sum(average_data[key])/N
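# Usage sketch (illustrative field name): a 10-point moving average keyed by
# name; the first call seeds the window with `var` and returns it unchanged.
#   average(msg.alt, 'alt', 10)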
|
average over N points
|
[
"average",
"over",
"N",
"points"
] |
python
|
train
| 28.777778 |
spyder-ide/spyder
|
spyder/py3compat.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/py3compat.py#L150-L160
|
def to_binary_string(obj, encoding=None):
"""Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
if PY2:
# Python 2
if encoding is None:
return str(obj)
else:
return obj.encode(encoding)
else:
# Python 3
return bytes(obj, 'utf-8' if encoding is None else encoding)
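# Usage sketch (illustrative):
#   to_binary_string('abc')             # -> b'abc' on Python 3
#   to_binary_string('abc', 'latin-1')  # explicit encoding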
|
Convert `obj` to binary string (bytes in Python 3, str in Python 2)
|
python
|
train
| 32.636364 |
assemblerflow/flowcraft
|
flowcraft/generator/inspect.py
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1533-L1550
|
def _get_run_hash(self):
"""Gets the hash of the nextflow file"""
# Get name and path of the pipeline from the log file
pipeline_path = get_nextflow_filepath(self.log_file)
# Get hash from the entire pipeline file
pipeline_hash = hashlib.md5()
with open(pipeline_path, "rb") as fh:
for chunk in iter(lambda: fh.read(4096), b""):
pipeline_hash.update(chunk)
# Get hash from the current working dir and hostname
workdir = self.workdir.encode("utf8")
hostname = socket.gethostname().encode("utf8")
hardware_addr = str(uuid.getnode()).encode("utf8")
dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
return pipeline_hash.hexdigest() + dir_hash.hexdigest()
|
Gets the hash of the nextflow file
|
python
|
test
| 42.777778 |
msmbuilder/msmbuilder
|
msmbuilder/example_datasets/brownian1d.py
|
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/example_datasets/brownian1d.py#L253-L258
|
def _brownian_eigs(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc):
"""Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
"""
transmat = _brownian_transmat(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc)
u, lv, rv = _solve_msm_eigensystem(transmat, k=len(transmat) - 1)
return u, rv
|
Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
|
python
|
train
| 54.166667 |
happyleavesaoc/python-voobly
|
voobly/__init__.py
|
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L201-L210
|
def get_user(session, user_id):
"""Get user."""
try:
user_id = int(user_id)
except ValueError:
user_id = find_user(session, user_id)
resp = _make_request(session, USER_URL, user_id)
if not resp:
raise VooblyError('user id not found')
return resp[0]
|
Get user.
|
python
|
train
| 28.7 |
JNRowe/jnrbase
|
jnrbase/template.py
|
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/template.py#L145-L162
|
def setup(__pkg: str) -> jinja2.Environment:
"""Configure a new Jinja environment with our filters.
Args:
__pkg: Package name to use as base for templates searches
Returns:
Configured Jinja environment
"""
dirs = [path.join(d, 'templates')
for d in xdg_basedir.get_data_dirs(__pkg)]
env = jinja2.Environment(
autoescape=jinja2.select_autoescape(['html', 'xml']),
loader=jinja2.ChoiceLoader([jinja2.FileSystemLoader(s) for s in dirs]))
env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))
env.filters.update(FILTERS)
return env
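# Usage sketch (the package and template names are made-up examples):
#   env = setup('mypkg')
#   env.get_template('report.html')  # searched in XDG data dirs, then the package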
|
Configure a new Jinja environment with our filters.
Args:
__pkg: Package name to use as base for templates searches
Returns:
Configured Jinja environment
|
python
|
train
| 33.722222 |
cltl/KafNafParserPy
|
KafNafParserPy/feature_extractor/constituency.py
|
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/feature_extractor/constituency.py#L137-L173
|
def get_path_from_to(self,from_tid, to_tid):
"""
This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types
"""
shortest_subsumer = self.get_least_common_subsumer(from_tid, to_tid)
#print 'From:',self.naf.get_term(from_tid).get_lemma()
#print 'To:',self.naf.get_term(to_tid).get_lemma()
termid_from = self.terminal_for_term.get(from_tid)
termid_to = self.terminal_for_term.get(to_tid)
path_from = self.paths_for_terminal[termid_from][0]
path_to = self.paths_for_terminal[termid_to][0]
if shortest_subsumer is None:
return None
complete_path = []
for node in path_from:
complete_path.append(node)
if node == shortest_subsumer: break
begin=False
for node in path_to[-1::-1]:
if begin:
complete_path.append(node)
if node==shortest_subsumer:
begin=True
labels = [self.label_for_nonter[nonter] for nonter in complete_path]
return labels
|
This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types
|
python
|
train
| 35.567568 |
Bonsanto/polygon-geohasher
|
polygon_geohasher/polygon_geohasher.py
|
https://github.com/Bonsanto/polygon-geohasher/blob/63f27f41ea3e9d8fda7872d86217719286037c11/polygon_geohasher/polygon_geohasher.py#L23-L62
|
def polygon_to_geohashes(polygon, precision, inner=True):
"""
:param polygon: shapely polygon.
:param precision: int. Geohashes' precision that form resulting polygon.
    :param inner: bool, default 'True'. If false, geohashes that are completely outside the polygon are ignored.
:return: set. Set of geohashes that form the polygon.
"""
inner_geohashes = set()
outer_geohashes = set()
envelope = polygon.envelope
centroid = polygon.centroid
testing_geohashes = queue.Queue()
testing_geohashes.put(geohash.encode(centroid.y, centroid.x, precision))
while not testing_geohashes.empty():
current_geohash = testing_geohashes.get()
if current_geohash not in inner_geohashes and current_geohash not in outer_geohashes:
current_polygon = geohash_to_polygon(current_geohash)
condition = envelope.contains(current_polygon) if inner else envelope.intersects(current_polygon)
if condition:
if inner:
if polygon.contains(current_polygon):
inner_geohashes.add(current_geohash)
else:
outer_geohashes.add(current_geohash)
else:
if polygon.intersects(current_polygon):
inner_geohashes.add(current_geohash)
else:
outer_geohashes.add(current_geohash)
for neighbor in geohash.neighbors(current_geohash):
if neighbor not in inner_geohashes and neighbor not in outer_geohashes:
testing_geohashes.put(neighbor)
return inner_geohashes
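# Usage sketch (illustrative coordinates; requires shapely):
#   from shapely.geometry import box
#   polygon_to_geohashes(box(6.0, 52.0, 6.1, 52.1), precision=6)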
|
:param polygon: shapely polygon.
:param precision: int. Geohashes' precision that form resulting polygon.
:param inner: bool, default 'True'. If false, geohashes that are completely outside the polygon are ignored.
:return: set. Set of geohashes that form the polygon.
|
python
|
train
| 41.45 |
CivicSpleen/ambry
|
ambry/orm/__init__.py
|
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/__init__.py#L74-L89
|
def table_convert_geometry(metadata, table_name):
"""Get table metadata from the database."""
from sqlalchemy import Table
from ..orm import Geometry
table = Table(table_name, metadata, autoload=True)
for c in table.columns:
        # HACK! Sqlalchemy sees spatialite GEOMETRY types
# as NUMERIC
if c.name == 'geometry':
c.type = Geometry # What about variants?
return table
|
Get table metadata from the database.
|
python
|
train
| 25.9375 |
estnltk/estnltk
|
estnltk/syntax/syntax_preprocessing.py
|
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L954-L960
|
def process_Text( self, text, **kwargs ):
''' Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_Text_to_mrf( text )
return self.process_mrf_lines( mrf_lines, **kwargs )
|
Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
|
python
|
train
| 44 |
pandeylab/pythomics
|
pythomics/proteomics/parsers.py
|
https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/proteomics/parsers.py#L1490-L1526
|
def getScans(self, modifications=False, fdr=True):
"""
get a random scan
"""
if fdr:
sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} and p.SearchEngineRank <= {} {}".format(self.clvl, self.srank, self.extra)
try:
self.cur.execute(sql)
except sqlite3.OperationalError:
sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} {}".format(self.clvl, self.extra)
self.cur.execute(sql)
else:
sql = self.base_sql
self.cur.execute(sql)
while True:
# results = self.cur.fetchmany(1000)
# if not results:
# break
try:
tup = self.cur.fetchone()
except:
sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc()))
else:
while tup is not None:
if tup is None:
break
if tup[1] is not None:
scan = self.parseFullScan(tup, modifications=modifications)
scan.spectrumId = tup[3]
yield scan
try:
tup = self.cur.fetchone()
except:
sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc()))
if tup is None:
break
yield None
|
Yield scans from the database
|
[
"get",
"a",
"random",
"scan"
] |
python
|
train
| 39.162162 |
programa-stic/barf-project
|
barf/core/reil/builder.py
|
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/core/reil/builder.py#L122-L125
|
def gen_stm(src, dst):
"""Return a STM instruction.
"""
return ReilBuilder.build(ReilMnemonic.STM, src, ReilEmptyOperand(), dst)
|
[
"def",
"gen_stm",
"(",
"src",
",",
"dst",
")",
":",
"return",
"ReilBuilder",
".",
"build",
"(",
"ReilMnemonic",
".",
"STM",
",",
"src",
",",
"ReilEmptyOperand",
"(",
")",
",",
"dst",
")"
] |
Return a STM instruction.
|
[
"Return",
"a",
"STM",
"instruction",
"."
] |
python
|
train
| 37.25 |
angr/angr
|
angr/analyses/cfg/cfg_base.py
|
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_base.py#L2120-L2143
|
def _process_unresolved_indirect_jumps(self):
"""
Resolve all unresolved indirect jumps found in previous scanning.
Currently we support resolving the following types of indirect jumps:
- Ijk_Call: indirect calls where the function address is passed in from a preceding basic block
- Ijk_Boring: jump tables
- For an up-to-date list, see analyses/cfg/indirect_jump_resolvers
:return: A set of concrete indirect jump targets (ints).
:rtype: set
"""
l.info("%d indirect jumps to resolve.", len(self._indirect_jumps_to_resolve))
all_targets = set()
for idx, jump in enumerate(self._indirect_jumps_to_resolve): # type:int,IndirectJump
if self._low_priority:
self._release_gil(idx, 20, 0.0001)
all_targets |= self._process_one_indirect_jump(jump)
self._indirect_jumps_to_resolve.clear()
return all_targets
|
[
"def",
"_process_unresolved_indirect_jumps",
"(",
"self",
")",
":",
"l",
".",
"info",
"(",
"\"%d indirect jumps to resolve.\"",
",",
"len",
"(",
"self",
".",
"_indirect_jumps_to_resolve",
")",
")",
"all_targets",
"=",
"set",
"(",
")",
"for",
"idx",
",",
"jump",
"in",
"enumerate",
"(",
"self",
".",
"_indirect_jumps_to_resolve",
")",
":",
"# type:int,IndirectJump",
"if",
"self",
".",
"_low_priority",
":",
"self",
".",
"_release_gil",
"(",
"idx",
",",
"20",
",",
"0.0001",
")",
"all_targets",
"|=",
"self",
".",
"_process_one_indirect_jump",
"(",
"jump",
")",
"self",
".",
"_indirect_jumps_to_resolve",
".",
"clear",
"(",
")",
"return",
"all_targets"
] |
Resolve all unresolved indirect jumps found in previous scanning.
Currently we support resolving the following types of indirect jumps:
- Ijk_Call: indirect calls where the function address is passed in from a preceding basic block
- Ijk_Boring: jump tables
- For an up-to-date list, see analyses/cfg/indirect_jump_resolvers
:return: A set of concrete indirect jump targets (ints).
:rtype: set
|
[
"Resolve",
"all",
"unresolved",
"indirect",
"jumps",
"found",
"in",
"previous",
"scanning",
"."
] |
python
|
train
| 39.333333 |
PyCQA/astroid
|
astroid/scoped_nodes.py
|
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/scoped_nodes.py#L145-L156
|
def qname(self):
"""Get the 'qualified' name of the node.
For example: module.name, module.class.name ...
:returns: The qualified name.
:rtype: str
"""
# pylint: disable=no-member; github.com/pycqa/astroid/issues/278
if self.parent is None:
return self.name
return "%s.%s" % (self.parent.frame().qname(), self.name)
|
[
"def",
"qname",
"(",
"self",
")",
":",
"# pylint: disable=no-member; github.com/pycqa/astroid/issues/278",
"if",
"self",
".",
"parent",
"is",
"None",
":",
"return",
"self",
".",
"name",
"return",
"\"%s.%s\"",
"%",
"(",
"self",
".",
"parent",
".",
"frame",
"(",
")",
".",
"qname",
"(",
")",
",",
"self",
".",
"name",
")"
] |
Get the 'qualified' name of the node.
For example: module.name, module.class.name ...
:returns: The qualified name.
:rtype: str
|
[
"Get",
"the",
"qualified",
"name",
"of",
"the",
"node",
"."
] |
python
|
train
| 31.833333 |
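A quick check of the record above against astroid's public API (the snippet and its expected output are illustrative):
import astroid

node = astroid.extract_node('''
class Greeter:
    def hello(self):  #@
        pass
''')
# parent frames are walked: method -> class -> module (whose name is empty here)
print(node.qname())  # '.Greeter.hello'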
mcs07/ChemDataExtractor
|
chemdataextractor/cli/chemdner.py
|
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/chemdner.py#L47-L67
|
def prepare_tokens(ctx, input, annotations, tout, lout):
"""Prepare tokenized and tagged corpus file from those supplied by CHEMDNER."""
click.echo('chemdataextractor.chemdner.prepare_tokens')
# Collect the annotations into a dict
anndict = defaultdict(list)
for line in annotations:
pmid, ta, start, end, text, category = line.strip().split('\t')
anndict[(pmid, ta)].append((int(start), int(end), text))
# Process the corpus
for line in input:
pmid, title, abstract = line.strip().split(u'\t')
for t, section, anns in [(Title(title), 'T', anndict.get((pmid, u'T'), [])), (Paragraph(abstract), u'A', anndict.get((pmid, u'A'), []))]:
# Write our tokens with POS and IOB tags
tagged = _prep_tags(t, anns)
for i, sentence in enumerate(tagged):
tout.write(u' '.join([u'/'.join([token, tag, label]) for token, tag, label in sentence]))
lout.write(u' '.join([u'/'.join([token, label]) for token, tag, label in sentence]))
tout.write(u'\n')
lout.write(u'\n')
tout.write(u'\n')
lout.write(u'\n')
|
[
"def",
"prepare_tokens",
"(",
"ctx",
",",
"input",
",",
"annotations",
",",
"tout",
",",
"lout",
")",
":",
"click",
".",
"echo",
"(",
"'chemdataextractor.chemdner.prepare_tokens'",
")",
"# Collect the annotations into a dict",
"anndict",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"line",
"in",
"annotations",
":",
"pmid",
",",
"ta",
",",
"start",
",",
"end",
",",
"text",
",",
"category",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"anndict",
"[",
"(",
"pmid",
",",
"ta",
")",
"]",
".",
"append",
"(",
"(",
"int",
"(",
"start",
")",
",",
"int",
"(",
"end",
")",
",",
"text",
")",
")",
"# Process the corpus",
"for",
"line",
"in",
"input",
":",
"pmid",
",",
"title",
",",
"abstract",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"u'\\t'",
")",
"for",
"t",
",",
"section",
",",
"anns",
"in",
"[",
"(",
"Title",
"(",
"title",
")",
",",
"'T'",
",",
"anndict",
".",
"get",
"(",
"(",
"pmid",
",",
"u'T'",
")",
",",
"[",
"]",
")",
")",
",",
"(",
"Paragraph",
"(",
"abstract",
")",
",",
"u'A'",
",",
"anndict",
".",
"get",
"(",
"(",
"pmid",
",",
"u'A'",
")",
",",
"[",
"]",
")",
")",
"]",
":",
"# Write our tokens with POS and IOB tags",
"tagged",
"=",
"_prep_tags",
"(",
"t",
",",
"anns",
")",
"for",
"i",
",",
"sentence",
"in",
"enumerate",
"(",
"tagged",
")",
":",
"tout",
".",
"write",
"(",
"u' '",
".",
"join",
"(",
"[",
"u'/'",
".",
"join",
"(",
"[",
"token",
",",
"tag",
",",
"label",
"]",
")",
"for",
"token",
",",
"tag",
",",
"label",
"in",
"sentence",
"]",
")",
")",
"lout",
".",
"write",
"(",
"u' '",
".",
"join",
"(",
"[",
"u'/'",
".",
"join",
"(",
"[",
"token",
",",
"label",
"]",
")",
"for",
"token",
",",
"tag",
",",
"label",
"in",
"sentence",
"]",
")",
")",
"tout",
".",
"write",
"(",
"u'\\n'",
")",
"lout",
".",
"write",
"(",
"u'\\n'",
")",
"tout",
".",
"write",
"(",
"u'\\n'",
")",
"lout",
".",
"write",
"(",
"u'\\n'",
")"
] |
Prepare tokenized and tagged corpus file from those supplied by CHEMDNER.
|
[
"Prepare",
"tokenized",
"and",
"tagged",
"corpus",
"file",
"from",
"those",
"supplied",
"by",
"CHEMDNER",
"."
] |
python
|
train
| 54.809524 |
NICTA/revrand
|
revrand/glm.py
|
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/glm.py#L349-L418
|
def predict_moments(self, X, nsamples=200, likelihood_args=()):
r"""
Predictive moments, in particular mean and variance, of a Bayesian GLM.
This function uses Monte-Carlo sampling to evaluate the predictive mean
and variance of a Bayesian GLM. The exact expressions evaluated are,
.. math ::
\mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
\mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
- \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the
expected value of :math:`y^*` from the likelihood, and
:math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior
distribution over weights (from ``learn``). Here are a few concrete
examples of how we can use these values,
- Gaussian likelihood: these are just the predicted mean and variance,
see ``revrand.regression.predict``
- Bernoulli likelihood: The expected value is the probability,
:math:`p(y^* = 1)`, i.e. the probability of class one. The variance
may not be so useful.
- Poisson likelihood: The expected value is similar conceptually to the
Gaussian case, and is also a *continuous* value. The median (50%
quantile) from ``predict_interval`` is a discrete value. Again,
the variance in this instance may not be so useful.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d
dimensions).
nsamples : int, optional
Number of samples for sampling the expected moments from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* (excluding likelihood noise terms) for
the query inputs, X* of shape (N*,).
"""
# Get latent function samples
N = X.shape[0]
ys = np.empty((N, nsamples))
fsamples = self._sample_func(X, nsamples)
# Push samples through likelihood expected value
Eyargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args))
for i, f in enumerate(fsamples):
ys[:, i] = self.likelihood.Ey(f, *Eyargs)
# Average transformed samples (MC integration)
Ey = ys.mean(axis=1)
Vy = ((ys - Ey[:, np.newaxis])**2).mean(axis=1)
return Ey, Vy
|
[
"def",
"predict_moments",
"(",
"self",
",",
"X",
",",
"nsamples",
"=",
"200",
",",
"likelihood_args",
"=",
"(",
")",
")",
":",
"# Get latent function samples",
"N",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"ys",
"=",
"np",
".",
"empty",
"(",
"(",
"N",
",",
"nsamples",
")",
")",
"fsamples",
"=",
"self",
".",
"_sample_func",
"(",
"X",
",",
"nsamples",
")",
"# Push samples though likelihood expected value",
"Eyargs",
"=",
"tuple",
"(",
"chain",
"(",
"atleast_list",
"(",
"self",
".",
"like_hypers_",
")",
",",
"likelihood_args",
")",
")",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fsamples",
")",
":",
"ys",
"[",
":",
",",
"i",
"]",
"=",
"self",
".",
"likelihood",
".",
"Ey",
"(",
"f",
",",
"*",
"Eyargs",
")",
"# Average transformed samples (MC integration)",
"Ey",
"=",
"ys",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"Vy",
"=",
"(",
"(",
"ys",
"-",
"Ey",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"**",
"2",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"return",
"Ey",
",",
"Vy"
] |
r"""
Predictive moments, in particular mean and variance, of a Bayesian GLM.
This function uses Monte-Carlo sampling to evaluate the predictive mean
and variance of a Bayesian GLM. The exact expressions evaluated are,
.. math ::
\mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
\mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
- \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the
expected value of :math:`y^*` from the likelihood, and
:math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior
distribution over weights (from ``learn``). Here are a few concrete
examples of how we can use these values,
- Gaussian likelihood: these are just the predicted mean and variance,
see ``revrand.regression.predict``
- Bernoulli likelihood: The expected value is the probability,
:math:`p(y^* = 1)`, i.e. the probability of class one. The variance
may not be so useful.
- Poisson likelihood: The expected value is similar conceptually to the
Gaussian case, and is also a *continuous* value. The median (50%
quantile) from ``predict_interval`` is a discrete value. Again,
the variance in this instance may not be so useful.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d
dimensions).
nsamples : int, optional
Number of samples for sampling the expected moments from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* (excluding likelihood noise terms) for
the query inputs, X* of shape (N*,).
|
[
"r",
"Predictive",
"moments",
"in",
"particular",
"mean",
"and",
"variance",
"of",
"a",
"Bayesian",
"GLM",
"."
] |
python
|
train
| 43.428571 |
Calysto/calysto
|
calysto/ai/conx.py
|
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L372-L376
|
def getCorrect(self, tolerance):
"""
Returns the number of nodes within tolerance of the target.
"""
return Numeric.add.reduce(Numeric.fabs(self.target - self.activation) < tolerance)
|
[
"def",
"getCorrect",
"(",
"self",
",",
"tolerance",
")",
":",
"return",
"Numeric",
".",
"add",
".",
"reduce",
"(",
"Numeric",
".",
"fabs",
"(",
"self",
".",
"target",
"-",
"self",
".",
"activation",
")",
"<",
"tolerance",
")"
] |
Returns the number of nodes within tolerance of the target.
|
[
"Returns",
"the",
"number",
"of",
"nodes",
"within",
"tolerance",
"of",
"the",
"target",
"."
] |
python
|
train
| 42.2 |
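The reduction in the record above is easy to verify with NumPy, the successor of the old Numeric package (values are illustrative):
import numpy as np

target = np.array([0.0, 1.0, 1.0])
activation = np.array([0.1, 0.95, 0.4])
tolerance = 0.2
correct = np.add.reduce(np.fabs(target - activation) < tolerance)
print(correct)  # 2 -> two nodes are within tolerance of their targets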
DataDog/integrations-core
|
tokumx/datadog_checks/tokumx/vendor/pymongo/pool.py
|
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/pool.py#L533-L549
|
def write_command(self, request_id, msg):
"""Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
"""
self.send_message(msg, 0)
response = helpers._unpack_response(self.receive_message(1, request_id))
assert response['number_returned'] == 1
result = response['data'][0]
# Raises NotMasterError or OperationFailure.
helpers._check_command_response(result)
return result
|
[
"def",
"write_command",
"(",
"self",
",",
"request_id",
",",
"msg",
")",
":",
"self",
".",
"send_message",
"(",
"msg",
",",
"0",
")",
"response",
"=",
"helpers",
".",
"_unpack_response",
"(",
"self",
".",
"receive_message",
"(",
"1",
",",
"request_id",
")",
")",
"assert",
"response",
"[",
"'number_returned'",
"]",
"==",
"1",
"result",
"=",
"response",
"[",
"'data'",
"]",
"[",
"0",
"]",
"# Raises NotMasterError or OperationFailure.",
"helpers",
".",
"_check_command_response",
"(",
"result",
")",
"return",
"result"
] |
Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
|
[
"Send",
"insert",
"etc",
".",
"command",
"returning",
"response",
"as",
"a",
"dict",
"."
] |
python
|
train
| 34.764706 |
05bit/peewee-async
|
peewee_async.py
|
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L558-L577
|
async def select(query):
"""Perform SELECT query asynchronously.
"""
assert isinstance(query, peewee.SelectQuery),\
("Error, trying to run select coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
result = AsyncQueryWrapper(cursor=cursor, query=query)
try:
while True:
await result.fetchone()
except GeneratorExit:
pass
finally:
await cursor.release()
return result
|
[
"async",
"def",
"select",
"(",
"query",
")",
":",
"assert",
"isinstance",
"(",
"query",
",",
"peewee",
".",
"SelectQuery",
")",
",",
"(",
"\"Error, trying to run select coroutine\"",
"\"with wrong query class %s\"",
"%",
"str",
"(",
"query",
")",
")",
"cursor",
"=",
"await",
"_execute_query_async",
"(",
"query",
")",
"result",
"=",
"AsyncQueryWrapper",
"(",
"cursor",
"=",
"cursor",
",",
"query",
"=",
"query",
")",
"try",
":",
"while",
"True",
":",
"await",
"result",
".",
"fetchone",
"(",
")",
"except",
"GeneratorExit",
":",
"pass",
"finally",
":",
"await",
"cursor",
".",
"release",
"(",
")",
"return",
"result"
] |
Perform SELECT query asynchronously.
|
[
"Perform",
"SELECT",
"query",
"asynchronously",
"."
] |
python
|
train
| 24.2 |
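A usage sketch for the record above, assuming a reachable PostgreSQL database named 'test_db'; newer peewee-async releases route queries through a Manager instead of this module-level coroutine:
import asyncio
import peewee
import peewee_async

database = peewee_async.PooledPostgresqlDatabase('test_db')  # hypothetical database

class User(peewee.Model):
    name = peewee.CharField()

    class Meta:
        database = database

async def main():
    users = await peewee_async.select(User.select())
    for user in users:
        print(user.name)

asyncio.get_event_loop().run_until_complete(main())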
saltstack/salt
|
salt/modules/jboss7.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/jboss7.py#L138-L174
|
def create_datasource(jboss_config, name, datasource_properties, profile=None):
'''
Create datasource in running jboss instance
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
datasource_properties
A dictionary of datasource properties to be created:
- driver-name: mysql
- connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase'
- jndi-name: 'java:jboss/datasources/sampleDS'
- user-name: sampleuser
- password: secret
- min-pool-size: 3
- use-java-context: True
profile
The profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.create_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' 'my_datasource' '{"driver-name": "mysql", "connection-url": "jdbc:mysql://localhost:3306/sampleDatabase", "jndi-name": "java:jboss/datasources/sampleDS", "user-name": "sampleuser", "password": "secret", "min-pool-size": 3, "use-java-context": True}'
'''
log.debug("======================== MODULE FUNCTION: jboss7.create_datasource, name=%s, profile=%s", name, profile)
ds_resource_description = __get_datasource_resource_description(jboss_config, name, profile)
operation = '/subsystem=datasources/data-source="{name}":add({properties})'.format(
name=name,
properties=__get_properties_assignment_string(datasource_properties, ds_resource_description)
)
if profile is not None:
operation = '/profile="{profile}"'.format(profile=profile) + operation
return __salt__['jboss7_cli.run_operation'](jboss_config, operation, fail_on_error=False)
|
[
"def",
"create_datasource",
"(",
"jboss_config",
",",
"name",
",",
"datasource_properties",
",",
"profile",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"\"======================== MODULE FUNCTION: jboss7.create_datasource, name=%s, profile=%s\"",
",",
"name",
",",
"profile",
")",
"ds_resource_description",
"=",
"__get_datasource_resource_description",
"(",
"jboss_config",
",",
"name",
",",
"profile",
")",
"operation",
"=",
"'/subsystem=datasources/data-source=\"{name}\":add({properties})'",
".",
"format",
"(",
"name",
"=",
"name",
",",
"properties",
"=",
"__get_properties_assignment_string",
"(",
"datasource_properties",
",",
"ds_resource_description",
")",
")",
"if",
"profile",
"is",
"not",
"None",
":",
"operation",
"=",
"'/profile=\"{profile}\"'",
".",
"format",
"(",
"profile",
"=",
"profile",
")",
"+",
"operation",
"return",
"__salt__",
"[",
"'jboss7_cli.run_operation'",
"]",
"(",
"jboss_config",
",",
"operation",
",",
"fail_on_error",
"=",
"False",
")"
] |
Create datasource in running jboss instance
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
datasource_properties
A dictionary of datasource properties to be created:
- driver-name: mysql
- connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase'
- jndi-name: 'java:jboss/datasources/sampleDS'
- user-name: sampleuser
- password: secret
- min-pool-size: 3
- use-java-context: True
profile
The profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.create_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' 'my_datasource' '{"driver-name": "mysql", "connection-url": "jdbc:mysql://localhost:3306/sampleDatabase", "jndi-name": "java:jboss/datasources/sampleDS", "user-name": "sampleuser", "password": "secret", "min-pool-size": 3, "use-java-context": True}'
|
[
"Create",
"datasource",
"in",
"running",
"jboss",
"instance"
] |
python
|
train
| 48.486486 |
NuGrid/NuGridPy
|
nugridpy/mesa.py
|
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L898-L926
|
def _read_starlog(self):
""" read history.data or star.log file again"""
sldir = self.sldir
slname = self.slname
slaname = slname+'sa'
if not os.path.exists(sldir+'/'+slaname):
print('No '+self.slname+'sa file found, create new one from '+self.slname)
_cleanstarlog(sldir+'/'+slname)
else:
if self.clean_starlog:
print('Requested new '+self.slname+'sa; create new from '+self.slname)
_cleanstarlog(sldir+'/'+slname)
else:
print('Using old '+self.slname+'sa file ...')
cmd=os.popen('wc '+sldir+'/'+slaname)
cmd_out=cmd.readline()
cnum_cycles=cmd_out.split()[0]
num_cycles=int(cnum_cycles) - 6
filename=sldir+'/'+slaname
header_attr,cols,data = _read_mesafile(filename,data_rows=num_cycles)
self.cols = cols
self.header_attr = header_attr
self.data = data
|
[
"def",
"_read_starlog",
"(",
"self",
")",
":",
"sldir",
"=",
"self",
".",
"sldir",
"slname",
"=",
"self",
".",
"slname",
"slaname",
"=",
"slname",
"+",
"'sa'",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sldir",
"+",
"'/'",
"+",
"slaname",
")",
":",
"print",
"(",
"'No '",
"+",
"self",
".",
"slname",
"+",
"'sa file found, create new one from '",
"+",
"self",
".",
"slname",
")",
"_cleanstarlog",
"(",
"sldir",
"+",
"'/'",
"+",
"slname",
")",
"else",
":",
"if",
"self",
".",
"clean_starlog",
":",
"print",
"(",
"'Requested new '",
"+",
"self",
".",
"slname",
"+",
"'sa; create new from '",
"+",
"self",
".",
"slname",
")",
"_cleanstarlog",
"(",
"sldir",
"+",
"'/'",
"+",
"slname",
")",
"else",
":",
"print",
"(",
"'Using old '",
"+",
"self",
".",
"slname",
"+",
"'sa file ...'",
")",
"cmd",
"=",
"os",
".",
"popen",
"(",
"'wc '",
"+",
"sldir",
"+",
"'/'",
"+",
"slaname",
")",
"cmd_out",
"=",
"cmd",
".",
"readline",
"(",
")",
"cnum_cycles",
"=",
"cmd_out",
".",
"split",
"(",
")",
"[",
"0",
"]",
"num_cycles",
"=",
"int",
"(",
"cnum_cycles",
")",
"-",
"6",
"filename",
"=",
"sldir",
"+",
"'/'",
"+",
"slaname",
"header_attr",
",",
"cols",
",",
"data",
"=",
"_read_mesafile",
"(",
"filename",
",",
"data_rows",
"=",
"num_cycles",
")",
"self",
".",
"cols",
"=",
"cols",
"self",
".",
"header_attr",
"=",
"header_attr",
"self",
".",
"data",
"=",
"data"
] |
read history.data or star.log file again
|
[
"read",
"history",
".",
"data",
"or",
"star",
".",
"log",
"file",
"again"
] |
python
|
train
| 33.241379 |
tensorpack/tensorpack
|
examples/SimilarityLearning/mnist-embeddings.py
|
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/SimilarityLearning/mnist-embeddings.py#L25-L65
|
def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"):
r"""Loss for Siamese networks as described in the paper:
`Learning a Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.
.. math::
\frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2
Args:
left (tf.Tensor): left feature vectors of shape [Batch, N].
right (tf.Tensor): right feature vectors of shape [Batch, N].
y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
margin (float): horizon for negative examples (y==0).
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
"""
with tf.name_scope(scope):
y = tf.cast(y, tf.float32)
delta = tf.reduce_sum(tf.square(left - right), 1)
delta_sqrt = tf.sqrt(delta + 1e-10)
match_loss = delta
missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt))
loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss))
if extra:
num_pos = tf.count_nonzero(y)
num_neg = tf.count_nonzero(1 - y)
pos_dist = tf.where(tf.equal(num_pos, 0), 0.,
tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32),
name="pos-dist")
neg_dist = tf.where(tf.equal(num_neg, 0), 0.,
tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32),
name="neg-dist")
return loss, pos_dist, neg_dist
else:
return loss
|
[
"def",
"contrastive_loss",
"(",
"left",
",",
"right",
",",
"y",
",",
"margin",
",",
"extra",
"=",
"False",
",",
"scope",
"=",
"\"constrastive_loss\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"scope",
")",
":",
"y",
"=",
"tf",
".",
"cast",
"(",
"y",
",",
"tf",
".",
"float32",
")",
"delta",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"square",
"(",
"left",
"-",
"right",
")",
",",
"1",
")",
"delta_sqrt",
"=",
"tf",
".",
"sqrt",
"(",
"delta",
"+",
"1e-10",
")",
"match_loss",
"=",
"delta",
"missmatch_loss",
"=",
"tf",
".",
"square",
"(",
"tf",
".",
"nn",
".",
"relu",
"(",
"margin",
"-",
"delta_sqrt",
")",
")",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"0.5",
"*",
"(",
"y",
"*",
"match_loss",
"+",
"(",
"1",
"-",
"y",
")",
"*",
"missmatch_loss",
")",
")",
"if",
"extra",
":",
"num_pos",
"=",
"tf",
".",
"count_nonzero",
"(",
"y",
")",
"num_neg",
"=",
"tf",
".",
"count_nonzero",
"(",
"1",
"-",
"y",
")",
"pos_dist",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"equal",
"(",
"num_pos",
",",
"0",
")",
",",
"0.",
",",
"tf",
".",
"reduce_sum",
"(",
"y",
"*",
"delta_sqrt",
")",
"/",
"tf",
".",
"cast",
"(",
"num_pos",
",",
"tf",
".",
"float32",
")",
",",
"name",
"=",
"\"pos-dist\"",
")",
"neg_dist",
"=",
"tf",
".",
"where",
"(",
"tf",
".",
"equal",
"(",
"num_neg",
",",
"0",
")",
",",
"0.",
",",
"tf",
".",
"reduce_sum",
"(",
"(",
"1",
"-",
"y",
")",
"*",
"delta_sqrt",
")",
"/",
"tf",
".",
"cast",
"(",
"num_neg",
",",
"tf",
".",
"float32",
")",
",",
"name",
"=",
"\"neg-dist\"",
")",
"return",
"loss",
",",
"pos_dist",
",",
"neg_dist",
"else",
":",
"return",
"loss"
] |
r"""Loss for Siamese networks as described in the paper:
`Learning a Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.
.. math::
\frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2
Args:
left (tf.Tensor): left feature vectors of shape [Batch, N].
right (tf.Tensor): right feature vectors of shape [Batch, N].
y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
margin (float): horizon for negative examples (y==0).
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: constrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
|
[
"r",
"Loss",
"for",
"Siamese",
"networks",
"as",
"described",
"in",
"the",
"paper",
":",
"Learning",
"a",
"Similarity",
"Metric",
"Discriminatively",
"with",
"Application",
"to",
"Face",
"Verification",
"<http",
":",
"//",
"yann",
".",
"lecun",
".",
"com",
"/",
"exdb",
"/",
"publis",
"/",
"pdf",
"/",
"chopra",
"-",
"05",
".",
"pdf",
">",
"_",
"by",
"Chopra",
"et",
"al",
"."
] |
python
|
train
| 44.268293 |
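A graph-mode usage sketch for the record above, assuming TF1-style placeholders (via the compat.v1 shim on TF2) and the record's function in scope:
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
left = tf.placeholder(tf.float32, [None, 2])
right = tf.placeholder(tf.float32, [None, 2])
label = tf.placeholder(tf.int32, [None])
loss = contrastive_loss(left, right, label, margin=1.0)  # function from the record

with tf.Session() as sess:
    value = sess.run(loss, {left: np.zeros((2, 2)), right: np.ones((2, 2)), label: [1, 0]})
    print(value)  # 0.5: the similar pair pays d^2 = 2, the dissimilar pair pays 0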
coleifer/walrus
|
walrus/fts.py
|
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/fts.py#L65-L82
|
def remove(self, key, preserve_data=False):
"""
Remove the document from the search index.
:param key: Document unique identifier.
"""
if self.members.remove(key) != 1:
raise KeyError('Document with key "%s" not found.' % key)
document_hash = self._get_hash(key)
content = decode(document_hash['content'])
if not preserve_data:
document_hash.clear()
for word in self.tokenizer.tokenize(content):
word_key = self.get_key(word)
del word_key[key]
if len(word_key) == 0:
word_key.clear()
|
[
"def",
"remove",
"(",
"self",
",",
"key",
",",
"preserve_data",
"=",
"False",
")",
":",
"if",
"self",
".",
"members",
".",
"remove",
"(",
"key",
")",
"!=",
"1",
":",
"raise",
"KeyError",
"(",
"'Document with key \"%s\" not found.'",
"%",
"key",
")",
"document_hash",
"=",
"self",
".",
"_get_hash",
"(",
"key",
")",
"content",
"=",
"decode",
"(",
"document_hash",
"[",
"'content'",
"]",
")",
"if",
"not",
"preserve_data",
":",
"document_hash",
".",
"clear",
"(",
")",
"for",
"word",
"in",
"self",
".",
"tokenizer",
".",
"tokenize",
"(",
"content",
")",
":",
"word_key",
"=",
"self",
".",
"get_key",
"(",
"word",
")",
"del",
"word_key",
"[",
"key",
"]",
"if",
"len",
"(",
"word_key",
")",
"==",
"0",
":",
"word_key",
".",
"clear",
"(",
")"
] |
Remove the document from the search index.
:param key: Document unique identifier.
|
[
":",
"param",
"key",
":",
"Document",
"unique",
"identifier",
"."
] |
python
|
train
| 34.222222 |
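A usage sketch for the record above; a Redis server on localhost is assumed, and the Index() accessor is taken from walrus's Database:
from walrus import Database

db = Database()
index = db.Index('docs')  # the full-text search index that defines remove()
index.add('doc-1', 'the quick brown fox')
index.remove('doc-1')  # un-indexes every token and clears the stored hash
# a second remove('doc-1') would raise KeyError: the key is no longer a member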
nefarioustim/parker
|
parker/fileops.py
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/fileops.py#L32-L40
|
def dump_string_to_file(string, filepath):
"""Dump @string as a line to @filepath."""
create_dirs(
os.path.dirname(filepath)
)
with open(filepath, 'a') as outfile:
outfile.write(string)
outfile.write('\n')
|
[
"def",
"dump_string_to_file",
"(",
"string",
",",
"filepath",
")",
":",
"create_dirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filepath",
")",
")",
"with",
"open",
"(",
"filepath",
",",
"'a'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"string",
")",
"outfile",
".",
"write",
"(",
"'\\n'",
")"
] |
Dump @string as a line to @filepath.
|
[
"Dump"
] |
python
|
train
| 26.444444 |
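A quick check of the helper above (paths are illustrative):
from parker.fileops import dump_string_to_file

dump_string_to_file('first line', '/tmp/parker-demo/out.txt')
dump_string_to_file('second line', '/tmp/parker-demo/out.txt')
# out.txt now holds both strings, each followed by the '\n' the helper appends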
isogeo/isogeo-api-py-minsdk
|
isogeo_pysdk/checker.py
|
https://github.com/isogeo/isogeo-api-py-minsdk/blob/57a604be92c7767b26abd247012cc1a584b386a0/isogeo_pysdk/checker.py#L156-L173
|
def check_api_response(self, response):
"""Check API response and raise exceptions if needed.
:param requests.models.Response response: request response to check
"""
# check response
if response.status_code == 200:
return True
elif response.status_code >= 400:
logging.error(
"{}: {} - {} - URL: {}".format(
response.status_code,
response.reason,
response.json().get("error"),
response.request.url,
)
)
return False, response.status_code
|
[
"def",
"check_api_response",
"(",
"self",
",",
"response",
")",
":",
"# check response",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"return",
"True",
"elif",
"response",
".",
"status_code",
">=",
"400",
":",
"logging",
".",
"error",
"(",
"\"{}: {} - {} - URL: {}\"",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"reason",
",",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"\"error\"",
")",
",",
"response",
".",
"request",
".",
"url",
",",
")",
")",
"return",
"False",
",",
"response",
".",
"status_code"
] |
Check API response and raise exceptions if needed.
:param requests.models.Response response: request response to check
|
[
"Check",
"API",
"response",
"and",
"raise",
"exceptions",
"if",
"needed",
"."
] |
python
|
train
| 34.944444 |
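A usage sketch for the record above; the class name IsogeoChecker is assumed from the module, and the endpoint is illustrative:
import requests
from isogeo_pysdk.checker import IsogeoChecker  # assumed class name

checker = IsogeoChecker()
resp = requests.get('https://v1.api.isogeo.com/about')
print(checker.check_api_response(resp))  # True on 200, (False, status_code) on >= 400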
mottosso/be
|
be/vendor/requests/utils.py
|
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/utils.py#L676-L692
|
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
|
[
"def",
"to_native_string",
"(",
"string",
",",
"encoding",
"=",
"'ascii'",
")",
":",
"out",
"=",
"None",
"if",
"isinstance",
"(",
"string",
",",
"builtin_str",
")",
":",
"out",
"=",
"string",
"else",
":",
"if",
"is_py2",
":",
"out",
"=",
"string",
".",
"encode",
"(",
"encoding",
")",
"else",
":",
"out",
"=",
"string",
".",
"decode",
"(",
"encoding",
")",
"return",
"out"
] |
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
|
[
"Given",
"a",
"string",
"object",
"regardless",
"of",
"type",
"returns",
"a",
"representation",
"of",
"that",
"string",
"in",
"the",
"native",
"string",
"type",
"encoding",
"and",
"decoding",
"where",
"necessary",
".",
"This",
"assumes",
"ASCII",
"unless",
"told",
"otherwise",
"."
] |
python
|
train
| 27.647059 |
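The compat flags make the record above easy to check standalone on Python 3 (builtin_str is str, is_py2 is False):
builtin_str, is_py2 = str, False  # Python 3 values of the vendored compat flags

def to_native_string(string, encoding='ascii'):
    # same logic as the record, pasted for a standalone check
    if isinstance(string, builtin_str):
        return string
    return string.encode(encoding) if is_py2 else string.decode(encoding)

print(to_native_string(b'hello'))          # bytes -> 'hello'
print(to_native_string('already native'))  # returned unchanged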
googlecolab/jupyter_http_over_ws
|
jupyter_http_over_ws/handlers.py
|
https://github.com/googlecolab/jupyter_http_over_ws/blob/21fe278cae6fca4e6c92f92d6d786fae8fdea9b1/jupyter_http_over_ws/handlers.py#L261-L288
|
def streaming_callback(self, body_part):
"""Handles a streaming chunk of the response.
The streaming_response callback gives no indication about whether the
received chunk is the last in the stream. The "last_response" instance
variable allows us to keep track of the last received chunk of the
response. Each time this is called, the previous chunk is emitted. The
done() method is expected to be called after the response completes to
ensure that the last piece of data is sent.
Args:
body_part: A chunk of the streaming response.
"""
b64_body_string = base64.b64encode(body_part).decode('utf-8')
response = {
'message_id': self._message_id,
'data': b64_body_string,
}
if self._last_response is None:
# This represents the first chunk of data to be streamed to the caller.
# Attach status and header information to this item.
response.update(self._generate_metadata_body())
else:
self._last_response['done'] = False
self._write_message_func(self._last_response)
self._last_response = response
|
[
"def",
"streaming_callback",
"(",
"self",
",",
"body_part",
")",
":",
"b64_body_string",
"=",
"base64",
".",
"b64encode",
"(",
"body_part",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"response",
"=",
"{",
"'message_id'",
":",
"self",
".",
"_message_id",
",",
"'data'",
":",
"b64_body_string",
",",
"}",
"if",
"self",
".",
"_last_response",
"is",
"None",
":",
"# This represents the first chunk of data to be streamed to the caller.",
"# Attach status and header information to this item.",
"response",
".",
"update",
"(",
"self",
".",
"_generate_metadata_body",
"(",
")",
")",
"else",
":",
"self",
".",
"_last_response",
"[",
"'done'",
"]",
"=",
"False",
"self",
".",
"_write_message_func",
"(",
"self",
".",
"_last_response",
")",
"self",
".",
"_last_response",
"=",
"response"
] |
Handles a streaming chunk of the response.
The streaming_response callback gives no indication about whether the
received chunk is the last in the stream. The "last_response" instance
variable allows us to keep track of the last received chunk of the
response. Each time this is called, the previous chunk is emitted. The
done() method is expected to be called after the response completes to
ensure that the last piece of data is sent.
Args:
body_part: A chunk of the streaming response.
|
[
"Handles",
"a",
"streaming",
"chunk",
"of",
"the",
"response",
"."
] |
python
|
train
| 38.678571 |
ibis-project/ibis
|
ibis/expr/api.py
|
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L1300-L1330
|
def quantile(arg, quantile, interpolation='linear'):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type
"""
if isinstance(quantile, collections.abc.Sequence):
op = ops.MultiQuantile(arg, quantile, interpolation)
else:
op = ops.Quantile(arg, quantile, interpolation)
return op.to_expr()
|
[
"def",
"quantile",
"(",
"arg",
",",
"quantile",
",",
"interpolation",
"=",
"'linear'",
")",
":",
"if",
"isinstance",
"(",
"quantile",
",",
"collections",
".",
"abc",
".",
"Sequence",
")",
":",
"op",
"=",
"ops",
".",
"MultiQuantile",
"(",
"arg",
",",
"quantile",
",",
"interpolation",
")",
"else",
":",
"op",
"=",
"ops",
".",
"Quantile",
"(",
"arg",
",",
"quantile",
",",
"interpolation",
")",
"return",
"op",
".",
"to_expr",
"(",
")"
] |
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type
|
[
"Return",
"value",
"at",
"the",
"given",
"quantile",
"a",
"la",
"numpy",
".",
"percentile",
"."
] |
python
|
train
| 34.193548 |
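A sketch of both branches in the record above, built on an unbound ibis table (names are illustrative):
import ibis

t = ibis.table([('value', 'double')], name='t')
median = t.value.quantile(0.5)              # scalar input -> ops.Quantile
quartiles = t.value.quantile([0.25, 0.75])  # sequence input -> ops.MultiQuantile
print(type(median), type(quartiles))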
mapbox/mapbox-cli-py
|
mapboxcli/scripts/mapmatching.py
|
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/mapmatching.py#L15-L43
|
def match(ctx, features, profile, gps_precision):
"""Mapbox Map Matching API lets you use snap your GPS traces
to the OpenStreetMap road and path network.
$ mapbox mapmatching trace.geojson
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
features = list(features)
if len(features) != 1:
raise click.BadParameter(
"Mapmatching requires a single LineString feature")
service = mapbox.MapMatcher(access_token=access_token)
try:
res = service.match(
features[0],
profile=profile,
gps_precision=gps_precision)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
stdout = click.open_file('-', 'w')
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip())
|
[
"def",
"match",
"(",
"ctx",
",",
"features",
",",
"profile",
",",
"gps_precision",
")",
":",
"access_token",
"=",
"(",
"ctx",
".",
"obj",
"and",
"ctx",
".",
"obj",
".",
"get",
"(",
"'access_token'",
")",
")",
"or",
"None",
"features",
"=",
"list",
"(",
"features",
")",
"if",
"len",
"(",
"features",
")",
"!=",
"1",
":",
"raise",
"click",
".",
"BadParameter",
"(",
"\"Mapmatching requires a single LineString feature\"",
")",
"service",
"=",
"mapbox",
".",
"MapMatcher",
"(",
"access_token",
"=",
"access_token",
")",
"try",
":",
"res",
"=",
"service",
".",
"match",
"(",
"features",
"[",
"0",
"]",
",",
"profile",
"=",
"profile",
",",
"gps_precision",
"=",
"gps_precision",
")",
"except",
"mapbox",
".",
"errors",
".",
"ValidationError",
"as",
"exc",
":",
"raise",
"click",
".",
"BadParameter",
"(",
"str",
"(",
"exc",
")",
")",
"if",
"res",
".",
"status_code",
"==",
"200",
":",
"stdout",
"=",
"click",
".",
"open_file",
"(",
"'-'",
",",
"'w'",
")",
"click",
".",
"echo",
"(",
"res",
".",
"text",
",",
"file",
"=",
"stdout",
")",
"else",
":",
"raise",
"MapboxCLIException",
"(",
"res",
".",
"text",
".",
"strip",
"(",
")",
")"
] |
Mapbox Map Matching API lets you snap your GPS traces
to the OpenStreetMap road and path network.
$ mapbox mapmatching trace.geojson
An access token is required, see `mapbox --help`.
|
[
"Mapbox",
"Map",
"Matching",
"API",
"lets",
"you",
"use",
"snap",
"your",
"GPS",
"traces",
"to",
"the",
"OpenStreetMap",
"road",
"and",
"path",
"network",
"."
] |
python
|
train
| 31.689655 |
aetros/aetros-cli
|
aetros/client.py
|
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/client.py#L712-L740
|
def wait_for_at_least_one_message(self, channel):
"""
Reads until we receive at least one message we can unpack. Return all found messages.
"""
unpacker = msgpack.Unpacker(encoding='utf-8')
while True:
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append( len(chunk) / (end-start) )
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
if chunk == b'':
# happens only when connection broke. If nothing is to be received, it hangs instead.
self.connection_error(channel, 'Connection broken w')
return False
except Exception as error:
self.connection_error(channel, error)
raise
unpacker.feed(chunk)
messages = [m for m in unpacker]
if messages:
return messages
|
[
"def",
"wait_for_at_least_one_message",
"(",
"self",
",",
"channel",
")",
":",
"unpacker",
"=",
"msgpack",
".",
"Unpacker",
"(",
"encoding",
"=",
"'utf-8'",
")",
"while",
"True",
":",
"try",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"chunk",
"=",
"self",
".",
"ssh_channel",
"[",
"channel",
"]",
".",
"recv",
"(",
"1024",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"read_speeds",
".",
"append",
"(",
"len",
"(",
"chunk",
")",
"/",
"(",
"end",
"-",
"start",
")",
")",
"if",
"len",
"(",
"self",
".",
"read_speeds",
")",
">",
"20",
":",
"self",
".",
"read_speeds",
"=",
"self",
".",
"read_speeds",
"[",
"10",
":",
"]",
"if",
"chunk",
"==",
"b''",
":",
"# happens only when connection broke. If nothing is to be received, it hangs instead.",
"self",
".",
"connection_error",
"(",
"channel",
",",
"'Connection broken w'",
")",
"return",
"False",
"except",
"Exception",
"as",
"error",
":",
"self",
".",
"connection_error",
"(",
"channel",
",",
"error",
")",
"raise",
"unpacker",
".",
"feed",
"(",
"chunk",
")",
"messages",
"=",
"[",
"m",
"for",
"m",
"in",
"unpacker",
"]",
"if",
"messages",
":",
"return",
"messages"
] |
Reads until we receive at least one message we can unpack. Return all found messages.
|
[
"Reads",
"until",
"we",
"receive",
"at",
"least",
"one",
"message",
"we",
"can",
"unpack",
".",
"Return",
"all",
"found",
"messages",
"."
] |
python
|
train
| 35.793103 |
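The incremental-unpacking pattern in the record above can be reproduced with msgpack alone, feeding the stream in arbitrary chunks as recv() would deliver them:
import msgpack

unpacker = msgpack.Unpacker(raw=False)  # raw=False is the modern spelling of encoding='utf-8'
stream = msgpack.packb({'op': 'ping'}) + msgpack.packb({'op': 'pong'})
for i in range(0, len(stream), 3):
    unpacker.feed(stream[i:i + 3])
print([m for m in unpacker])  # [{'op': 'ping'}, {'op': 'pong'}]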
googleapis/oauth2client
|
oauth2client/contrib/keyring_storage.py
|
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/keyring_storage.py#L62-L78
|
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = client.Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
|
[
"def",
"locked_get",
"(",
"self",
")",
":",
"credentials",
"=",
"None",
"content",
"=",
"keyring",
".",
"get_password",
"(",
"self",
".",
"_service_name",
",",
"self",
".",
"_user_name",
")",
"if",
"content",
"is",
"not",
"None",
":",
"try",
":",
"credentials",
"=",
"client",
".",
"Credentials",
".",
"new_from_json",
"(",
"content",
")",
"credentials",
".",
"set_store",
"(",
"self",
")",
"except",
"ValueError",
":",
"pass",
"return",
"credentials"
] |
Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
|
[
"Retrieve",
"Credential",
"from",
"file",
"."
] |
python
|
valid
| 27.647059 |
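A usage sketch for the record above; the service and user names are illustrative, and get() is the locking wrapper that calls locked_get:
from oauth2client.contrib.keyring_storage import Storage

storage = Storage('my-app', 'alice')
credentials = storage.get()  # returns None until credentials have been stored
print(credentials)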
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/m4.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/m4.py#L40-L54
|
def generate(env):
"""Add Builders and construction variables for m4 to an Environment."""
M4Action = SCons.Action.Action('$M4COM', '$M4COMSTR')
bld = SCons.Builder.Builder(action = M4Action, src_suffix = '.m4')
env['BUILDERS']['M4'] = bld
# .m4 files might include other files, and it would be pretty hard
# to write a scanner for it, so let's just cd to the dir of the m4
# file and run from there.
# The src_suffix setup is like so: file.c.m4 -> file.c,
# file.cpp.m4 -> file.cpp etc.
env['M4'] = 'm4'
env['M4FLAGS'] = SCons.Util.CLVar('-E')
env['M4COM'] = 'cd ${SOURCE.rsrcdir} && $M4 $M4FLAGS < ${SOURCE.file} > ${TARGET.abspath}'
|
[
"def",
"generate",
"(",
"env",
")",
":",
"M4Action",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"'$M4COM'",
",",
"'$M4COMSTR'",
")",
"bld",
"=",
"SCons",
".",
"Builder",
".",
"Builder",
"(",
"action",
"=",
"M4Action",
",",
"src_suffix",
"=",
"'.m4'",
")",
"env",
"[",
"'BUILDERS'",
"]",
"[",
"'M4'",
"]",
"=",
"bld",
"# .m4 files might include other files, and it would be pretty hard",
"# to write a scanner for it, so let's just cd to the dir of the m4",
"# file and run from there.",
"# The src_suffix setup is like so: file.c.m4 -> file.c,",
"# file.cpp.m4 -> file.cpp etc.",
"env",
"[",
"'M4'",
"]",
"=",
"'m4'",
"env",
"[",
"'M4FLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"'-E'",
")",
"env",
"[",
"'M4COM'",
"]",
"=",
"'cd ${SOURCE.rsrcdir} && $M4 $M4FLAGS < ${SOURCE.file} > ${TARGET.abspath}'"
] |
Add Builders and construction variables for m4 to an Environment.
|
[
"Add",
"Builders",
"and",
"construction",
"variables",
"for",
"m4",
"to",
"an",
"Environment",
"."
] |
python
|
train
| 45.2 |
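An SConstruct sketch wiring the tool above into a build (file names are illustrative; SCons injects Environment into SConstruct files):
env = Environment(tools=['default', 'm4'])
env.M4('config.h', 'config.h.m4')  # runs: cd <srcdir> && m4 -E < config.h.m4 > config.h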
vertexproject/synapse
|
synapse/lib/module.py
|
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/module.py#L93-L107
|
def getModPath(self, *paths):
'''
Construct a path relative to this module's working directory.
Args:
*paths: A list of path strings
Notes:
This creates the module specific directory if it does not exist.
Returns:
(str): The full path (or None if no cortex dir is configured).
'''
dirn = self.getModDir()
return s_common.genpath(dirn, *paths)
|
[
"def",
"getModPath",
"(",
"self",
",",
"*",
"paths",
")",
":",
"dirn",
"=",
"self",
".",
"getModDir",
"(",
")",
"return",
"s_common",
".",
"genpath",
"(",
"dirn",
",",
"*",
"paths",
")"
] |
Construct a path relative to this module's working directory.
Args:
*paths: A list of path strings
Notes:
This creates the module specific directory if it does not exist.
Returns:
(str): The full path (or None if no cortex dir is configured).
|
[
"Construct",
"a",
"path",
"relative",
"to",
"this",
"module",
"s",
"working",
"directory",
"."
] |
python
|
train
| 28.733333 |
ARMmbed/mbed-cloud-sdk-python
|
src/mbed_cloud/_backends/enrollment/models/enrollment_id.py
|
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/enrollment/models/enrollment_id.py#L61-L74
|
def enrollment_identity(self, enrollment_identity):
"""
Sets the enrollment_identity of this EnrollmentId.
Enrollment identity.
:param enrollment_identity: The enrollment_identity of this EnrollmentId.
:type: str
"""
if enrollment_identity is None:
raise ValueError("Invalid value for `enrollment_identity`, must not be `None`")
if enrollment_identity is not None and not re.search('^A-[A-Za-z0-9:]{95}$', enrollment_identity):
raise ValueError("Invalid value for `enrollment_identity`, must be a follow pattern or equal to `/^A-[A-Za-z0-9:]{95}$/`")
self._enrollment_identity = enrollment_identity
|
[
"def",
"enrollment_identity",
"(",
"self",
",",
"enrollment_identity",
")",
":",
"if",
"enrollment_identity",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `enrollment_identity`, must not be `None`\"",
")",
"if",
"enrollment_identity",
"is",
"not",
"None",
"and",
"not",
"re",
".",
"search",
"(",
"'^A-[A-Za-z0-9:]{95}$'",
",",
"enrollment_identity",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `enrollment_identity`, must be a follow pattern or equal to `/^A-[A-Za-z0-9:]{95}$/`\"",
")",
"self",
".",
"_enrollment_identity",
"=",
"enrollment_identity"
] |
Sets the enrollment_identity of this EnrollmentId.
Enrollment identity.
:param enrollment_identity: The enrollment_identity of this EnrollmentId.
:type: str
|
[
"Sets",
"the",
"enrollment_identity",
"of",
"this",
"EnrollmentId",
".",
"Enrollment",
"identity",
"."
] |
python
|
train
| 48.785714 |
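The pattern enforced by the setter above is easy to exercise directly (the candidate value is illustrative):
import re

pattern = r'^A-[A-Za-z0-9:]{95}$'
candidate = 'A-' + 'x' * 95  # well-formed 97-character identity
print(bool(re.search(pattern, candidate)))  # True
print(bool(re.search(pattern, 'A-short')))  # False -> the setter raises ValueError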
mahmoudimus/nose-timer
|
nosetimer/plugin.py
|
https://github.com/mahmoudimus/nose-timer/blob/3d8ff21ce3a68efd6cd018ea67c32f1da27ea3f9/nosetimer/plugin.py#L236-L241
|
def _colored_time(self, time_taken, color=None):
"""Get formatted and colored string for a given time taken."""
if self.timer_no_color:
return "{0:0.4f}s".format(time_taken)
return _colorize("{0:0.4f}s".format(time_taken), color)
|
[
"def",
"_colored_time",
"(",
"self",
",",
"time_taken",
",",
"color",
"=",
"None",
")",
":",
"if",
"self",
".",
"timer_no_color",
":",
"return",
"\"{0:0.4f}s\"",
".",
"format",
"(",
"time_taken",
")",
"return",
"_colorize",
"(",
"\"{0:0.4f}s\"",
".",
"format",
"(",
"time_taken",
")",
",",
"color",
")"
] |
Get formatted and colored string for a given time taken.
|
[
"Get",
"formatted",
"and",
"colored",
"string",
"for",
"a",
"given",
"time",
"taken",
"."
] |
python
|
train
| 43.5 |
chartbeat-labs/swailing
|
swailing/logger.py
|
https://github.com/chartbeat-labs/swailing/blob/d55e0dd7af59a2ba93f7c9c46ff56f6a4080b222/swailing/logger.py#L103-L106
|
def log(self, level, msg=None, *args, **kwargs):
"""Writes log out at any arbitray level."""
return self._log(level, msg, args, kwargs)
|
[
"def",
"log",
"(",
"self",
",",
"level",
",",
"msg",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_log",
"(",
"level",
",",
"msg",
",",
"args",
",",
"kwargs",
")"
] |
Writes log out at any arbitrary level.
|
[
"Writes",
"log",
"out",
"at",
"any",
"arbitray",
"level",
"."
] |
python
|
train
| 37.25 |
ewels/MultiQC
|
multiqc/modules/deeptools/estimateReadFiltering.py
|
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/deeptools/estimateReadFiltering.py#L16-L129
|
def parse_estimateReadFiltering(self):
"""Find estimateReadFiltering output. Only the output from --table is supported."""
self.deeptools_estimateReadFiltering = dict()
for f in self.find_log_files('deeptools/estimateReadFiltering'):
parsed_data = self.parseEstimateReadFilteringFile(f)
for k, v in parsed_data.items():
if k in self.deeptools_estimateReadFiltering:
log.warning("Replacing duplicate sample {}.".format(k))
self.deeptools_estimateReadFiltering[k] = v
if len(parsed_data) > 0:
self.add_data_source(f, section='estimateReadFiltering')
if len(self.deeptools_estimateReadFiltering) > 0:
header = OrderedDict()
header["M Entries"] = {
'title': 'M entries',
'description': 'Number of entries in the file (millions)'}
header["pct_Aligned"] = {
'title': '% Aligned',
'description': 'Percent of aligned entries',
'scale': 'YlGn',
'min': 0,
'max': 100
}
header["pct_Filtered"] = {
'title': '% Tot. Filtered',
'description': 'Percent of alignments that would be filtered for any reason.',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Blacklisted"] = {
'title': '% Blacklisted',
'description': 'Percent of alignments falling (at least partially) inside a blacklisted region',
'scale': 'YlOrRd',
'min': 0,
'max': 100
}
header["pct_MAPQ"] = {
'title': '% MAPQ',
'description': 'Percent of alignments having MAPQ scores below the specified threshold',
'scale': 'YlOrBn',
'min': 0,
'max': 100
}
header["pct_Missing_Flags"] = {
'title': '% Missing Flags',
'description': 'Percent of alignments lacking at least one flag specified by --samFlagInclude',
'scale': 'PuRd',
'min': 0,
'max': 100
}
header["pct_Forbidden_Flags"] = {
'title': '% Forbidden Flags',
'description': 'Percent of alignments having at least one flag specified by --samFlagExclude',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_deepTools_Dupes"] = {
'title': '% deepTools Dupes',
'description': 'Percent of alignments marked by deepTools as being duplicates',
'scale': 'PuRd',
'min': 0,
'max': 100
}
header["pct_Duplication"] = {
'title': '% Duplication',
'description': 'Percent of alignments originally marked as being duplicates',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Singletons"] = {
'title': '% Singletons',
'description': 'Percent of alignments that are singletons (i.e., paired-end reads where the mates don\'t align as a pair)',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Strand_Filtered"] = {
'title': '% Strand Filtered',
'description': 'Percent of alignments arising from the wrong strand',
'scale': 'OrRd',
'min': 0,
'max': 100
}
tdata = dict()
for k, v in self.deeptools_estimateReadFiltering.items():
tdata[k] = {
'M Entries': v['total'] / 1000000.0,
'pct_Aligned': 100. * v['mapped'] / float(v['total']),
'pct_Filtered': 100. * v['filtered'] / float(v['total']),
'pct_Blacklisted': 100. * v['blacklisted'] / float(v['total']),
'pct_MAPQ': 100. * v['mapq'] / float(v['total']),
'pct_Missing_Flags': 100. * v['required flags'] / float(v['total']),
'pct_Forbidden_Flags': 100. * v['excluded flags'] / float(v['total']),
'pct_deepTools_Dupes': 100. * v['internal dupes'] / float(v['total']),
'pct_Duplication': 100. * v['dupes'] / float(v['total']),
'pct_Singletons': 100. * v['singletons'] / float(v['total']),
'pct_Strand_Filtered': 100. * v['strand'] / float(v['total'])
}
config = {'namespace': 'deepTools estimateReadFiltering'}
self.add_section(
name = "Filtering metrics",
anchor = "estimateReadFiltering",
description = "Estimated percentages of alignments filtered independently for each setting in `estimateReadFiltering`",
plot = table.plot(tdata, header, config)
)
return len(self.deeptools_estimateReadFiltering)
|
[
"def",
"parse_estimateReadFiltering",
"(",
"self",
")",
":",
"self",
".",
"deeptools_estimateReadFiltering",
"=",
"dict",
"(",
")",
"for",
"f",
"in",
"self",
".",
"find_log_files",
"(",
"'deeptools/estimateReadFiltering'",
")",
":",
"parsed_data",
"=",
"self",
".",
"parseEstimateReadFilteringFile",
"(",
"f",
")",
"for",
"k",
",",
"v",
"in",
"parsed_data",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"self",
".",
"deeptools_estimateReadFiltering",
":",
"log",
".",
"warning",
"(",
"\"Replacing duplicate sample {}.\"",
".",
"format",
"(",
"k",
")",
")",
"self",
".",
"deeptools_estimateReadFiltering",
"[",
"k",
"]",
"=",
"v",
"if",
"len",
"(",
"parsed_data",
")",
">",
"0",
":",
"self",
".",
"add_data_source",
"(",
"f",
",",
"section",
"=",
"'estimateReadFiltering'",
")",
"if",
"len",
"(",
"self",
".",
"deeptools_estimateReadFiltering",
")",
">",
"0",
":",
"header",
"=",
"OrderedDict",
"(",
")",
"header",
"[",
"\"M Entries\"",
"]",
"=",
"{",
"'title'",
":",
"'M entries'",
",",
"'description'",
":",
"'Number of entries in the file (millions)'",
"}",
"header",
"[",
"\"pct_Aligned\"",
"]",
"=",
"{",
"'title'",
":",
"'% Aligned'",
",",
"'description'",
":",
"'Percent of aligned entries'",
",",
"'scale'",
":",
"'YlGn'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_Filtered\"",
"]",
"=",
"{",
"'title'",
":",
"'% Tot. Filtered'",
",",
"'description'",
":",
"'Percent of alignment that would be filtered for any reason.'",
",",
"'scale'",
":",
"'OrRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_Blacklisted\"",
"]",
"=",
"{",
"'title'",
":",
"'% Blacklisted'",
",",
"'description'",
":",
"'Percent of alignments falling (at least partially) inside a blacklisted region'",
",",
"'scale'",
":",
"'YlOrRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_MAPQ\"",
"]",
"=",
"{",
"'title'",
":",
"'% MAPQ'",
",",
"'description'",
":",
"'Percent of alignments having MAPQ scores below the specified threshold'",
",",
"'scale'",
":",
"'YlOrBn'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_Missing_Flags\"",
"]",
"=",
"{",
"'title'",
":",
"'% Missing Flags'",
",",
"'description'",
":",
"'Percent of alignments lacking at least on flag specified by --samFlagInclude'",
",",
"'scale'",
":",
"'PuRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_Forbidden_Flags\"",
"]",
"=",
"{",
"'title'",
":",
"'% Forbidden Flags'",
",",
"'description'",
":",
"'Percent of alignments having at least one flag specified by --samFlagExclude'",
",",
"'scale'",
":",
"'OrRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_deepTools_Dupes\"",
"]",
"=",
"{",
"'title'",
":",
"'% deepTools Dupes'",
",",
"'description'",
":",
"'Percent of alignments marked by deepTools as being duplicates'",
",",
"'scale'",
":",
"'PuRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_Duplication\"",
"]",
"=",
"{",
"'title'",
":",
"'% Duplication'",
",",
"'description'",
":",
"'Percent of alignments originally marked as being duplicates'",
",",
"'scale'",
":",
"'OrRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_Singletons\"",
"]",
"=",
"{",
"'title'",
":",
"'% Singletons'",
",",
"'description'",
":",
"'Percent of alignments that are singletons (i.e., paired-end reads where the mates don\\'t align as a pair'",
",",
"'scale'",
":",
"'OrRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"header",
"[",
"\"pct_Strand_Filtered\"",
"]",
"=",
"{",
"'title'",
":",
"'% Strand Filtered'",
",",
"'description'",
":",
"'Percent of alignments arising from the wrong strand'",
",",
"'scale'",
":",
"'OrRd'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
"}",
"tdata",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"deeptools_estimateReadFiltering",
".",
"items",
"(",
")",
":",
"tdata",
"[",
"k",
"]",
"=",
"{",
"'M Entries'",
":",
"v",
"[",
"'total'",
"]",
"/",
"1000000.0",
",",
"'pct_Aligned'",
":",
"100.",
"*",
"v",
"[",
"'mapped'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Filtered'",
":",
"100.",
"*",
"v",
"[",
"'filtered'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Blacklisted'",
":",
"100.",
"*",
"v",
"[",
"'blacklisted'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Below_MAPQ'",
":",
"100.",
"*",
"v",
"[",
"'mapq'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Missing_Flags'",
":",
"100.",
"*",
"v",
"[",
"'required flags'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Forbidden_Flags'",
":",
"100.",
"*",
"v",
"[",
"'excluded flags'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_deepTools_Dupes'",
":",
"100.",
"*",
"v",
"[",
"'internal dupes'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Duplication'",
":",
"100.",
"*",
"v",
"[",
"'dupes'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Singletons'",
":",
"100.",
"*",
"v",
"[",
"'singletons'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
",",
"'pct_Strand_Filtered'",
":",
"100.",
"*",
"v",
"[",
"'strand'",
"]",
"/",
"float",
"(",
"v",
"[",
"'total'",
"]",
")",
"}",
"config",
"=",
"{",
"'namespace'",
":",
"'deepTools bamPEFragmentSize'",
"}",
"self",
".",
"add_section",
"(",
"name",
"=",
"\"Filtering metrics\"",
",",
"anchor",
"=",
"\"estimateReadFiltering\"",
",",
"description",
"=",
"\"Estimated percentages of alignments filtered independently for each setting in `estimateReadFiltering`\"",
",",
"plot",
"=",
"table",
".",
"plot",
"(",
"tdata",
",",
"header",
",",
"config",
")",
")",
"return",
"len",
"(",
"self",
".",
"deeptools_estimateReadFiltering",
")"
] |
Find estimateReadFiltering output. Only the output from --table is supported.
|
[
"Find",
"estimateReadFiltering",
"output",
".",
"Only",
"the",
"output",
"from",
"--",
"table",
"is",
"supported",
"."
] |
python
|
train
| 45.219298 |
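The table above is built by converting raw per-sample counts into percentages of the 'total' entry before plotting. A minimal, self-contained sketch of that pattern (the sample counts below are hypothetical, not real deepTools output):

# Counts -> percentage-table pattern (hypothetical data).
counts = {'sample1': {'total': 2000000, 'mapped': 1900000, 'dupes': 150000}}

tdata = dict()
for name, v in counts.items():
    total = float(v['total'])
    tdata[name] = {
        'M Entries': v['total'] / 1000000.0,
        'pct_Aligned': 100. * v['mapped'] / total,
        'pct_Duplication': 100. * v['dupes'] / total,
    }

print(tdata['sample1']['pct_Aligned'])  # 95.0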
petl-developers/petlx
|
petlx/bio/tabix.py
|
https://github.com/petl-developers/petlx/blob/54039e30388c7da12407d6b5c3cb865b00436004/petlx/bio/tabix.py#L10-L47
|
def fromtabix(filename, reference=None, start=None, stop=None, region=None,
header=None):
"""
Extract rows from a tabix indexed file, e.g.::
>>> import petl as etl
>>> # activate bio extensions
... import petlx.bio
>>> table1 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3')
>>> table1
+---------------+----------+----------+-----------------------------+
| #chrom | start | end | region |
+===============+==========+==========+=============================+
| 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' |
+---------------+----------+----------+-----------------------------+
...
>>> table2 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3:110000-120000')
>>> table2
+---------------+----------+----------+--------+
| #chrom | start | end | region |
+===============+==========+==========+========+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+--------+
"""
return TabixView(filename, reference, start, stop, region, header)
|
[
"def",
"fromtabix",
"(",
"filename",
",",
"reference",
"=",
"None",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"region",
"=",
"None",
",",
"header",
"=",
"None",
")",
":",
"return",
"TabixView",
"(",
"filename",
",",
"reference",
",",
"start",
",",
"stop",
",",
"region",
",",
"header",
")"
] |
Extract rows from a tabix indexed file, e.g.::
>>> import petl as etl
>>> # activate bio extensions
... import petlx.bio
>>> table1 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3')
>>> table1
+---------------+----------+----------+-----------------------------+
| #chrom | start | end | region |
+===============+==========+==========+=============================+
| 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' |
+---------------+----------+----------+-----------------------------+
...
>>> table2 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3:110000-120000')
>>> table2
+---------------+----------+----------+--------+
| #chrom | start | end | region |
+===============+==========+==========+========+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+--------+
|
[
"Extract",
"rows",
"from",
"a",
"tabix",
"indexed",
"file",
"e",
".",
"g",
".",
"::"
] |
python
|
train
| 50.078947 |
spyder-ide/spyder
|
spyder/plugins/projects/plugin.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/plugin.py#L478-L484
|
def is_valid_project(self, path):
"""Check if a directory is a valid Spyder project"""
spy_project_dir = osp.join(path, '.spyproject')
if osp.isdir(path) and osp.isdir(spy_project_dir):
return True
else:
return False
|
[
"def",
"is_valid_project",
"(",
"self",
",",
"path",
")",
":",
"spy_project_dir",
"=",
"osp",
".",
"join",
"(",
"path",
",",
"'.spyproject'",
")",
"if",
"osp",
".",
"isdir",
"(",
"path",
")",
"and",
"osp",
".",
"isdir",
"(",
"spy_project_dir",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Check if a directory is a valid Spyder project
|
[
"Check",
"if",
"a",
"directory",
"is",
"a",
"valid",
"Spyder",
"project"
] |
python
|
train
| 38.857143 |
elastic/elasticsearch-py
|
elasticsearch/client/xpack/data_frame.py
|
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/data_frame.py#L100-L118
|
def stop_data_frame_transform(self, transform_id, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-data-frame-transform.html>`_
:arg transform_id: The id of the transform to stop
:arg timeout: Controls the time to wait until the transform has stopped.
        Defaults to 30 seconds
:arg wait_for_completion: Whether to wait for the transform to fully
        stop before returning or not. Defaults to false
"""
if transform_id in SKIP_IN_PATH:
raise ValueError(
"Empty value passed for a required argument 'transform_id'."
)
return self.transport.perform_request(
"POST",
_make_path("_data_frame", "transforms", transform_id, "_stop"),
params=params,
)
|
[
"def",
"stop_data_frame_transform",
"(",
"self",
",",
"transform_id",
",",
"params",
"=",
"None",
")",
":",
"if",
"transform_id",
"in",
"SKIP_IN_PATH",
":",
"raise",
"ValueError",
"(",
"\"Empty value passed for a required argument 'transform_id'.\"",
")",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"POST\"",
",",
"_make_path",
"(",
"\"_data_frame\"",
",",
"\"transforms\"",
",",
"transform_id",
",",
"\"_stop\"",
")",
",",
"params",
"=",
"params",
",",
")"
] |
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-data-frame-transform.html>`_
:arg transform_id: The id of the transform to stop
:arg timeout: Controls the time to wait until the transform has stopped.
    Defaults to 30 seconds
:arg wait_for_completion: Whether to wait for the transform to fully
    stop before returning or not. Defaults to false
|
[
"<https",
":",
"//",
"www",
".",
"elastic",
".",
"co",
"/",
"guide",
"/",
"en",
"/",
"elasticsearch",
"/",
"reference",
"/",
"current",
"/",
"stop",
"-",
"data",
"-",
"frame",
"-",
"transform",
".",
"html",
">",
"_"
] |
python
|
train
| 43.684211 |
signalfx/signalfx-python
|
signalfx/rest.py
|
https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/rest.py#L326-L336
|
def delete_tag(self, tag_name, **kwargs):
"""delete a tag by name
Args:
tag_name (string): name of tag to delete
"""
resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),
**kwargs)
resp.raise_for_status()
# successful delete returns 204, which has no associated json
return resp
|
[
"def",
"delete_tag",
"(",
"self",
",",
"tag_name",
",",
"*",
"*",
"kwargs",
")",
":",
"resp",
"=",
"self",
".",
"_delete",
"(",
"self",
".",
"_u",
"(",
"self",
".",
"_TAG_ENDPOINT_SUFFIX",
",",
"tag_name",
")",
",",
"*",
"*",
"kwargs",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"# successful delete returns 204, which has no associated json",
"return",
"resp"
] |
delete a tag by name
Args:
tag_name (string): name of tag to delete
|
[
"delete",
"a",
"tag",
"by",
"name"
] |
python
|
train
| 34.272727 |
BerkeleyAutomation/perception
|
perception/detector.py
|
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/detector.py#L225-L298
|
def _segment_color(self, color_im, bounding_box, bgmodel, cfg, vis_segmentation=False):
""" Re-segments a color image to isolate an object of interest using foreground masking and kmeans """
# read params
foreground_mask_tolerance = cfg['foreground_mask_tolerance']
color_seg_rgb_weight = cfg['color_seg_rgb_weight']
color_seg_num_clusters = cfg['color_seg_num_clusters']
color_seg_hsv_weight = cfg['color_seg_hsv_weight']
color_seg_dist_pctile = cfg['color_seg_dist_pctile']
color_seg_dist_thresh = cfg['color_seg_dist_thresh']
color_seg_min_bg_dist = cfg['color_seg_min_bg_dist']
    min_contour_area = cfg['min_contour_area']
contour_dist_thresh = cfg['contour_dist_thresh']
# foreground masking
binary_im = color_im.foreground_mask(foreground_mask_tolerance, bgmodel=bgmodel)
binary_im = binary_im.prune_contours(area_thresh=min_contour_area, dist_thresh=contour_dist_thresh)
if binary_im is None:
return None, None, None
color_im = color_im.mask_binary(binary_im)
# kmeans segmentation
segment_im = color_im.segment_kmeans(color_seg_rgb_weight,
color_seg_num_clusters,
hue_weight=color_seg_hsv_weight)
# keep the segment that is farthest from the background
bg_dists = []
hsv_bgmodel = 255 * np.array(colorsys.rgb_to_hsv(float(bgmodel[0]) / 255,
float(bgmodel[1]) / 255,
float(bgmodel[2]) / 255))
hsv_bgmodel = np.r_[color_seg_rgb_weight * np.array(bgmodel), color_seg_hsv_weight * hsv_bgmodel[:1]]
for k in range(segment_im.num_segments-1):
seg_mask = segment_im.segment_mask(k)
color_im_segment = color_im.mask_binary(seg_mask)
color_im_segment_data = color_im_segment.nonzero_data()
color_im_segment_data = np.c_[color_seg_rgb_weight * color_im_segment_data, color_seg_hsv_weight * color_im_segment.nonzero_hsv_data()[:,:1]]
# take the median distance from the background
bg_dist = np.median(np.linalg.norm(color_im_segment_data - hsv_bgmodel, axis=1))
if vis_segmentation:
logging.info('BG Dist for segment %d: %.4f' %(k, bg_dist))
bg_dists.append(bg_dist)
# sort by distance
dists_and_indices = zip(np.arange(len(bg_dists)), bg_dists)
dists_and_indices.sort(key = lambda x: x[1], reverse=True)
# mask out the segment in the binary image
if color_seg_num_clusters > 1 and abs(dists_and_indices[0][1] - dists_and_indices[1][1]) > color_seg_dist_thresh and dists_and_indices[1][1] < color_seg_min_bg_dist:
obj_segment = dists_and_indices[0][0]
obj_seg_mask = segment_im.segment_mask(obj_segment)
binary_im = binary_im.mask_binary(obj_seg_mask)
binary_im, diff_px = binary_im.center_nonzero()
bounding_box = Box(bounding_box.min_pt.astype(np.float32) - diff_px,
bounding_box.max_pt.astype(np.float32) - diff_px,
bounding_box.frame)
if vis_segmentation:
plt.figure()
plt.subplot(1,3,1)
plt.imshow(color_im.data)
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(segment_im.data)
plt.colorbar()
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(binary_im.data, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
return binary_im, segment_im, bounding_box
|
[
"def",
"_segment_color",
"(",
"self",
",",
"color_im",
",",
"bounding_box",
",",
"bgmodel",
",",
"cfg",
",",
"vis_segmentation",
"=",
"False",
")",
":",
"# read params",
"foreground_mask_tolerance",
"=",
"cfg",
"[",
"'foreground_mask_tolerance'",
"]",
"color_seg_rgb_weight",
"=",
"cfg",
"[",
"'color_seg_rgb_weight'",
"]",
"color_seg_num_clusters",
"=",
"cfg",
"[",
"'color_seg_num_clusters'",
"]",
"color_seg_hsv_weight",
"=",
"cfg",
"[",
"'color_seg_hsv_weight'",
"]",
"color_seg_dist_pctile",
"=",
"cfg",
"[",
"'color_seg_dist_pctile'",
"]",
"color_seg_dist_thresh",
"=",
"cfg",
"[",
"'color_seg_dist_thresh'",
"]",
"color_seg_min_bg_dist",
"=",
"cfg",
"[",
"'color_seg_min_bg_dist'",
"]",
"min_contour_area",
"=",
"cfg",
"[",
"'min_contour_area'",
"]",
"contour_dist_thresh",
"=",
"cfg",
"[",
"'contour_dist_thresh'",
"]",
"# foreground masking",
"binary_im",
"=",
"color_im",
".",
"foreground_mask",
"(",
"foreground_mask_tolerance",
",",
"bgmodel",
"=",
"bgmodel",
")",
"binary_im",
"=",
"binary_im",
".",
"prune_contours",
"(",
"area_thresh",
"=",
"min_contour_area",
",",
"dist_thresh",
"=",
"contour_dist_thresh",
")",
"if",
"binary_im",
"is",
"None",
":",
"return",
"None",
",",
"None",
",",
"None",
"color_im",
"=",
"color_im",
".",
"mask_binary",
"(",
"binary_im",
")",
"# kmeans segmentation",
"segment_im",
"=",
"color_im",
".",
"segment_kmeans",
"(",
"color_seg_rgb_weight",
",",
"color_seg_num_clusters",
",",
"hue_weight",
"=",
"color_seg_hsv_weight",
")",
"# keep the segment that is farthest from the background",
"bg_dists",
"=",
"[",
"]",
"hsv_bgmodel",
"=",
"255",
"*",
"np",
".",
"array",
"(",
"colorsys",
".",
"rgb_to_hsv",
"(",
"float",
"(",
"bgmodel",
"[",
"0",
"]",
")",
"/",
"255",
",",
"float",
"(",
"bgmodel",
"[",
"1",
"]",
")",
"/",
"255",
",",
"float",
"(",
"bgmodel",
"[",
"2",
"]",
")",
"/",
"255",
")",
")",
"hsv_bgmodel",
"=",
"np",
".",
"r_",
"[",
"color_seg_rgb_weight",
"*",
"np",
".",
"array",
"(",
"bgmodel",
")",
",",
"color_seg_hsv_weight",
"*",
"hsv_bgmodel",
"[",
":",
"1",
"]",
"]",
"for",
"k",
"in",
"range",
"(",
"segment_im",
".",
"num_segments",
"-",
"1",
")",
":",
"seg_mask",
"=",
"segment_im",
".",
"segment_mask",
"(",
"k",
")",
"color_im_segment",
"=",
"color_im",
".",
"mask_binary",
"(",
"seg_mask",
")",
"color_im_segment_data",
"=",
"color_im_segment",
".",
"nonzero_data",
"(",
")",
"color_im_segment_data",
"=",
"np",
".",
"c_",
"[",
"color_seg_rgb_weight",
"*",
"color_im_segment_data",
",",
"color_seg_hsv_weight",
"*",
"color_im_segment",
".",
"nonzero_hsv_data",
"(",
")",
"[",
":",
",",
":",
"1",
"]",
"]",
"# take the median distance from the background",
"bg_dist",
"=",
"np",
".",
"median",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"color_im_segment_data",
"-",
"hsv_bgmodel",
",",
"axis",
"=",
"1",
")",
")",
"if",
"vis_segmentation",
":",
"logging",
".",
"info",
"(",
"'BG Dist for segment %d: %.4f'",
"%",
"(",
"k",
",",
"bg_dist",
")",
")",
"bg_dists",
".",
"append",
"(",
"bg_dist",
")",
"# sort by distance",
"dists_and_indices",
"=",
"zip",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"bg_dists",
")",
")",
",",
"bg_dists",
")",
"dists_and_indices",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"# mask out the segment in the binary image",
"if",
"color_seg_num_clusters",
">",
"1",
"and",
"abs",
"(",
"dists_and_indices",
"[",
"0",
"]",
"[",
"1",
"]",
"-",
"dists_and_indices",
"[",
"1",
"]",
"[",
"1",
"]",
")",
">",
"color_seg_dist_thresh",
"and",
"dists_and_indices",
"[",
"1",
"]",
"[",
"1",
"]",
"<",
"color_seg_min_bg_dist",
":",
"obj_segment",
"=",
"dists_and_indices",
"[",
"0",
"]",
"[",
"0",
"]",
"obj_seg_mask",
"=",
"segment_im",
".",
"segment_mask",
"(",
"obj_segment",
")",
"binary_im",
"=",
"binary_im",
".",
"mask_binary",
"(",
"obj_seg_mask",
")",
"binary_im",
",",
"diff_px",
"=",
"binary_im",
".",
"center_nonzero",
"(",
")",
"bounding_box",
"=",
"Box",
"(",
"bounding_box",
".",
"min_pt",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"-",
"diff_px",
",",
"bounding_box",
".",
"max_pt",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"-",
"diff_px",
",",
"bounding_box",
".",
"frame",
")",
"if",
"vis_segmentation",
":",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"subplot",
"(",
"1",
",",
"3",
",",
"1",
")",
"plt",
".",
"imshow",
"(",
"color_im",
".",
"data",
")",
"plt",
".",
"axis",
"(",
"'off'",
")",
"plt",
".",
"subplot",
"(",
"1",
",",
"3",
",",
"2",
")",
"plt",
".",
"imshow",
"(",
"segment_im",
".",
"data",
")",
"plt",
".",
"colorbar",
"(",
")",
"plt",
".",
"axis",
"(",
"'off'",
")",
"plt",
".",
"subplot",
"(",
"1",
",",
"3",
",",
"3",
")",
"plt",
".",
"imshow",
"(",
"binary_im",
".",
"data",
",",
"cmap",
"=",
"plt",
".",
"cm",
".",
"gray",
")",
"plt",
".",
"axis",
"(",
"'off'",
")",
"plt",
".",
"show",
"(",
")",
"return",
"binary_im",
",",
"segment_im",
",",
"bounding_box"
] |
Re-segments a color image to isolate an object of interest using foreground masking and kmeans
|
[
"Re",
"-",
"segments",
"a",
"color",
"image",
"to",
"isolate",
"an",
"object",
"of",
"interest",
"using",
"foreground",
"masking",
"and",
"kmeans"
] |
python
|
train
| 50.351351 |
berkeley-cocosci/Wallace
|
wallace/custom.py
|
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/custom.py#L228-L236
|
def quitter():
"""Overide the psiTurk quitter route."""
exp = experiment(session)
exp.log("Quitter route was hit.")
return Response(
dumps({"status": "success"}),
status=200,
mimetype='application/json')
|
[
"def",
"quitter",
"(",
")",
":",
"exp",
"=",
"experiment",
"(",
"session",
")",
"exp",
".",
"log",
"(",
"\"Quitter route was hit.\"",
")",
"return",
"Response",
"(",
"dumps",
"(",
"{",
"\"status\"",
":",
"\"success\"",
"}",
")",
",",
"status",
"=",
"200",
",",
"mimetype",
"=",
"'application/json'",
")"
] |
Override the psiTurk quitter route.
|
[
"Overide",
"the",
"psiTurk",
"quitter",
"route",
"."
] |
python
|
train
| 26.222222 |
openknowledge-archive/datapackage-registry-py
|
datapackage_registry/registry.py
|
https://github.com/openknowledge-archive/datapackage-registry-py/blob/02ba4d1ae6a75d8960abef0ffec3e9ec49ed26f9/datapackage_registry/registry.py#L112-L131
|
def _load_json_file_or_url(self, json_path_or_url):
'''Return the JSON at the local path or URL as a dict
This method raises DataPackageRegistryException if there were any
errors.
'''
try:
if os.path.isfile(json_path_or_url):
with open(json_path_or_url, 'r') as f:
result = json.load(f)
else:
res = requests.get(json_path_or_url)
res.raise_for_status()
result = res.json()
return result
except (ValueError,
requests.exceptions.RequestException) as e:
six.raise_from(DataPackageRegistryException(e), e)
|
[
"def",
"_load_json_file_or_url",
"(",
"self",
",",
"json_path_or_url",
")",
":",
"try",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"json_path_or_url",
")",
":",
"with",
"open",
"(",
"json_path_or_url",
",",
"'r'",
")",
"as",
"f",
":",
"result",
"=",
"json",
".",
"load",
"(",
"f",
")",
"else",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"json_path_or_url",
")",
"res",
".",
"raise_for_status",
"(",
")",
"result",
"=",
"res",
".",
"json",
"(",
")",
"return",
"result",
"except",
"(",
"ValueError",
",",
"requests",
".",
"exceptions",
".",
"RequestException",
")",
"as",
"e",
":",
"six",
".",
"raise_from",
"(",
"DataPackageRegistryException",
"(",
"e",
")",
",",
"e",
")"
] |
Return the JSON at the local path or URL as a dict
This method raises DataPackageRegistryException if there were any
errors.
|
[
"Return",
"the",
"JSON",
"at",
"the",
"local",
"path",
"or",
"URL",
"as",
"a",
"dict"
] |
python
|
train
| 34.05 |
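_load_json_file_or_url follows a common file-or-URL pattern: treat the argument as a local path if one exists, otherwise fetch it over HTTP. A stripped-down sketch, assuming only the third-party requests package (the exception wrapping from the original is omitted):

import json
import os

import requests  # third-party dependency, assumed installed


def load_json(path_or_url):
    """Return parsed JSON from a local file path or an HTTP(S) URL."""
    if os.path.isfile(path_or_url):
        with open(path_or_url, 'r') as f:
            return json.load(f)
    res = requests.get(path_or_url)
    res.raise_for_status()  # turn HTTP errors into exceptions
    return res.json()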
nerdvegas/rez
|
src/rez/vendor/pygraph/mixins/labeling.py
|
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/mixins/labeling.py#L79-L91
|
def set_edge_weight(self, edge, wt):
"""
Set the weight of an edge.
@type edge: edge
@param edge: One edge.
@type wt: number
@param wt: Edge weight.
"""
self.set_edge_properties(edge, weight=wt )
if not self.DIRECTED:
self.set_edge_properties((edge[1], edge[0]) , weight=wt )
|
[
"def",
"set_edge_weight",
"(",
"self",
",",
"edge",
",",
"wt",
")",
":",
"self",
".",
"set_edge_properties",
"(",
"edge",
",",
"weight",
"=",
"wt",
")",
"if",
"not",
"self",
".",
"DIRECTED",
":",
"self",
".",
"set_edge_properties",
"(",
"(",
"edge",
"[",
"1",
"]",
",",
"edge",
"[",
"0",
"]",
")",
",",
"weight",
"=",
"wt",
")"
] |
Set the weight of an edge.
@type edge: edge
@param edge: One edge.
@type wt: number
@param wt: Edge weight.
|
[
"Set",
"the",
"weight",
"of",
"an",
"edge",
"."
] |
python
|
train
| 27 |
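For an undirected graph stored as a directed property map, every property write must be mirrored onto the reversed edge, which is exactly what set_edge_weight does above. A toy illustration with a plain dict (the structure is hypothetical, not pygraph's internals):

# Mirror-write pattern for undirected graphs kept in a directed map.
edge_properties = {}
DIRECTED = False

def set_edge_weight(edge, wt):
    edge_properties.setdefault(edge, {})['weight'] = wt
    if not DIRECTED:
        # mirror the write onto the reversed edge
        edge_properties.setdefault((edge[1], edge[0]), {})['weight'] = wt

set_edge_weight(('a', 'b'), 3)
print(edge_properties[('b', 'a')]['weight'])  # 3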
acutesoftware/AIKIF
|
aikif/programs.py
|
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/programs.py#L127-L137
|
def get_file_info_web(self, fname, delim='<BR>\n'):
"""
gathers info on a python program in list and formats as string
"""
txt = ''
f = mod_file.File(fname[0])
txt += '<sup>' + f.name + '</sup>' + delim
txt += '<sup>' + fname[1] + '</sup>' + delim
    txt += '<sub><sup><span style="white-space:nowrap;">' + f.GetDateAsString(f.date_modified)[2:10] + '</span></sup></sub>' + delim
txt += '<sup><sup>' + str(f.size) + '</sup></sup>' + delim
return txt + '\n'
|
[
"def",
"get_file_info_web",
"(",
"self",
",",
"fname",
",",
"delim",
"=",
"'<BR>\\n'",
")",
":",
"txt",
"=",
"''",
"f",
"=",
"mod_file",
".",
"File",
"(",
"fname",
"[",
"0",
"]",
")",
"txt",
"+=",
"'<sup>'",
"+",
"f",
".",
"name",
"+",
"'</sup>'",
"+",
"delim",
"txt",
"+=",
"'<sup>'",
"+",
"fname",
"[",
"1",
"]",
"+",
"'</sup>'",
"+",
"delim",
"txt",
"+=",
"'<sub><sup><span white-space:nowrap;>'",
"+",
"f",
".",
"GetDateAsString",
"(",
"f",
".",
"date_modified",
")",
"[",
"2",
":",
"10",
"]",
"+",
"'</span></sup></sub>'",
"+",
"delim",
"txt",
"+=",
"'<sup><sup>'",
"+",
"str",
"(",
"f",
".",
"size",
")",
"+",
"'</sup></sup>'",
"+",
"delim",
"return",
"txt",
"+",
"'\\n'"
] |
gathers info on a python program in list and formats as string
|
[
"gathers",
"info",
"on",
"a",
"python",
"program",
"in",
"list",
"and",
"formats",
"as",
"string"
] |
python
|
train
| 47.090909 |
tducret/precisionmapper-python
|
python-flask/swagger_server/models/base_model_.py
|
https://github.com/tducret/precisionmapper-python/blob/462dcc5bccf6edec780b8b7bc42e8c1d717db942/python-flask/swagger_server/models/base_model_.py#L21-L23
|
def from_dict(cls: typing.Type[T], dikt) -> T:
"""Returns the dict as a model"""
return util.deserialize_model(dikt, cls)
|
[
"def",
"from_dict",
"(",
"cls",
":",
"typing",
".",
"Type",
"[",
"T",
"]",
",",
"dikt",
")",
"->",
"T",
":",
"return",
"util",
".",
"deserialize_model",
"(",
"dikt",
",",
"cls",
")"
] |
Returns the dict as a model
|
[
"Returns",
"the",
"dict",
"as",
"a",
"model"
] |
python
|
train
| 45 |
gwpy/gwpy
|
gwpy/plot/gps.py
|
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/gps.py#L118-L147
|
def set_unit(self, unit):
"""Set the GPS step scale
"""
# accept all core time units
if unit is None or (isinstance(unit, units.NamedUnit) and
unit.physical_type == 'time'):
self._unit = unit
return
# convert float to custom unit in seconds
if isinstance(unit, Number):
unit = units.Unit(unit * units.second)
# otherwise, should be able to convert to a time unit
try:
unit = units.Unit(unit)
except ValueError as exc:
# catch annoying plurals
try:
unit = units.Unit(str(unit).rstrip('s'))
except ValueError:
raise exc
# decompose and check that it's actually a time unit
dec = unit.decompose()
if dec.bases != [units.second]:
raise ValueError("Cannot set GPS unit to %s" % unit)
# check equivalent units
for other in TIME_UNITS:
if other.decompose().scale == dec.scale:
self._unit = other
return
raise ValueError("Unrecognised unit: %s" % unit)
|
[
"def",
"set_unit",
"(",
"self",
",",
"unit",
")",
":",
"# accept all core time units",
"if",
"unit",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"unit",
",",
"units",
".",
"NamedUnit",
")",
"and",
"unit",
".",
"physical_type",
"==",
"'time'",
")",
":",
"self",
".",
"_unit",
"=",
"unit",
"return",
"# convert float to custom unit in seconds",
"if",
"isinstance",
"(",
"unit",
",",
"Number",
")",
":",
"unit",
"=",
"units",
".",
"Unit",
"(",
"unit",
"*",
"units",
".",
"second",
")",
"# otherwise, should be able to convert to a time unit",
"try",
":",
"unit",
"=",
"units",
".",
"Unit",
"(",
"unit",
")",
"except",
"ValueError",
"as",
"exc",
":",
"# catch annoying plurals",
"try",
":",
"unit",
"=",
"units",
".",
"Unit",
"(",
"str",
"(",
"unit",
")",
".",
"rstrip",
"(",
"'s'",
")",
")",
"except",
"ValueError",
":",
"raise",
"exc",
"# decompose and check that it's actually a time unit",
"dec",
"=",
"unit",
".",
"decompose",
"(",
")",
"if",
"dec",
".",
"bases",
"!=",
"[",
"units",
".",
"second",
"]",
":",
"raise",
"ValueError",
"(",
"\"Cannot set GPS unit to %s\"",
"%",
"unit",
")",
"# check equivalent units",
"for",
"other",
"in",
"TIME_UNITS",
":",
"if",
"other",
".",
"decompose",
"(",
")",
".",
"scale",
"==",
"dec",
".",
"scale",
":",
"self",
".",
"_unit",
"=",
"other",
"return",
"raise",
"ValueError",
"(",
"\"Unrecognised unit: %s\"",
"%",
"unit",
")"
] |
Set the GPS step scale
|
[
"Set",
"the",
"GPS",
"step",
"scale"
] |
python
|
train
| 37.833333 |
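set_unit decides whether something is a time unit by decomposing it into SI bases. A short check with astropy.units, which gwpy's units module appears to wrap (astropy assumed installed):

from astropy import units as u

candidate = u.Unit('min')
dec = candidate.decompose()
assert dec.bases == [u.s]  # decomposes to seconds only, so a pure time unit
print(dec.scale)           # 60.0 -- one minute is 60 seconds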
ricequant/rqalpha
|
rqalpha/model/instrument.py
|
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L140-L149
|
def industry_code(self):
"""
        [str] National Economic Industry Classification code; see the "Industry" list for details (stocks only)
"""
try:
return self.__dict__["industry_code"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'industry_code' ".format(self.order_book_id)
)
|
[
"def",
"industry_code",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"__dict__",
"[",
"\"industry_code\"",
"]",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
":",
"raise",
"AttributeError",
"(",
"\"Instrument(order_book_id={}) has no attribute 'industry_code' \"",
".",
"format",
"(",
"self",
".",
"order_book_id",
")",
")"
] |
[str] National Economic Industry Classification code; see the "Industry" list for details (stocks only)
|
[
"[",
"str",
"]",
"国民经济行业分类代码,具体可参考“Industry列表”",
"(股票专用)"
] |
python
|
train
| 34.7 |
mikedh/trimesh
|
trimesh/scene/scene.py
|
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/scene.py#L411-L463
|
def set_camera(self,
angles=None,
distance=None,
center=None,
resolution=None,
fov=None):
"""
Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
Point camera should be center on
    fov : (2,) float
      Field of view (horizontal, vertical), in degrees
"""
if fov is None:
fov = np.array([60, 45])
# if no geometry nothing to set camera to
if len(self.geometry) == 0:
return
# set with no rotation by default
if angles is None:
angles = np.zeros(3)
rotation = transformations.euler_matrix(*angles)
transform = cameras.look_at(self.bounds_corners,
fov=fov,
rotation=rotation,
distance=distance,
center=center)
if hasattr(self, '_camera') and self._camera is not None:
self.camera.fov = fov
self.camera._scene = self
self.camera.transform = transform
else:
# create a new camera object
self.camera = cameras.Camera(fov=fov,
scene=self,
transform=transform)
return self.camera
|
[
"def",
"set_camera",
"(",
"self",
",",
"angles",
"=",
"None",
",",
"distance",
"=",
"None",
",",
"center",
"=",
"None",
",",
"resolution",
"=",
"None",
",",
"fov",
"=",
"None",
")",
":",
"if",
"fov",
"is",
"None",
":",
"fov",
"=",
"np",
".",
"array",
"(",
"[",
"60",
",",
"45",
"]",
")",
"# if no geometry nothing to set camera to",
"if",
"len",
"(",
"self",
".",
"geometry",
")",
"==",
"0",
":",
"return",
"# set with no rotation by default",
"if",
"angles",
"is",
"None",
":",
"angles",
"=",
"np",
".",
"zeros",
"(",
"3",
")",
"rotation",
"=",
"transformations",
".",
"euler_matrix",
"(",
"*",
"angles",
")",
"transform",
"=",
"cameras",
".",
"look_at",
"(",
"self",
".",
"bounds_corners",
",",
"fov",
"=",
"fov",
",",
"rotation",
"=",
"rotation",
",",
"distance",
"=",
"distance",
",",
"center",
"=",
"center",
")",
"if",
"hasattr",
"(",
"self",
",",
"'_camera'",
")",
"and",
"self",
".",
"_camera",
"is",
"not",
"None",
":",
"self",
".",
"camera",
".",
"fov",
"=",
"fov",
"self",
".",
"camera",
".",
"_scene",
"=",
"self",
"self",
".",
"camera",
".",
"transform",
"=",
"transform",
"else",
":",
"# create a new camera object",
"self",
".",
"camera",
"=",
"cameras",
".",
"Camera",
"(",
"fov",
"=",
"fov",
",",
"scene",
"=",
"self",
",",
"transform",
"=",
"transform",
")",
"return",
"self",
".",
"camera"
] |
Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
Point camera should be center on
fov : (2,) float
  Field of view (horizontal, vertical), in degrees
|
[
"Create",
"a",
"camera",
"object",
"for",
"self",
".",
"camera",
"and",
"add",
"a",
"transform",
"to",
"self",
".",
"graph",
"for",
"it",
"."
] |
python
|
train
| 32.301887 |
jmwri/simplejwt
|
simplejwt/jwt.py
|
https://github.com/jmwri/simplejwt/blob/0828eaace0846918d2d202f5a60167a003e88b71/simplejwt/jwt.py#L30-L40
|
def get_algorithm(alg: str) -> Callable:
"""
:param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
:type alg: str
:return: The requested algorithm.
:rtype: Callable
:raises: ValueError
"""
if alg not in algorithms:
raise ValueError('Invalid algorithm: {:s}'.format(alg))
return algorithms[alg]
|
[
"def",
"get_algorithm",
"(",
"alg",
":",
"str",
")",
"->",
"Callable",
":",
"if",
"alg",
"not",
"in",
"algorithms",
":",
"raise",
"ValueError",
"(",
"'Invalid algorithm: {:s}'",
".",
"format",
"(",
"alg",
")",
")",
"return",
"algorithms",
"[",
"alg",
"]"
] |
:param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
:type alg: str
:return: The requested algorithm.
:rtype: Callable
:raises: ValueError
|
[
":",
"param",
"alg",
":",
"The",
"name",
"of",
"the",
"requested",
"JSON",
"Web",
"Algorithm",
"<https",
":",
"//",
"tools",
".",
"ietf",
".",
"org",
"/",
"html",
"/",
"rfc7519#ref",
"-",
"JWA",
">",
"_",
".",
"RFC7518",
"<https",
":",
"//",
"tools",
".",
"ietf",
".",
"org",
"/",
"html",
"/",
"rfc7518#section",
"-",
"3",
".",
"2",
">",
"_",
"is",
"related",
".",
":",
"type",
"alg",
":",
"str",
":",
"return",
":",
"The",
"requested",
"algorithm",
".",
":",
"rtype",
":",
"Callable",
":",
"raises",
":",
"ValueError"
] |
python
|
valid
| 41.181818 |
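get_algorithm is a plain name-to-callable dispatch table. A self-contained sketch of the same pattern with an HMAC-backed table (the table below is illustrative; simplejwt defines its own algorithms mapping elsewhere in the module):

import hashlib
import hmac

algorithms = {
    'HS256': lambda key, msg: hmac.new(key, msg, hashlib.sha256).digest(),
    'HS512': lambda key, msg: hmac.new(key, msg, hashlib.sha512).digest(),
}

def get_algorithm(alg):
    if alg not in algorithms:
        raise ValueError('Invalid algorithm: {:s}'.format(alg))
    return algorithms[alg]

mac = get_algorithm('HS256')(b'secret', b'payload')
print(len(mac))  # 32 bytes for SHA-256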
limix/glimix-core
|
glimix_core/lmm/_lmm.py
|
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/lmm/_lmm.py#L209-L234
|
def beta_covariance(self):
"""
Estimates the covariance-matrix of the optimal beta.
Returns
-------
beta-covariance : ndarray
(Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.
References
----------
.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John
Wiley & Sons.
"""
from numpy_sugar.linalg import ddot
tX = self._X["tX"]
Q = concatenate(self._QS[0], axis=1)
S0 = self._QS[1]
D = self.v0 * S0 + self.v1
D = D.tolist() + [self.v1] * (len(self._y) - len(D))
D = asarray(D)
A = inv(tX.T @ (Q @ ddot(1 / D, Q.T @ tX)))
VT = self._X["VT"]
H = lstsq(VT, A, rcond=None)[0]
return lstsq(VT, H.T, rcond=None)[0]
|
[
"def",
"beta_covariance",
"(",
"self",
")",
":",
"from",
"numpy_sugar",
".",
"linalg",
"import",
"ddot",
"tX",
"=",
"self",
".",
"_X",
"[",
"\"tX\"",
"]",
"Q",
"=",
"concatenate",
"(",
"self",
".",
"_QS",
"[",
"0",
"]",
",",
"axis",
"=",
"1",
")",
"S0",
"=",
"self",
".",
"_QS",
"[",
"1",
"]",
"D",
"=",
"self",
".",
"v0",
"*",
"S0",
"+",
"self",
".",
"v1",
"D",
"=",
"D",
".",
"tolist",
"(",
")",
"+",
"[",
"self",
".",
"v1",
"]",
"*",
"(",
"len",
"(",
"self",
".",
"_y",
")",
"-",
"len",
"(",
"D",
")",
")",
"D",
"=",
"asarray",
"(",
"D",
")",
"A",
"=",
"inv",
"(",
"tX",
".",
"T",
"@",
"(",
"Q",
"@",
"ddot",
"(",
"1",
"/",
"D",
",",
"Q",
".",
"T",
"@",
"tX",
")",
")",
")",
"VT",
"=",
"self",
".",
"_X",
"[",
"\"VT\"",
"]",
"H",
"=",
"lstsq",
"(",
"VT",
",",
"A",
",",
"rcond",
"=",
"None",
")",
"[",
"0",
"]",
"return",
"lstsq",
"(",
"VT",
",",
"H",
".",
"T",
",",
"rcond",
"=",
"None",
")",
"[",
"0",
"]"
] |
Estimates the covariance-matrix of the optimal beta.
Returns
-------
beta-covariance : ndarray
(Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.
References
----------
.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John
Wiley & Sons.
|
[
"Estimates",
"the",
"covariance",
"-",
"matrix",
"of",
"the",
"optimal",
"beta",
"."
] |
python
|
valid
| 29.538462 |
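Setting aside the low-rank QS machinery, the quantity above is the generalized-least-squares covariance (Xᵀ C⁻¹ X)⁻¹ with C = v0·K + v1·I. A dense numpy sketch for small problems (synthetic data; no claim about glimix-core's internals):

import numpy as np

rng = np.random.default_rng(0)
n, p = 50, 3
X = rng.standard_normal((n, p))
G = rng.standard_normal((n, n))
K = G @ G.T / n                    # a valid covariance matrix
v0, v1 = 0.5, 1.0
C = v0 * K + v1 * np.eye(n)

Cinv = np.linalg.inv(C)
beta_cov = np.linalg.inv(X.T @ Cinv @ X)  # (X^T C^-1 X)^-1
print(beta_cov.shape)                     # (3, 3)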
rhayes777/PyAutoFit
|
autofit/mapper/model_mapper.py
|
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/model_mapper.py#L193-L202
|
def prior_prior_model_dict(self):
"""
Returns
-------
prior_prior_model_dict: {Prior: PriorModel}
A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a
prior is shared by two prior models then one of those prior models will be in this dictionary.
"""
return {prior: prior_model[1] for prior_model in self.prior_model_tuples for _, prior in
prior_model[1].prior_tuples}
|
[
"def",
"prior_prior_model_dict",
"(",
"self",
")",
":",
"return",
"{",
"prior",
":",
"prior_model",
"[",
"1",
"]",
"for",
"prior_model",
"in",
"self",
".",
"prior_model_tuples",
"for",
"_",
",",
"prior",
"in",
"prior_model",
"[",
"1",
"]",
".",
"prior_tuples",
"}"
] |
Returns
-------
prior_prior_model_dict: {Prior: PriorModel}
A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a
prior is shared by two prior models then one of those prior models will be in this dictionary.
|
[
"Returns",
"-------",
"prior_prior_model_dict",
":",
"{",
"Prior",
":",
"PriorModel",
"}",
"A",
"dictionary",
"mapping",
"priors",
"to",
"associated",
"prior",
"models",
".",
"Each",
"prior",
"will",
"only",
"have",
"one",
"prior",
"model",
";",
"if",
"a",
"prior",
"is",
"shared",
"by",
"two",
"prior",
"models",
"then",
"one",
"of",
"those",
"prior",
"models",
"will",
"be",
"in",
"this",
"dictionary",
"."
] |
python
|
train
| 49.7 |
materialsproject/pymatgen
|
pymatgen/analysis/eos.py
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/eos.py#L52-L73
|
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b/(2*a)
e0 = a*(v0**2) + b*v0 + c
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
if not vmin < v0 and v0 < vmax:
raise EOSError('The minimum volume of a fitted parabola is '
'not in the input volumes\n.')
return e0, b0, b1, v0
|
[
"def",
"_initial_guess",
"(",
"self",
")",
":",
"a",
",",
"b",
",",
"c",
"=",
"np",
".",
"polyfit",
"(",
"self",
".",
"volumes",
",",
"self",
".",
"energies",
",",
"2",
")",
"self",
".",
"eos_params",
"=",
"[",
"a",
",",
"b",
",",
"c",
"]",
"v0",
"=",
"-",
"b",
"/",
"(",
"2",
"*",
"a",
")",
"e0",
"=",
"a",
"*",
"(",
"v0",
"**",
"2",
")",
"+",
"b",
"*",
"v0",
"+",
"c",
"b0",
"=",
"2",
"*",
"a",
"*",
"v0",
"b1",
"=",
"4",
"# b1 is usually a small number like 4",
"vmin",
",",
"vmax",
"=",
"min",
"(",
"self",
".",
"volumes",
")",
",",
"max",
"(",
"self",
".",
"volumes",
")",
"if",
"not",
"vmin",
"<",
"v0",
"and",
"v0",
"<",
"vmax",
":",
"raise",
"EOSError",
"(",
"'The minimum volume of a fitted parabola is '",
"'not in the input volumes\\n.'",
")",
"return",
"e0",
",",
"b0",
",",
"b1",
",",
"v0"
] |
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
|
[
"Quadratic",
"fit",
"to",
"get",
"an",
"initial",
"guess",
"for",
"the",
"parameters",
"."
] |
python
|
train
| 29.181818 |
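The guess comes from elementary calculus on the fitted parabola E(V) = aV² + bV + c: the minimum sits at v0 = -b/(2a), e0 is E(v0), and the curvature gives b0 = 2a·v0. A numeric check on synthetic data:

import numpy as np

volumes = np.linspace(8.0, 12.0, 9)
energies = 0.3 * (volumes - 10.0) ** 2 - 5.0  # minimum at V0=10, E0=-5

a, b, c = np.polyfit(volumes, energies, 2)
v0 = -b / (2 * a)
e0 = a * v0 ** 2 + b * v0 + c
b0 = 2 * a * v0  # curvature-based bulk-modulus guess

print(round(v0, 6), round(e0, 6), round(b0, 6))  # 10.0 -5.0 6.0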
nathan-hoad/aiomanhole
|
aiomanhole/__init__.py
|
https://github.com/nathan-hoad/aiomanhole/blob/a13394c79e1878cde67aa2637ae5664df468ed04/aiomanhole/__init__.py#L88-L96
|
def handle_one_command(self):
"""Process a single command. May have many lines."""
while True:
yield from self.write_prompt()
codeobj = yield from self.read_command()
if codeobj is not None:
yield from self.run_command(codeobj)
|
[
"def",
"handle_one_command",
"(",
"self",
")",
":",
"while",
"True",
":",
"yield",
"from",
"self",
".",
"write_prompt",
"(",
")",
"codeobj",
"=",
"yield",
"from",
"self",
".",
"read_command",
"(",
")",
"if",
"codeobj",
"is",
"not",
"None",
":",
"yield",
"from",
"self",
".",
"run_command",
"(",
"codeobj",
")"
] |
Process a single command. May have many lines.
|
[
"Process",
"a",
"single",
"command",
".",
"May",
"have",
"many",
"lines",
"."
] |
python
|
train
| 32.111111 |
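The yield-from style above predates async/await. The same loop in modern coroutine syntax would look roughly like this (a sketch only; the method names mirror those above and are not claimed to be aiomanhole's current API):

async def handle_one_command(self):
    """Process a single command. May have many lines."""
    while True:
        await self.write_prompt()
        codeobj = await self.read_command()
        if codeobj is not None:
            await self.run_command(codeobj)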
konstantint/matplotlib-venn
|
matplotlib_venn/_venn2.py
|
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_venn2.py#L183-L258
|
def venn2(subsets, set_labels=('A', 'B'), set_colors=('r', 'g'), alpha=0.4, normalize_to=1.0, ax=None, subset_label_formatter=None):
'''Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
    with the overall figure size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41
'''
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in ['10', '01', '11']]
elif len(subsets) == 2:
subsets = compute_venn2_subsets(*subsets)
if subset_label_formatter is None:
subset_label_formatter = str
areas = compute_venn2_areas(subsets, normalize_to)
centers, radii = solve_venn2_circles(areas)
regions = compute_venn2_regions(centers, radii)
colors = compute_venn2_colors(set_colors)
if ax is None:
ax = gca()
prepare_venn_axes(ax, centers, radii)
# Create and add patches and subset labels
patches = [r.make_patch() for r in regions]
for (p, c) in zip(patches, colors):
if p is not None:
p.set_facecolor(c)
p.set_edgecolor('none')
p.set_alpha(alpha)
ax.add_patch(p)
label_positions = [r.label_position() for r in regions]
subset_labels = [ax.text(lbl[0], lbl[1], subset_label_formatter(s), va='center', ha='center') if lbl is not None else None for (lbl, s) in zip(label_positions, subsets)]
# Position set labels
if set_labels is not None:
padding = np.mean([r * 0.1 for r in radii])
label_positions = [centers[0] + np.array([0.0, - radii[0] - padding]),
centers[1] + np.array([0.0, - radii[1] - padding])]
labels = [ax.text(pos[0], pos[1], txt, size='large', ha='right', va='top') for (pos, txt) in zip(label_positions, set_labels)]
labels[1].set_ha('left')
else:
labels = None
return VennDiagram(patches, subset_labels, labels, centers, radii)
|
[
"def",
"venn2",
"(",
"subsets",
",",
"set_labels",
"=",
"(",
"'A'",
",",
"'B'",
")",
",",
"set_colors",
"=",
"(",
"'r'",
",",
"'g'",
")",
",",
"alpha",
"=",
"0.4",
",",
"normalize_to",
"=",
"1.0",
",",
"ax",
"=",
"None",
",",
"subset_label_formatter",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"subsets",
",",
"dict",
")",
":",
"subsets",
"=",
"[",
"subsets",
".",
"get",
"(",
"t",
",",
"0",
")",
"for",
"t",
"in",
"[",
"'10'",
",",
"'01'",
",",
"'11'",
"]",
"]",
"elif",
"len",
"(",
"subsets",
")",
"==",
"2",
":",
"subsets",
"=",
"compute_venn2_subsets",
"(",
"*",
"subsets",
")",
"if",
"subset_label_formatter",
"is",
"None",
":",
"subset_label_formatter",
"=",
"str",
"areas",
"=",
"compute_venn2_areas",
"(",
"subsets",
",",
"normalize_to",
")",
"centers",
",",
"radii",
"=",
"solve_venn2_circles",
"(",
"areas",
")",
"regions",
"=",
"compute_venn2_regions",
"(",
"centers",
",",
"radii",
")",
"colors",
"=",
"compute_venn2_colors",
"(",
"set_colors",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"gca",
"(",
")",
"prepare_venn_axes",
"(",
"ax",
",",
"centers",
",",
"radii",
")",
"# Create and add patches and subset labels",
"patches",
"=",
"[",
"r",
".",
"make_patch",
"(",
")",
"for",
"r",
"in",
"regions",
"]",
"for",
"(",
"p",
",",
"c",
")",
"in",
"zip",
"(",
"patches",
",",
"colors",
")",
":",
"if",
"p",
"is",
"not",
"None",
":",
"p",
".",
"set_facecolor",
"(",
"c",
")",
"p",
".",
"set_edgecolor",
"(",
"'none'",
")",
"p",
".",
"set_alpha",
"(",
"alpha",
")",
"ax",
".",
"add_patch",
"(",
"p",
")",
"label_positions",
"=",
"[",
"r",
".",
"label_position",
"(",
")",
"for",
"r",
"in",
"regions",
"]",
"subset_labels",
"=",
"[",
"ax",
".",
"text",
"(",
"lbl",
"[",
"0",
"]",
",",
"lbl",
"[",
"1",
"]",
",",
"subset_label_formatter",
"(",
"s",
")",
",",
"va",
"=",
"'center'",
",",
"ha",
"=",
"'center'",
")",
"if",
"lbl",
"is",
"not",
"None",
"else",
"None",
"for",
"(",
"lbl",
",",
"s",
")",
"in",
"zip",
"(",
"label_positions",
",",
"subsets",
")",
"]",
"# Position set labels",
"if",
"set_labels",
"is",
"not",
"None",
":",
"padding",
"=",
"np",
".",
"mean",
"(",
"[",
"r",
"*",
"0.1",
"for",
"r",
"in",
"radii",
"]",
")",
"label_positions",
"=",
"[",
"centers",
"[",
"0",
"]",
"+",
"np",
".",
"array",
"(",
"[",
"0.0",
",",
"-",
"radii",
"[",
"0",
"]",
"-",
"padding",
"]",
")",
",",
"centers",
"[",
"1",
"]",
"+",
"np",
".",
"array",
"(",
"[",
"0.0",
",",
"-",
"radii",
"[",
"1",
"]",
"-",
"padding",
"]",
")",
"]",
"labels",
"=",
"[",
"ax",
".",
"text",
"(",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
",",
"txt",
",",
"size",
"=",
"'large'",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'top'",
")",
"for",
"(",
"pos",
",",
"txt",
")",
"in",
"zip",
"(",
"label_positions",
",",
"set_labels",
")",
"]",
"labels",
"[",
"1",
"]",
".",
"set_ha",
"(",
"'left'",
")",
"else",
":",
"labels",
"=",
"None",
"return",
"VennDiagram",
"(",
"patches",
",",
"subset_labels",
",",
"labels",
",",
"centers",
",",
"radii",
")"
] |
Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
with the overall figure size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41
|
[
"Plots",
"a",
"2",
"-",
"set",
"area",
"-",
"weighted",
"Venn",
"diagram",
".",
"The",
"subsets",
"parameter",
"can",
"be",
"one",
"of",
"the",
"following",
":",
"-",
"A",
"list",
"(",
"or",
"a",
"tuple",
")",
"containing",
"two",
"set",
"objects",
".",
"-",
"A",
"dict",
"providing",
"sizes",
"of",
"three",
"diagram",
"regions",
".",
"The",
"regions",
"are",
"identified",
"via",
"two",
"-",
"letter",
"binary",
"codes",
"(",
"10",
"01",
"and",
"11",
")",
"hence",
"a",
"valid",
"set",
"could",
"look",
"like",
":",
"{",
"10",
":",
"10",
"01",
":",
"20",
"11",
":",
"40",
"}",
".",
"Unmentioned",
"codes",
"are",
"considered",
"to",
"map",
"to",
"0",
".",
"-",
"A",
"list",
"(",
"or",
"a",
"tuple",
")",
"with",
"three",
"numbers",
"denoting",
"the",
"sizes",
"of",
"the",
"regions",
"in",
"the",
"following",
"order",
":",
"(",
"10",
"01",
"11",
")"
] |
python
|
train
| 50.723684 |
rigetti/pyquil
|
pyquil/paulis.py
|
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/paulis.py#L952-L995
|
def trotterize(first_pauli_term, second_pauli_term, trotter_order=1,
trotter_steps=1):
"""
Create a Quil program that approximates exp( (A + B)t) where A and B are
PauliTerm operators.
:param PauliTerm first_pauli_term: PauliTerm denoted `A`
:param PauliTerm second_pauli_term: PauliTerm denoted `B`
:param int trotter_order: Optional argument indicating the Suzuki-Trotter
approximation order--only accepts orders 1, 2, 3, 4.
:param int trotter_steps: Optional argument indicating the number of products
to decompose the exponential into.
:return: Quil program
:rtype: Program
"""
if not (1 <= trotter_order < 5):
raise ValueError("trotterize only accepts trotter_order in {1, 2, 3, 4}.")
commutator = (first_pauli_term * second_pauli_term) + \
(-1 * second_pauli_term * first_pauli_term)
prog = Program()
if is_zero(commutator):
param_exp_prog_one = exponential_map(first_pauli_term)
exp_prog = param_exp_prog_one(1)
prog += exp_prog
param_exp_prog_two = exponential_map(second_pauli_term)
exp_prog = param_exp_prog_two(1)
prog += exp_prog
return prog
order_slices = suzuki_trotter(trotter_order, trotter_steps)
for coeff, operator in order_slices:
if operator == 0:
param_prog = exponential_map(coeff * first_pauli_term)
exp_prog = param_prog(1)
prog += exp_prog
else:
param_prog = exponential_map(coeff * second_pauli_term)
exp_prog = param_prog(1)
prog += exp_prog
return prog
|
[
"def",
"trotterize",
"(",
"first_pauli_term",
",",
"second_pauli_term",
",",
"trotter_order",
"=",
"1",
",",
"trotter_steps",
"=",
"1",
")",
":",
"if",
"not",
"(",
"1",
"<=",
"trotter_order",
"<",
"5",
")",
":",
"raise",
"ValueError",
"(",
"\"trotterize only accepts trotter_order in {1, 2, 3, 4}.\"",
")",
"commutator",
"=",
"(",
"first_pauli_term",
"*",
"second_pauli_term",
")",
"+",
"(",
"-",
"1",
"*",
"second_pauli_term",
"*",
"first_pauli_term",
")",
"prog",
"=",
"Program",
"(",
")",
"if",
"is_zero",
"(",
"commutator",
")",
":",
"param_exp_prog_one",
"=",
"exponential_map",
"(",
"first_pauli_term",
")",
"exp_prog",
"=",
"param_exp_prog_one",
"(",
"1",
")",
"prog",
"+=",
"exp_prog",
"param_exp_prog_two",
"=",
"exponential_map",
"(",
"second_pauli_term",
")",
"exp_prog",
"=",
"param_exp_prog_two",
"(",
"1",
")",
"prog",
"+=",
"exp_prog",
"return",
"prog",
"order_slices",
"=",
"suzuki_trotter",
"(",
"trotter_order",
",",
"trotter_steps",
")",
"for",
"coeff",
",",
"operator",
"in",
"order_slices",
":",
"if",
"operator",
"==",
"0",
":",
"param_prog",
"=",
"exponential_map",
"(",
"coeff",
"*",
"first_pauli_term",
")",
"exp_prog",
"=",
"param_prog",
"(",
"1",
")",
"prog",
"+=",
"exp_prog",
"else",
":",
"param_prog",
"=",
"exponential_map",
"(",
"coeff",
"*",
"second_pauli_term",
")",
"exp_prog",
"=",
"param_prog",
"(",
"1",
")",
"prog",
"+=",
"exp_prog",
"return",
"prog"
] |
Create a Quil program that approximates exp( (A + B)t) where A and B are
PauliTerm operators.
:param PauliTerm first_pauli_term: PauliTerm denoted `A`
:param PauliTerm second_pauli_term: PauliTerm denoted `B`
:param int trotter_order: Optional argument indicating the Suzuki-Trotter
approximation order--only accepts orders 1, 2, 3, 4.
:param int trotter_steps: Optional argument indicating the number of products
to decompose the exponential into.
:return: Quil program
:rtype: Program
|
[
"Create",
"a",
"Quil",
"program",
"that",
"approximates",
"exp",
"(",
"(",
"A",
"+",
"B",
")",
"t",
")",
"where",
"A",
"and",
"B",
"are",
"PauliTerm",
"operators",
"."
] |
python
|
train
| 37.454545 |
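First-order Suzuki-Trotter replaces exp((A + B)t) with (exp(At/n) exp(Bt/n))^n, and the error shrinks as n grows whenever A and B fail to commute. A numeric sanity check with 2x2 Pauli matrices (needs numpy and scipy; independent of pyquil's own implementation):

import numpy as np
from scipy.linalg import expm

X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
t, n = 0.5, 100

exact = expm(-1j * t * (X + Z))
step = expm(-1j * (t / n) * X) @ expm(-1j * (t / n) * Z)
approx = np.linalg.matrix_power(step, n)

print(np.linalg.norm(exact - approx))  # small; first-order error is O(t**2 / n)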
lrq3000/pyFileFixity
|
pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/classtracker_stats.py#L115-L130
|
def dump_stats(self, fdump, close=True):
"""
Dump the logged data to a file.
        The argument `fdump` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if isinstance(fdump, type('')):
fdump = open(fdump, 'wb')
pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
if close:
fdump.close()
|
[
"def",
"dump_stats",
"(",
"self",
",",
"fdump",
",",
"close",
"=",
"True",
")",
":",
"if",
"self",
".",
"tracker",
":",
"self",
".",
"tracker",
".",
"stop_periodic_snapshots",
"(",
")",
"if",
"isinstance",
"(",
"fdump",
",",
"type",
"(",
"''",
")",
")",
":",
"fdump",
"=",
"open",
"(",
"fdump",
",",
"'wb'",
")",
"pickle",
".",
"dump",
"(",
"self",
".",
"index",
",",
"fdump",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"pickle",
".",
"dump",
"(",
"self",
".",
"snapshots",
",",
"fdump",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"if",
"close",
":",
"fdump",
".",
"close",
"(",
")"
] |
Dump the logged data to a file.
The argument `fdump` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
|
[
"Dump",
"the",
"logged",
"data",
"to",
"a",
"file",
".",
"The",
"argument",
"file",
"can",
"be",
"either",
"a",
"filename",
"or",
"an",
"open",
"file",
"object",
"that",
"requires",
"write",
"access",
".",
"close",
"controls",
"if",
"the",
"file",
"is",
"closed",
"before",
"leaving",
"this",
"method",
"(",
"the",
"default",
"behaviour",
")",
"."
] |
python
|
train
| 40.5625 |
JelleAalbers/multihist
|
multihist.py
|
https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L545-L558
|
def average(self, axis):
"""Returns d-1 dimensional histogram of (estimated) mean value of axis
NB this is very different from averaging over the axis!!!
"""
axis = self.get_axis_number(axis)
avg_hist = np.ma.average(self.all_axis_bin_centers(axis),
weights=self.histogram, axis=axis)
if self.dimensions == 2:
new_hist = Hist1d
else:
new_hist = Histdd
return new_hist.from_histogram(histogram=avg_hist,
bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
axis_names=self.axis_names_without(axis))
|
[
"def",
"average",
"(",
"self",
",",
"axis",
")",
":",
"axis",
"=",
"self",
".",
"get_axis_number",
"(",
"axis",
")",
"avg_hist",
"=",
"np",
".",
"ma",
".",
"average",
"(",
"self",
".",
"all_axis_bin_centers",
"(",
"axis",
")",
",",
"weights",
"=",
"self",
".",
"histogram",
",",
"axis",
"=",
"axis",
")",
"if",
"self",
".",
"dimensions",
"==",
"2",
":",
"new_hist",
"=",
"Hist1d",
"else",
":",
"new_hist",
"=",
"Histdd",
"return",
"new_hist",
".",
"from_histogram",
"(",
"histogram",
"=",
"avg_hist",
",",
"bin_edges",
"=",
"itemgetter",
"(",
"*",
"self",
".",
"other_axes",
"(",
"axis",
")",
")",
"(",
"self",
".",
"bin_edges",
")",
",",
"axis_names",
"=",
"self",
".",
"axis_names_without",
"(",
"axis",
")",
")"
] |
Returns d-1 dimensional histogram of (estimated) mean value of axis
NB this is very different from averaging over the axis!!!
|
[
"Returns",
"d",
"-",
"1",
"dimensional",
"histogram",
"of",
"(",
"estimated",
")",
"mean",
"value",
"of",
"axis",
"NB",
"this",
"is",
"very",
"different",
"from",
"averaging",
"over",
"the",
"axis!!!"
] |
python
|
train
| 49.428571 |
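The 'mean value of axis' computed above is just a weighted average of bin centers, with histogram counts as weights. A two-bin illustration of the underlying np.ma.average call:

import numpy as np

counts = np.array([[1, 3], [2, 2]])             # 2x2 histogram of counts
y_centers = np.array([[0.5, 1.5], [0.5, 1.5]])  # bin centers along axis=1

mean_y = np.ma.average(y_centers, weights=counts, axis=1)
print(mean_y)  # [1.25 1.0] -- one estimated mean per remaining bin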
dwavesystems/dwave_networkx
|
dwave_networkx/generators/chimera.py
|
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/generators/chimera.py#L401-L418
|
def ints(self, qlist):
"""
Converts a sequence of chimera_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The chimera_index node labels
Returns
-------
    rlist : iterable of ints
        The linear_index node labels corresponding to qlist
"""
m, n, t = self.args
return (((n*i + j)*2 + u)*t + k for (i, j, u, k) in qlist)
|
[
"def",
"ints",
"(",
"self",
",",
"qlist",
")",
":",
"m",
",",
"n",
",",
"t",
"=",
"self",
".",
"args",
"return",
"(",
"(",
"(",
"n",
"*",
"i",
"+",
"j",
")",
"*",
"2",
"+",
"u",
")",
"*",
"t",
"+",
"k",
"for",
"(",
"i",
",",
"j",
",",
"u",
",",
"k",
")",
"in",
"qlist",
")"
] |
Converts a sequence of chimera_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The chimera_index node labels
Returns
-------
rlist : iterable of ints
The linear_index node labels corresponding to qlist
|
[
"Converts",
"a",
"sequence",
"of",
"chimera_index",
"node",
"labels",
"into",
"linear_index",
"node",
"labels",
"preserving",
"order"
] |
python
|
train
| 26.944444 |
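The row above is pure index arithmetic: for an m x n grid of K_{t,t} unit cells, a chimera coordinate (i, j, u, k) maps to the linear index ((n*i + j)*2 + u)*t + k. A standalone check, no dwave_networkx needed:

```python
def chimera_to_linear(coord, n, t):
    """Map one (i, j, u, k) chimera coordinate to a linear index."""
    i, j, u, k = coord
    return ((n * i + j) * 2 + u) * t + k

# 2x2 grid of K_{4,4} cells: n=2 columns, t=4 qubits per shore.
coords = [(0, 0, 0, 0), (0, 1, 1, 3), (1, 1, 1, 3)]
print([chimera_to_linear(c, n=2, t=4) for c in coords])
# -> [0, 15, 31]; (1, 1, 1, 3) is the last qubit of the 32-qubit graph.
```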
pypa/pipenv
|
pipenv/vendor/jinja2/lexer.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/lexer.py#L178-L186
|
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type)
|
[
"def",
"describe_token_expr",
"(",
"expr",
")",
":",
"if",
"':'",
"in",
"expr",
":",
"type",
",",
"value",
"=",
"expr",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"type",
"==",
"'name'",
":",
"return",
"value",
"else",
":",
"type",
"=",
"expr",
"return",
"_describe_token_type",
"(",
"type",
")"
] |
Like `describe_token` but for token expressions.
|
[
"Like",
"describe_token",
"but",
"for",
"token",
"expressions",
"."
] |
python
|
train
| 29.111111 |
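Behaviorally, the helper above returns the value part for `name:` expressions and a type description otherwise. A standalone re-implementation of the split logic, with the `_describe_token_type` lookup stubbed out since it lives elsewhere in the lexer:

```python
def describe_token_expr(expr, describe_type=lambda t: t):
    """Split 'type:value' token expressions; 'name:foo' describes as 'foo'."""
    if ':' in expr:
        type_, value = expr.split(':', 1)
        if type_ == 'name':
            return value
    else:
        type_ = expr
    return describe_type(type_)

print(describe_token_expr('name:endfor'))  # -> 'endfor'
print(describe_token_expr('integer'))      # -> 'integer' (via the stub)
```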
vtkiorg/vtki
|
vtki/geometric_objects.py
|
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L194-L236
|
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,
i_resolution=10, j_resolution=10):
"""
Create a plane
Parameters
----------
center : list or np.ndarray
Location of the centroid in [x, y, z]
direction : list or np.ndarray
        Direction the plane points to in [x, y, z]
i_size : float
Size of the plane in the i direction.
j_size : float
        Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
plane : vtki.PolyData
Plane mesh
"""
planeSource = vtk.vtkPlaneSource()
planeSource.SetXResolution(i_resolution)
planeSource.SetYResolution(j_resolution)
planeSource.Update()
surf = PolyData(planeSource.GetOutput())
surf.points[:, 0] *= i_size
surf.points[:, 1] *= j_size
surf.rotate_y(-90)
translate(surf, center, direction)
return surf
|
[
"def",
"Plane",
"(",
"center",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
",",
"direction",
"=",
"(",
"0",
",",
"0",
",",
"1",
")",
",",
"i_size",
"=",
"1",
",",
"j_size",
"=",
"1",
",",
"i_resolution",
"=",
"10",
",",
"j_resolution",
"=",
"10",
")",
":",
"planeSource",
"=",
"vtk",
".",
"vtkPlaneSource",
"(",
")",
"planeSource",
".",
"SetXResolution",
"(",
"i_resolution",
")",
"planeSource",
".",
"SetYResolution",
"(",
"j_resolution",
")",
"planeSource",
".",
"Update",
"(",
")",
"surf",
"=",
"PolyData",
"(",
"planeSource",
".",
"GetOutput",
"(",
")",
")",
"surf",
".",
"points",
"[",
":",
",",
"0",
"]",
"*=",
"i_size",
"surf",
".",
"points",
"[",
":",
",",
"1",
"]",
"*=",
"j_size",
"surf",
".",
"rotate_y",
"(",
"-",
"90",
")",
"translate",
"(",
"surf",
",",
"center",
",",
"direction",
")",
"return",
"surf"
] |
Create a plane
Parameters
----------
center : list or np.ndarray
Location of the centroid in [x, y, z]
direction : list or np.ndarray
Direction the plane points to in [x, y, z]
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
plane : vtki.PolyData
Plane mesh
|
[
"Create",
"a",
"plane"
] |
python
|
train
| 23.534884 |
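A hedged usage sketch for the `Plane` row above; it assumes VTK is installed and that `Plane` is exported at the vtki package level (as it is in vtki's successor, pyvista):

```python
import vtki  # assumption: vtki (and its VTK dependency) is installed

# 2 x 1 plane facing +z, with 20 x 10 cells of resolution.
plane = vtki.Plane(center=(0, 0, 0), direction=(0, 0, 1),
                   i_size=2, j_size=1,
                   i_resolution=20, j_resolution=10)
print(plane.n_points)  # (20 + 1) * (10 + 1) = 231 points
```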
idank/bashlex
|
bashlex/parser.py
|
https://github.com/idank/bashlex/blob/800cb7e3c634eaa3c81f8a8648fd7fd4e27050ac/bashlex/parser.py#L15-L28
|
def p_inputunit(p):
'''inputunit : simple_list simple_list_terminator
| NEWLINE
| error NEWLINE
| EOF'''
# XXX
if p.lexer._parserstate & flags.parser.CMDSUBST:
p.lexer._parserstate.add(flags.parser.EOFTOKEN)
if isinstance(p[1], ast.node):
p[0] = p[1]
# accept right here in case the input contains more lines that are
# not part of the current command
p.accept()
|
[
"def",
"p_inputunit",
"(",
"p",
")",
":",
"# XXX",
"if",
"p",
".",
"lexer",
".",
"_parserstate",
"&",
"flags",
".",
"parser",
".",
"CMDSUBST",
":",
"p",
".",
"lexer",
".",
"_parserstate",
".",
"add",
"(",
"flags",
".",
"parser",
".",
"EOFTOKEN",
")",
"if",
"isinstance",
"(",
"p",
"[",
"1",
"]",
",",
"ast",
".",
"node",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"# accept right here in case the input contains more lines that are",
"# not part of the current command",
"p",
".",
"accept",
"(",
")"
] |
inputunit : simple_list simple_list_terminator
| NEWLINE
| error NEWLINE
| EOF
|
[
"inputunit",
":",
"simple_list",
"simple_list_terminator",
"|",
"NEWLINE",
"|",
"error",
"NEWLINE",
"|",
"EOF"
] |
python
|
train
| 32.642857 |
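This grammar production is internal to bashlex's PLY parser; from the outside the usual entry point is `bashlex.parse`, which returns one AST node per top-level input unit. A minimal sketch:

```python
import bashlex

# Each top-level input unit becomes one ast.node in the returned list.
for tree in bashlex.parse('echo hello; echo world'):
    print(tree.kind, tree.pos)
```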
numenta/nupic
|
src/nupic/algorithms/fdrutilities.py
|
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L65-L94
|
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
"""
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
"""
coincMatrix0 = SM32(int(nCoinc), int(length))
theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
for rowIdx in xrange(nCoinc):
coinc = numpy.array(random.sample(xrange(length),
activity), dtype=numpy.uint32)
coinc.sort()
coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
# This is the right code to use, it's faster, but it derails the unit
# testing of the pooling for now.
coincMatrix = SM32(int(nCoinc), int(length))
coincMatrix.initializeWithFixedNNZR(activity)
return coincMatrix0
|
[
"def",
"generateCoincMatrix",
"(",
"nCoinc",
"=",
"10",
",",
"length",
"=",
"500",
",",
"activity",
"=",
"50",
")",
":",
"coincMatrix0",
"=",
"SM32",
"(",
"int",
"(",
"nCoinc",
")",
",",
"int",
"(",
"length",
")",
")",
"theOnes",
"=",
"numpy",
".",
"array",
"(",
"[",
"1.0",
"]",
"*",
"activity",
",",
"dtype",
"=",
"numpy",
".",
"float32",
")",
"for",
"rowIdx",
"in",
"xrange",
"(",
"nCoinc",
")",
":",
"coinc",
"=",
"numpy",
".",
"array",
"(",
"random",
".",
"sample",
"(",
"xrange",
"(",
"length",
")",
",",
"activity",
")",
",",
"dtype",
"=",
"numpy",
".",
"uint32",
")",
"coinc",
".",
"sort",
"(",
")",
"coincMatrix0",
".",
"setRowFromSparse",
"(",
"rowIdx",
",",
"coinc",
",",
"theOnes",
")",
"# This is the right code to use, it's faster, but it derails the unit",
"# testing of the pooling for now.",
"coincMatrix",
"=",
"SM32",
"(",
"int",
"(",
"nCoinc",
")",
",",
"int",
"(",
"length",
")",
")",
"coincMatrix",
".",
"initializeWithFixedNNZR",
"(",
"activity",
")",
"return",
"coincMatrix0"
] |
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
|
[
"Generate",
"a",
"coincidence",
"matrix",
".",
"This",
"is",
"used",
"to",
"generate",
"random",
"inputs",
"to",
"the",
"temporal",
"learner",
"and",
"to",
"compare",
"the",
"predicted",
"output",
"against",
"."
] |
python
|
valid
| 35.2 |
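Without nupic's SM32 sparse-matrix class, the same "nCoinc rows with exactly `activity` bits on" construction can be sketched in plain numpy (dense here purely for illustration):

```python
import numpy as np

def generate_coinc_matrix(n_coinc=10, length=500, activity=50, seed=0):
    """Dense stand-in: each row has exactly `activity` ones at random columns."""
    rng = np.random.default_rng(seed)
    matrix = np.zeros((n_coinc, length), dtype=np.float32)
    for row in range(n_coinc):
        cols = rng.choice(length, size=activity, replace=False)
        matrix[row, cols] = 1.0
    return matrix

m = generate_coinc_matrix(4, 20, 5)
print(m.sum(axis=1))  # -> [5. 5. 5. 5.]
```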
google/prettytensor
|
prettytensor/pretty_tensor_image_methods.py
|
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_image_methods.py#L75-L103
|
def max_pool(input_layer, kernel, stride, edges=PAD_SAME, name=PROVIDED):
"""Performs max pooling.
`kernel` is the patch that will be pooled and it describes the pooling along
each of the 4 dimensions. `stride` is how big to take each step.
Because more often than not, pooling is only done
on the width and height of the image, the following shorthands are supported:
* scalar (e.g. 3): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* singleton list (e.g. [3]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* list of length 2 (e.g. [3, 2]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 2, 1]`).
Args:
input_layer: The chainable object, supplied.
kernel: The size of the patch for the pool, either an int or a length 1 or
2 sequence (if length 1 or int, it is expanded).
stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
int, length 1 or 2, the stride in the first and last dimensions are 1.
edges: Either `pt.PAD_SAME` or `pt.PAD_VALID` to control the padding.
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
Handle to this layer.
"""
return _pool(input_layer, tf.nn.max_pool, kernel, stride, edges, name)
|
[
"def",
"max_pool",
"(",
"input_layer",
",",
"kernel",
",",
"stride",
",",
"edges",
"=",
"PAD_SAME",
",",
"name",
"=",
"PROVIDED",
")",
":",
"return",
"_pool",
"(",
"input_layer",
",",
"tf",
".",
"nn",
".",
"max_pool",
",",
"kernel",
",",
"stride",
",",
"edges",
",",
"name",
")"
] |
Performs max pooling.
`kernel` is the patch that will be pooled and it describes the pooling along
each of the 4 dimensions. `stride` is how big to take each step.
Because more often than not, pooling is only done
on the width and height of the image, the following shorthands are supported:
* scalar (e.g. 3): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* singleton list (e.g. [3]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* list of length 2 (e.g. [3, 2]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 2, 1]`).
Args:
input_layer: The chainable object, supplied.
kernel: The size of the patch for the pool, either an int or a length 1 or
2 sequence (if length 1 or int, it is expanded).
stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
int, length 1 or 2, the stride in the first and last dimensions are 1.
edges: Either `pt.PAD_SAME` or `pt.PAD_VALID` to control the padding.
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
Handle to this layer.
|
[
"Performs",
"max",
"pooling",
"."
] |
python
|
train
| 43.965517 |
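The docstring's kernel/stride shorthand (`3`, `[3]`, and `[3, 2]` all meaning a `[1, r, c, 1]` patch) is easy to make concrete. A standalone sketch of that expansion rule; prettytensor's internal `_pool` does something equivalent, but its exact details are an assumption here:

```python
def expand_patch(spec):
    """Expand an int or length-1/2/4 sequence into a [b, r, c, d] 4-list."""
    if isinstance(spec, int):
        spec = [spec]
    if len(spec) == 1:
        return [1, spec[0], spec[0], 1]
    if len(spec) == 2:
        return [1, spec[0], spec[1], 1]
    if len(spec) == 4:
        return list(spec)
    raise ValueError('expected an int or a sequence of length 1, 2 or 4')

print(expand_patch(3))       # -> [1, 3, 3, 1]
print(expand_patch([3, 2]))  # -> [1, 3, 2, 1]
```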
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_lexer.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_lexer.py#L20-L61
|
def get_context(self, string):
""" Assuming the cursor is at the end of the specified string, get the
context (a list of names) for the symbol at cursor position.
"""
context = []
reversed_tokens = list(self._lexer.get_tokens(string))
reversed_tokens.reverse()
# Pygments often tacks on a newline when none is specified in the input.
# Remove this newline.
if reversed_tokens and reversed_tokens[0][1].endswith('\n') and \
not string.endswith('\n'):
reversed_tokens.pop(0)
current_op = ''
for token, text in reversed_tokens:
if is_token_subtype(token, Token.Name):
                # Handle a trailing separator, e.g. 'foo.bar.'
if current_op in self._name_separators:
if not context:
context.insert(0, '')
                # Handle non-separator operators and punctuation.
elif current_op:
break
context.insert(0, text)
current_op = ''
# Pygments doesn't understand that, e.g., '->' is a single operator
# in C++. This is why we have to build up an operator from
# potentially several tokens.
elif token is Token.Operator or token is Token.Punctuation:
current_op = text + current_op
# Break on anything that is not a Operator, Punctuation, or Name.
else:
break
return context
|
[
"def",
"get_context",
"(",
"self",
",",
"string",
")",
":",
"context",
"=",
"[",
"]",
"reversed_tokens",
"=",
"list",
"(",
"self",
".",
"_lexer",
".",
"get_tokens",
"(",
"string",
")",
")",
"reversed_tokens",
".",
"reverse",
"(",
")",
"# Pygments often tacks on a newline when none is specified in the input.",
"# Remove this newline.",
"if",
"reversed_tokens",
"and",
"reversed_tokens",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"endswith",
"(",
"'\\n'",
")",
"and",
"not",
"string",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"reversed_tokens",
".",
"pop",
"(",
"0",
")",
"current_op",
"=",
"''",
"for",
"token",
",",
"text",
"in",
"reversed_tokens",
":",
"if",
"is_token_subtype",
"(",
"token",
",",
"Token",
".",
"Name",
")",
":",
"# Handle a trailing separator, e.g 'foo.bar.'",
"if",
"current_op",
"in",
"self",
".",
"_name_separators",
":",
"if",
"not",
"context",
":",
"context",
".",
"insert",
"(",
"0",
",",
"''",
")",
"# Handle non-separator operators and punction.",
"elif",
"current_op",
":",
"break",
"context",
".",
"insert",
"(",
"0",
",",
"text",
")",
"current_op",
"=",
"''",
"# Pygments doesn't understand that, e.g., '->' is a single operator",
"# in C++. This is why we have to build up an operator from",
"# potentially several tokens.",
"elif",
"token",
"is",
"Token",
".",
"Operator",
"or",
"token",
"is",
"Token",
".",
"Punctuation",
":",
"current_op",
"=",
"text",
"+",
"current_op",
"# Break on anything that is not a Operator, Punctuation, or Name.",
"else",
":",
"break",
"return",
"context"
] |
Assuming the cursor is at the end of the specified string, get the
context (a list of names) for the symbol at cursor position.
|
[
"Assuming",
"the",
"cursor",
"is",
"at",
"the",
"end",
"of",
"the",
"specified",
"string",
"get",
"the",
"context",
"(",
"a",
"list",
"of",
"names",
")",
"for",
"the",
"symbol",
"at",
"cursor",
"position",
"."
] |
python
|
test
| 35.97619 |
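A runnable sketch of the same idea, assuming pygments is installed. This is a simplified walk that only treats `.` as a name separator, whereas the row above accepts anything in `self._name_separators`:

```python
from pygments.lexers import PythonLexer
from pygments.token import Token

def dotted_context(source):
    tokens = list(PythonLexer().get_tokens(source))
    if tokens and tokens[-1][1].endswith('\n'):  # pygments appends a newline
        tokens.pop()
    tokens.reverse()
    context, current_op = [], ''
    for token, text in tokens:
        if token in Token.Name:
            if current_op and current_op != '.':
                break
            context.insert(0, text)
            current_op = ''
        elif token in Token.Operator or token in Token.Punctuation:
            current_op = text + current_op
        else:
            break
    return context

print(dotted_context('x = foo.bar.ba'))  # -> ['foo', 'bar', 'ba']
```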
vecnet/vecnet.openmalaria
|
vecnet/openmalaria/scenario/interventions.py
|
https://github.com/vecnet/vecnet.openmalaria/blob/795bc9d1b81a6c664f14879edda7a7c41188e95a/vecnet/openmalaria/scenario/interventions.py#L989-L1000
|
def anopheles(self):
"""
:rtype: Anopheles
"""
list_of_anopheles = []
desc = self.et.find("description")
if desc is not None:
for anopheles in desc.findall("anopheles"):
list_of_anopheles.append(Anopheles(anopheles))
return list_of_anopheles
|
[
"def",
"anopheles",
"(",
"self",
")",
":",
"list_of_anopheles",
"=",
"[",
"]",
"desc",
"=",
"self",
".",
"et",
".",
"find",
"(",
"\"description\"",
")",
"if",
"desc",
"is",
"not",
"None",
":",
"for",
"anopheles",
"in",
"desc",
".",
"findall",
"(",
"\"anopheles\"",
")",
":",
"list_of_anopheles",
".",
"append",
"(",
"Anopheles",
"(",
"anopheles",
")",
")",
"return",
"list_of_anopheles"
] |
:rtype: Anopheles
|
[
":",
"rtype",
":",
"Anopheles"
] |
python
|
train
| 26.333333 |
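The row above is a thin ElementTree query; the same pattern in self-contained form, with the `Anopheles` wrapper replaced by the raw elements (the `mosquito` attribute name is illustrative):

```python
import xml.etree.ElementTree as ET

xml = """
<entomology>
  <description>
    <anopheles mosquito="gambiae_ss"/>
    <anopheles mosquito="funestus"/>
  </description>
</entomology>
"""
root = ET.fromstring(xml)
desc = root.find('description')
species = [] if desc is None else desc.findall('anopheles')
print([a.get('mosquito') for a in species])  # -> ['gambiae_ss', 'funestus']
```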
frnmst/md-toc
|
md_toc/cli.py
|
https://github.com/frnmst/md-toc/blob/86d2002ecf52fa9e1e5316a31f7eb7d549cb0830/md_toc/cli.py#L85-L229
|
def create_parser(self):
"""Create the CLI parser."""
parser = argparse.ArgumentParser(
description=PROGRAM_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(PROGRAM_EPILOG))
parser.add_argument(
'filename',
metavar='FILE_NAME',
nargs='*',
help='the I/O file name')
subparsers = parser.add_subparsers(
dest='parser', title='markdown parser')
subparsers.required = True
# github + cmark + gitlab + commonmarker.
github = subparsers.add_parser(
'github',
aliases=['cmark', 'gitlab', 'commonmarker'],
description='Use Commonmark rules to generate an output. If no \
option is selected, the default output will be an \
unordered list with the respective default values \
as listed below')
megroup = github.add_mutually_exclusive_group()
megroup.add_argument(
'-u',
'--unordered-list-marker',
choices=md_parser['github']['list']['unordered']['bullet_markers'],
nargs='?',
const=md_parser['github']['list']['unordered']['default_marker'],
help='set the marker and enables unordered list. Defaults to ' +
md_parser['github']['list']['unordered']['default_marker'])
megroup.add_argument(
'-o',
'--ordered-list-marker',
choices=md_parser['github']['list']['ordered']['closing_markers'],
nargs='?',
const=md_parser['github']['list']['ordered']
['default_closing_marker'],
help='set the marker and enables ordered lists. Defaults to ' +
md_parser['github']['list']['ordered']['default_closing_marker'])
github.add_argument(
'-l',
'--header-levels',
choices=[
str(i)
for i in range(1, md_parser['github']['header']['max_levels'] +
1)
],
nargs='?',
const=str(md_parser['github']['header']['default_keep_levels']),
help='set the maximum level of headers to be considered as part \
of the TOC. Defaults to ' + str(
md_parser['github']['header']['default_keep_levels']))
github.set_defaults(
header_levels=md_parser['github']['header']['default_keep_levels'])
# Redcarpet.
redcarpet = subparsers.add_parser(
'redcarpet',
description='Use Redcarpet rules to generate an output. If no \
option is selected, the default output will be an \
unordered list with the respective default values \
as listed below. Gitlab rules are the same as \
Redcarpet except that conflicts are avoided with \
duplicate headers.')
megroup = redcarpet.add_mutually_exclusive_group()
megroup.add_argument(
'-u',
'--unordered-list-marker',
choices=md_parser['redcarpet']['list']['unordered']
['bullet_markers'],
nargs='?',
const=md_parser['redcarpet']['list']['unordered']
['default_marker'],
help='set the marker and enables unordered list. Defaults to ' +
md_parser['redcarpet']['list']['unordered']['default_marker'])
megroup.add_argument(
'-o',
'--ordered-list-marker',
choices=md_parser['redcarpet']['list']['ordered']
['closing_markers'],
nargs='?',
const=md_parser['redcarpet']['list']['ordered']
['default_closing_marker'],
help='set the marker and enables ordered lists. Defaults to ' +
md_parser['redcarpet']['list']['ordered']['default_closing_marker']
)
redcarpet.add_argument(
'-l',
'--header-levels',
choices=[
str(i) for i in range(
1, md_parser['redcarpet']['header']['max_levels'] + 1)
],
nargs='?',
const=str(md_parser['redcarpet']['header']['default_keep_levels']),
help='set the maximum level of headers to be considered as part \
of the TOC. Defaults to ' + str(
md_parser['redcarpet']['header']['default_keep_levels']))
redcarpet.set_defaults(header_levels=md_parser['redcarpet']['header']
['default_keep_levels'])
c_or_i = parser.add_mutually_exclusive_group()
c_or_i.add_argument(
'-c',
'--no-list-coherence',
action='store_true',
help='avoids checking for TOC list coherence')
c_or_i.add_argument(
'-i',
'--no-indentation',
action='store_true',
help='avoids adding indentations to the TOC')
parser.add_argument(
'-l',
'--no-links',
action='store_true',
help='avoids adding links to the TOC')
parser.add_argument(
'-m',
'--toc-marker',
metavar='TOC_MARKER',
help='set the string to be used as the marker for positioning the \
table of contents. Defaults to ' +
common_defaults['toc_marker'])
parser.add_argument(
'-p',
'--in-place',
action='store_true',
help='overwrite the input file')
parser.add_argument(
'-v',
'--version',
action='version',
version=VERSION_NAME + ' ' + VERSION_NUMBER)
parser.set_defaults(toc_marker=common_defaults['toc_marker'])
parser.set_defaults(func=CliToApi().write_toc)
return parser
|
[
"def",
"create_parser",
"(",
"self",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"PROGRAM_DESCRIPTION",
",",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
",",
"epilog",
"=",
"textwrap",
".",
"dedent",
"(",
"PROGRAM_EPILOG",
")",
")",
"parser",
".",
"add_argument",
"(",
"'filename'",
",",
"metavar",
"=",
"'FILE_NAME'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"'the I/O file name'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'parser'",
",",
"title",
"=",
"'markdown parser'",
")",
"subparsers",
".",
"required",
"=",
"True",
"# github + cmark + gitlab + commonmarker.",
"github",
"=",
"subparsers",
".",
"add_parser",
"(",
"'github'",
",",
"aliases",
"=",
"[",
"'cmark'",
",",
"'gitlab'",
",",
"'commonmarker'",
"]",
",",
"description",
"=",
"'Use Commonmark rules to generate an output. If no \\\n option is selected, the default output will be an \\\n unordered list with the respective default values \\\n as listed below'",
")",
"megroup",
"=",
"github",
".",
"add_mutually_exclusive_group",
"(",
")",
"megroup",
".",
"add_argument",
"(",
"'-u'",
",",
"'--unordered-list-marker'",
",",
"choices",
"=",
"md_parser",
"[",
"'github'",
"]",
"[",
"'list'",
"]",
"[",
"'unordered'",
"]",
"[",
"'bullet_markers'",
"]",
",",
"nargs",
"=",
"'?'",
",",
"const",
"=",
"md_parser",
"[",
"'github'",
"]",
"[",
"'list'",
"]",
"[",
"'unordered'",
"]",
"[",
"'default_marker'",
"]",
",",
"help",
"=",
"'set the marker and enables unordered list. Defaults to '",
"+",
"md_parser",
"[",
"'github'",
"]",
"[",
"'list'",
"]",
"[",
"'unordered'",
"]",
"[",
"'default_marker'",
"]",
")",
"megroup",
".",
"add_argument",
"(",
"'-o'",
",",
"'--ordered-list-marker'",
",",
"choices",
"=",
"md_parser",
"[",
"'github'",
"]",
"[",
"'list'",
"]",
"[",
"'ordered'",
"]",
"[",
"'closing_markers'",
"]",
",",
"nargs",
"=",
"'?'",
",",
"const",
"=",
"md_parser",
"[",
"'github'",
"]",
"[",
"'list'",
"]",
"[",
"'ordered'",
"]",
"[",
"'default_closing_marker'",
"]",
",",
"help",
"=",
"'set the marker and enables ordered lists. Defaults to '",
"+",
"md_parser",
"[",
"'github'",
"]",
"[",
"'list'",
"]",
"[",
"'ordered'",
"]",
"[",
"'default_closing_marker'",
"]",
")",
"github",
".",
"add_argument",
"(",
"'-l'",
",",
"'--header-levels'",
",",
"choices",
"=",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"md_parser",
"[",
"'github'",
"]",
"[",
"'header'",
"]",
"[",
"'max_levels'",
"]",
"+",
"1",
")",
"]",
",",
"nargs",
"=",
"'?'",
",",
"const",
"=",
"str",
"(",
"md_parser",
"[",
"'github'",
"]",
"[",
"'header'",
"]",
"[",
"'default_keep_levels'",
"]",
")",
",",
"help",
"=",
"'set the maximum level of headers to be considered as part \\\n of the TOC. Defaults to '",
"+",
"str",
"(",
"md_parser",
"[",
"'github'",
"]",
"[",
"'header'",
"]",
"[",
"'default_keep_levels'",
"]",
")",
")",
"github",
".",
"set_defaults",
"(",
"header_levels",
"=",
"md_parser",
"[",
"'github'",
"]",
"[",
"'header'",
"]",
"[",
"'default_keep_levels'",
"]",
")",
"# Redcarpet.",
"redcarpet",
"=",
"subparsers",
".",
"add_parser",
"(",
"'redcarpet'",
",",
"description",
"=",
"'Use Redcarpet rules to generate an output. If no \\\n option is selected, the default output will be an \\\n unordered list with the respective default values \\\n as listed below. Gitlab rules are the same as \\\n Redcarpet except that conflicts are avoided with \\\n duplicate headers.'",
")",
"megroup",
"=",
"redcarpet",
".",
"add_mutually_exclusive_group",
"(",
")",
"megroup",
".",
"add_argument",
"(",
"'-u'",
",",
"'--unordered-list-marker'",
",",
"choices",
"=",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'list'",
"]",
"[",
"'unordered'",
"]",
"[",
"'bullet_markers'",
"]",
",",
"nargs",
"=",
"'?'",
",",
"const",
"=",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'list'",
"]",
"[",
"'unordered'",
"]",
"[",
"'default_marker'",
"]",
",",
"help",
"=",
"'set the marker and enables unordered list. Defaults to '",
"+",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'list'",
"]",
"[",
"'unordered'",
"]",
"[",
"'default_marker'",
"]",
")",
"megroup",
".",
"add_argument",
"(",
"'-o'",
",",
"'--ordered-list-marker'",
",",
"choices",
"=",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'list'",
"]",
"[",
"'ordered'",
"]",
"[",
"'closing_markers'",
"]",
",",
"nargs",
"=",
"'?'",
",",
"const",
"=",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'list'",
"]",
"[",
"'ordered'",
"]",
"[",
"'default_closing_marker'",
"]",
",",
"help",
"=",
"'set the marker and enables ordered lists. Defaults to '",
"+",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'list'",
"]",
"[",
"'ordered'",
"]",
"[",
"'default_closing_marker'",
"]",
")",
"redcarpet",
".",
"add_argument",
"(",
"'-l'",
",",
"'--header-levels'",
",",
"choices",
"=",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'header'",
"]",
"[",
"'max_levels'",
"]",
"+",
"1",
")",
"]",
",",
"nargs",
"=",
"'?'",
",",
"const",
"=",
"str",
"(",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'header'",
"]",
"[",
"'default_keep_levels'",
"]",
")",
",",
"help",
"=",
"'set the maximum level of headers to be considered as part \\\n of the TOC. Defaults to '",
"+",
"str",
"(",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'header'",
"]",
"[",
"'default_keep_levels'",
"]",
")",
")",
"redcarpet",
".",
"set_defaults",
"(",
"header_levels",
"=",
"md_parser",
"[",
"'redcarpet'",
"]",
"[",
"'header'",
"]",
"[",
"'default_keep_levels'",
"]",
")",
"c_or_i",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
")",
"c_or_i",
".",
"add_argument",
"(",
"'-c'",
",",
"'--no-list-coherence'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'avoids checking for TOC list coherence'",
")",
"c_or_i",
".",
"add_argument",
"(",
"'-i'",
",",
"'--no-indentation'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'avoids adding indentations to the TOC'",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"'--no-links'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'avoids adding links to the TOC'",
")",
"parser",
".",
"add_argument",
"(",
"'-m'",
",",
"'--toc-marker'",
",",
"metavar",
"=",
"'TOC_MARKER'",
",",
"help",
"=",
"'set the string to be used as the marker for positioning the \\\n table of contents. Defaults to '",
"+",
"common_defaults",
"[",
"'toc_marker'",
"]",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--in-place'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'overwrite the input file'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"VERSION_NAME",
"+",
"' '",
"+",
"VERSION_NUMBER",
")",
"parser",
".",
"set_defaults",
"(",
"toc_marker",
"=",
"common_defaults",
"[",
"'toc_marker'",
"]",
")",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"CliToApi",
"(",
")",
".",
"write_toc",
")",
"return",
"parser"
] |
Create the CLI parser.
|
[
"Create",
"the",
"CLI",
"parser",
"."
] |
python
|
train
| 40.606897 |
planetlabs/planet-client-python
|
planet/scripts/cli.py
|
https://github.com/planetlabs/planet-client-python/blob/1c62ce7d416819951dddee0c22068fef6d40b027/planet/scripts/cli.py#L66-L75
|
def cli(context, verbose, api_key, base_url, workers):
'''Planet API Client'''
configure_logging(verbose)
client_params.clear()
client_params['api_key'] = api_key
client_params['workers'] = workers
if base_url:
client_params['base_url'] = base_url
|
[
"def",
"cli",
"(",
"context",
",",
"verbose",
",",
"api_key",
",",
"base_url",
",",
"workers",
")",
":",
"configure_logging",
"(",
"verbose",
")",
"client_params",
".",
"clear",
"(",
")",
"client_params",
"[",
"'api_key'",
"]",
"=",
"api_key",
"client_params",
"[",
"'workers'",
"]",
"=",
"workers",
"if",
"base_url",
":",
"client_params",
"[",
"'base_url'",
"]",
"=",
"base_url"
] |
Planet API Client
|
[
"Planet",
"API",
"Client"
] |
python
|
train
| 27.2 |
google/apitools
|
apitools/base/py/base_api.py
|
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/base_api.py#L658-L687
|
def PrepareHttpRequest(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Prepares an HTTP request to be sent."""
request_type = _LoadClass(
method_config.request_type_name, self.__client.MESSAGES_MODULE)
util.Typecheck(request, request_type)
request = self.__client.ProcessRequest(method_config, request)
http_request = http_wrapper.Request(
http_method=method_config.http_method)
self.__SetBaseHeaders(http_request, self.__client)
self.__SetBody(http_request, method_config, request, upload)
url_builder = _UrlBuilder(
self.__client.url, relative_path=method_config.relative_path)
url_builder.query_params = self.__ConstructQueryParams(
method_config.query_params, request, global_params)
# It's important that upload and download go before we fill in the
# relative path, so that they can replace it.
if upload is not None:
upload.ConfigureRequest(upload_config, http_request, url_builder)
if download is not None:
download.ConfigureRequest(http_request, url_builder)
url_builder.relative_path = self.__ConstructRelativePath(
method_config, request, relative_path=url_builder.relative_path)
self.__FinalizeRequest(http_request, url_builder)
return self.__client.ProcessHttpRequest(http_request)
|
[
"def",
"PrepareHttpRequest",
"(",
"self",
",",
"method_config",
",",
"request",
",",
"global_params",
"=",
"None",
",",
"upload",
"=",
"None",
",",
"upload_config",
"=",
"None",
",",
"download",
"=",
"None",
")",
":",
"request_type",
"=",
"_LoadClass",
"(",
"method_config",
".",
"request_type_name",
",",
"self",
".",
"__client",
".",
"MESSAGES_MODULE",
")",
"util",
".",
"Typecheck",
"(",
"request",
",",
"request_type",
")",
"request",
"=",
"self",
".",
"__client",
".",
"ProcessRequest",
"(",
"method_config",
",",
"request",
")",
"http_request",
"=",
"http_wrapper",
".",
"Request",
"(",
"http_method",
"=",
"method_config",
".",
"http_method",
")",
"self",
".",
"__SetBaseHeaders",
"(",
"http_request",
",",
"self",
".",
"__client",
")",
"self",
".",
"__SetBody",
"(",
"http_request",
",",
"method_config",
",",
"request",
",",
"upload",
")",
"url_builder",
"=",
"_UrlBuilder",
"(",
"self",
".",
"__client",
".",
"url",
",",
"relative_path",
"=",
"method_config",
".",
"relative_path",
")",
"url_builder",
".",
"query_params",
"=",
"self",
".",
"__ConstructQueryParams",
"(",
"method_config",
".",
"query_params",
",",
"request",
",",
"global_params",
")",
"# It's important that upload and download go before we fill in the",
"# relative path, so that they can replace it.",
"if",
"upload",
"is",
"not",
"None",
":",
"upload",
".",
"ConfigureRequest",
"(",
"upload_config",
",",
"http_request",
",",
"url_builder",
")",
"if",
"download",
"is",
"not",
"None",
":",
"download",
".",
"ConfigureRequest",
"(",
"http_request",
",",
"url_builder",
")",
"url_builder",
".",
"relative_path",
"=",
"self",
".",
"__ConstructRelativePath",
"(",
"method_config",
",",
"request",
",",
"relative_path",
"=",
"url_builder",
".",
"relative_path",
")",
"self",
".",
"__FinalizeRequest",
"(",
"http_request",
",",
"url_builder",
")",
"return",
"self",
".",
"__client",
".",
"ProcessHttpRequest",
"(",
"http_request",
")"
] |
Prepares an HTTP request to be sent.
|
[
"Prepares",
"an",
"HTTP",
"request",
"to",
"be",
"sent",
"."
] |
python
|
train
| 48.766667 |
tnkteja/myhelp
|
virtualEnvironment/lib/python2.7/site-packages/twine/utils.py
|
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/twine/utils.py#L89-L114
|
def get_userpass_value(cli_value, config, key, prompt_strategy):
"""Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
:param config: Config dictionary
:type config: dict
:param key: Key to find the config value.
:type key: unicode
    :param prompt_strategy: Argumentless function to return fallback value.
:type prompt_strategy: function
:returns: The value for the username / password
:rtype: unicode
"""
if cli_value is not None:
return cli_value
elif config.get(key):
return config[key]
else:
return prompt_strategy()
|
[
"def",
"get_userpass_value",
"(",
"cli_value",
",",
"config",
",",
"key",
",",
"prompt_strategy",
")",
":",
"if",
"cli_value",
"is",
"not",
"None",
":",
"return",
"cli_value",
"elif",
"config",
".",
"get",
"(",
"key",
")",
":",
"return",
"config",
"[",
"key",
"]",
"else",
":",
"return",
"prompt_strategy",
"(",
")"
] |
Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
:param config: Config dictionary
:type config: dict
:param key: Key to find the config value.
:type key: unicode
:param prompt_strategy: Argumentless function to return fallback value.
:type prompt_strategy: function
:returns: The value for the username / password
:rtype: unicode
|
[
"Gets",
"the",
"username",
"/",
"password",
"from",
"config",
"."
] |
python
|
test
| 33.038462 |
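A quick illustration of the three fallback rules; the import path follows the row above, and the config values are made up:

```python
import functools
from twine.utils import get_userpass_value  # path per the row above

config = {'username': 'alice'}
prompt = functools.partial(input, 'Username: ')

print(get_userpass_value('bob', config, 'username', prompt))  # 1. CLI wins -> 'bob'
print(get_userpass_value(None, config, 'username', prompt))   # 2. config -> 'alice'
# With neither set, rule 3 kicks in and prompt_strategy() runs interactively:
# get_userpass_value(None, {}, 'username', prompt)
```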
django-parler/django-parler
|
parler/utils/views.py
|
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/utils/views.py#L31-L72
|
def get_language_tabs(request, current_language, available_languages, css_class=None):
"""
Determine the language tabs to show.
"""
tabs = TabsList(css_class=css_class)
get = request.GET.copy() # QueryDict object
tab_languages = []
site_id = getattr(settings, 'SITE_ID', None)
for lang_dict in appsettings.PARLER_LANGUAGES.get(site_id, ()):
code = lang_dict['code']
title = get_language_title(code)
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
elif code in available_languages:
status = 'available'
else:
status = 'empty'
tabs.append((url, title, code, status))
tab_languages.append(code)
# Additional stale translations in the database?
if appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS:
for code in available_languages:
if code not in tab_languages:
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
else:
status = 'available'
tabs.append((url, get_language_title(code), code, status))
tabs.current_is_translated = current_language in available_languages
tabs.allow_deletion = len(available_languages) > 1
return tabs
|
[
"def",
"get_language_tabs",
"(",
"request",
",",
"current_language",
",",
"available_languages",
",",
"css_class",
"=",
"None",
")",
":",
"tabs",
"=",
"TabsList",
"(",
"css_class",
"=",
"css_class",
")",
"get",
"=",
"request",
".",
"GET",
".",
"copy",
"(",
")",
"# QueryDict object",
"tab_languages",
"=",
"[",
"]",
"site_id",
"=",
"getattr",
"(",
"settings",
",",
"'SITE_ID'",
",",
"None",
")",
"for",
"lang_dict",
"in",
"appsettings",
".",
"PARLER_LANGUAGES",
".",
"get",
"(",
"site_id",
",",
"(",
")",
")",
":",
"code",
"=",
"lang_dict",
"[",
"'code'",
"]",
"title",
"=",
"get_language_title",
"(",
"code",
")",
"get",
"[",
"'language'",
"]",
"=",
"code",
"url",
"=",
"'?{0}'",
".",
"format",
"(",
"get",
".",
"urlencode",
"(",
")",
")",
"if",
"code",
"==",
"current_language",
":",
"status",
"=",
"'current'",
"elif",
"code",
"in",
"available_languages",
":",
"status",
"=",
"'available'",
"else",
":",
"status",
"=",
"'empty'",
"tabs",
".",
"append",
"(",
"(",
"url",
",",
"title",
",",
"code",
",",
"status",
")",
")",
"tab_languages",
".",
"append",
"(",
"code",
")",
"# Additional stale translations in the database?",
"if",
"appsettings",
".",
"PARLER_SHOW_EXCLUDED_LANGUAGE_TABS",
":",
"for",
"code",
"in",
"available_languages",
":",
"if",
"code",
"not",
"in",
"tab_languages",
":",
"get",
"[",
"'language'",
"]",
"=",
"code",
"url",
"=",
"'?{0}'",
".",
"format",
"(",
"get",
".",
"urlencode",
"(",
")",
")",
"if",
"code",
"==",
"current_language",
":",
"status",
"=",
"'current'",
"else",
":",
"status",
"=",
"'available'",
"tabs",
".",
"append",
"(",
"(",
"url",
",",
"get_language_title",
"(",
"code",
")",
",",
"code",
",",
"status",
")",
")",
"tabs",
".",
"current_is_translated",
"=",
"current_language",
"in",
"available_languages",
"tabs",
".",
"allow_deletion",
"=",
"len",
"(",
"available_languages",
")",
">",
"1",
"return",
"tabs"
] |
Determine the language tabs to show.
|
[
"Determine",
"the",
"language",
"tabs",
"to",
"show",
"."
] |
python
|
train
| 33.5 |
StorjOld/heartbeat
|
heartbeat/OneHash/OneHash.py
|
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/OneHash/OneHash.py#L92-L110
|
def generate_challenges(self, num, root_seed):
""" Generate the specified number of hash challenges.
:param num: The number of hash challenges we want to generate.
:param root_seed: Some value that we use to generate our seeds from.
"""
# Generate a series of seeds
seeds = self.generate_seeds(num, root_seed, self.secret)
blocks = self.pick_blocks(num, root_seed)
# List of 2-tuples (seed, hash_response)
self.challenges = []
# Generate the corresponding hash for each seed
for i in range(num):
self.challenges.append(Challenge(blocks[i], seeds[i]))
response = self.meet_challenge(self.challenges[i])
self.challenges[i].response = response
|
[
"def",
"generate_challenges",
"(",
"self",
",",
"num",
",",
"root_seed",
")",
":",
"# Generate a series of seeds",
"seeds",
"=",
"self",
".",
"generate_seeds",
"(",
"num",
",",
"root_seed",
",",
"self",
".",
"secret",
")",
"blocks",
"=",
"self",
".",
"pick_blocks",
"(",
"num",
",",
"root_seed",
")",
"# List of 2-tuples (seed, hash_response)",
"self",
".",
"challenges",
"=",
"[",
"]",
"# Generate the corresponding hash for each seed",
"for",
"i",
"in",
"range",
"(",
"num",
")",
":",
"self",
".",
"challenges",
".",
"append",
"(",
"Challenge",
"(",
"blocks",
"[",
"i",
"]",
",",
"seeds",
"[",
"i",
"]",
")",
")",
"response",
"=",
"self",
".",
"meet_challenge",
"(",
"self",
".",
"challenges",
"[",
"i",
"]",
")",
"self",
".",
"challenges",
"[",
"i",
"]",
".",
"response",
"=",
"response"
] |
Generate the specified number of hash challenges.
:param num: The number of hash challenges we want to generate.
:param root_seed: Some value that we use to generate our seeds from.
|
[
"Generate",
"the",
"specified",
"number",
"of",
"hash",
"challenges",
"."
] |
python
|
train
| 39.473684 |
StackStorm/pybind
|
pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/__init__.py#L127-L148
|
def _set_af_vrf(self, v, load=False):
"""
Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """af_vrf must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""",
})
self.__af_vrf = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_af_vrf",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"af_vrf_name\"",
",",
"af_vrf",
".",
"af_vrf",
",",
"yang_name",
"=",
"\"af-vrf\"",
",",
"rest_name",
"=",
"\"vrf\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'af-vrf-name'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'VRF unicast'",
",",
"u'alt-name'",
":",
"u'vrf'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'AfIpv4UcastVrf'",
",",
"u'cli-full-command'",
":",
"None",
",",
"u'cli-full-no'",
":",
"None",
",",
"u'cli-mode-name'",
":",
"u'config-bgp-ipv4u-vrf'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"af-vrf\"",
",",
"rest_name",
"=",
"\"vrf\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'VRF unicast'",
",",
"u'alt-name'",
":",
"u'vrf'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'callpoint'",
":",
"u'AfIpv4UcastVrf'",
",",
"u'cli-full-command'",
":",
"None",
",",
"u'cli-full-no'",
":",
"None",
",",
"u'cli-mode-name'",
":",
"u'config-bgp-ipv4u-vrf'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-bgp'",
",",
"defining_module",
"=",
"'brocade-bgp'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"af_vrf must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"af_vrf_name\",af_vrf.af_vrf, yang_name=\"af-vrf\", rest_name=\"vrf\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name=\"af-vrf\", rest_name=\"vrf\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__af_vrf",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf() directly.
|
[
"Setter",
"method",
"for",
"af_vrf",
"mapped",
"from",
"YANG",
"variable",
"/",
"rbridge_id",
"/",
"router",
"/",
"router_bgp",
"/",
"address_family",
"/",
"ipv4",
"/",
"ipv4_unicast",
"/",
"af_vrf",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_af_vrf",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_af_vrf",
"()",
"directly",
"."
] |
python
|
train
| 120.227273 |
MSchnei/pyprf_feature
|
pyprf_feature/analysis/old/pRF_utils.py
|
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/old/pRF_utils.py#L90-L105
|
def calcR2(predTst, yTest, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
return 1 - rss/tss
|
[
"def",
"calcR2",
"(",
"predTst",
",",
"yTest",
",",
"axis",
"=",
"0",
")",
":",
"rss",
"=",
"np",
".",
"sum",
"(",
"(",
"yTest",
"-",
"predTst",
")",
"**",
"2",
",",
"axis",
"=",
"axis",
")",
"tss",
"=",
"np",
".",
"sum",
"(",
"(",
"yTest",
"-",
"yTest",
".",
"mean",
"(",
")",
")",
"**",
"2",
",",
"axis",
"=",
"axis",
")",
"return",
"1",
"-",
"rss",
"/",
"tss"
] |
calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
|
[
"calculate",
"coefficient",
"of",
"determination",
".",
"Assumes",
"that",
"axis",
"=",
"0",
"is",
"time"
] |
python
|
train
| 29.3125 |
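A self-contained check of the R² formula from the row above:

```python
import numpy as np

def calc_r2(pred, y, axis=0):
    rss = np.sum((y - pred) ** 2, axis=axis)      # residual sum of squares
    tss = np.sum((y - y.mean()) ** 2, axis=axis)  # total sum of squares
    return 1 - rss / tss

y = np.array([1.0, 2.0, 3.0, 4.0])
perfect = y.copy()
noisy = y + np.array([0.5, -0.5, 0.5, -0.5])
print(calc_r2(perfect, y))  # -> 1.0
print(calc_r2(noisy, y))    # -> 0.8  (1 - 1.0 / 5.0)
```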
google/neuroglancer
|
python/neuroglancer/server.py
|
https://github.com/google/neuroglancer/blob/9efd12741013f464286f0bf3fa0b667f75a66658/python/neuroglancer/server.py#L245-L258
|
def stop():
"""Stop the server, invalidating any viewer URLs.
This allows any previously-referenced data arrays to be garbage collected if there are no other
references to them.
"""
global global_server
if global_server is not None:
ioloop = global_server.ioloop
def stop_ioloop():
ioloop.stop()
ioloop.close()
global_server.ioloop.add_callback(stop_ioloop)
global_server = None
|
[
"def",
"stop",
"(",
")",
":",
"global",
"global_server",
"if",
"global_server",
"is",
"not",
"None",
":",
"ioloop",
"=",
"global_server",
".",
"ioloop",
"def",
"stop_ioloop",
"(",
")",
":",
"ioloop",
".",
"stop",
"(",
")",
"ioloop",
".",
"close",
"(",
")",
"global_server",
".",
"ioloop",
".",
"add_callback",
"(",
"stop_ioloop",
")",
"global_server",
"=",
"None"
] |
Stop the server, invalidating any viewer URLs.
This allows any previously-referenced data arrays to be garbage collected if there are no other
references to them.
|
[
"Stop",
"the",
"server",
"invalidating",
"any",
"viewer",
"URLs",
"."
] |
python
|
train
| 31.857143 |
ethereum/pyrlp
|
rlp/codec.py
|
https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L98-L111
|
def length_prefix(length, offset):
"""Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
"""
if length < 56:
return ALL_BYTES[offset + length]
elif length < LONG_LENGTH:
length_string = int_to_big_endian(length)
return ALL_BYTES[offset + 56 - 1 + len(length_string)] + length_string
else:
raise ValueError('Length greater than 256**8')
|
[
"def",
"length_prefix",
"(",
"length",
",",
"offset",
")",
":",
"if",
"length",
"<",
"56",
":",
"return",
"ALL_BYTES",
"[",
"offset",
"+",
"length",
"]",
"elif",
"length",
"<",
"LONG_LENGTH",
":",
"length_string",
"=",
"int_to_big_endian",
"(",
"length",
")",
"return",
"ALL_BYTES",
"[",
"offset",
"+",
"56",
"-",
"1",
"+",
"len",
"(",
"length_string",
")",
"]",
"+",
"length_string",
"else",
":",
"raise",
"ValueError",
"(",
"'Length greater than 256**8'",
")"
] |
Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
|
[
"Construct",
"the",
"prefix",
"to",
"lists",
"or",
"strings",
"denoting",
"their",
"length",
"."
] |
python
|
train
| 38.642857 |
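A self-contained version of the prefix rule, with the two helpers it needs inlined (`LONG_LENGTH` is 256**8 in pyrlp, and `offset + 56 - 1` is written as `offset + 55` here):

```python
ALL_BYTES = tuple(bytes([i]) for i in range(256))
LONG_LENGTH = 256 ** 8

def int_to_big_endian(value):
    return value.to_bytes((value.bit_length() + 7) // 8 or 1, 'big')

def length_prefix(length, offset):
    if length < 56:
        return ALL_BYTES[offset + length]
    elif length < LONG_LENGTH:
        length_string = int_to_big_endian(length)
        return ALL_BYTES[offset + 55 + len(length_string)] + length_string
    raise ValueError('Length greater than 256**8')

print(length_prefix(3, 0x80))     # b'\x83' -- short string of 3 bytes
print(length_prefix(1024, 0xc0))  # b'\xf9\x04\x00' -- long list, 2 length bytes
```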
nvbn/thefuck
|
thefuck/types.py
|
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/types.py#L131-L147
|
def from_path(cls, path):
"""Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
"""
name = path.name[:-3]
with logs.debug_time(u'Importing rule: {};'.format(name)):
rule_module = load_source(name, str(path))
priority = getattr(rule_module, 'priority', DEFAULT_PRIORITY)
return cls(name, rule_module.match,
rule_module.get_new_command,
getattr(rule_module, 'enabled_by_default', True),
getattr(rule_module, 'side_effect', None),
settings.priority.get(name, priority),
getattr(rule_module, 'requires_output', True))
|
[
"def",
"from_path",
"(",
"cls",
",",
"path",
")",
":",
"name",
"=",
"path",
".",
"name",
"[",
":",
"-",
"3",
"]",
"with",
"logs",
".",
"debug_time",
"(",
"u'Importing rule: {};'",
".",
"format",
"(",
"name",
")",
")",
":",
"rule_module",
"=",
"load_source",
"(",
"name",
",",
"str",
"(",
"path",
")",
")",
"priority",
"=",
"getattr",
"(",
"rule_module",
",",
"'priority'",
",",
"DEFAULT_PRIORITY",
")",
"return",
"cls",
"(",
"name",
",",
"rule_module",
".",
"match",
",",
"rule_module",
".",
"get_new_command",
",",
"getattr",
"(",
"rule_module",
",",
"'enabled_by_default'",
",",
"True",
")",
",",
"getattr",
"(",
"rule_module",
",",
"'side_effect'",
",",
"None",
")",
",",
"settings",
".",
"priority",
".",
"get",
"(",
"name",
",",
"priority",
")",
",",
"getattr",
"(",
"rule_module",
",",
"'requires_output'",
",",
"True",
")",
")"
] |
Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
|
[
"Creates",
"rule",
"instance",
"from",
"path",
"."
] |
python
|
train
| 40.823529 |
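`load_source` here is thefuck's shim over the old `imp.load_source`; on modern Python the equivalent is a few lines of importlib. A hedged sketch, not thefuck's exact code:

```python
import importlib.util

def load_source(name, path):
    """Import a module from an arbitrary file path, imp.load_source-style."""
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# rule = load_source('git_push', '/home/user/.config/thefuck/rules/git_push.py')
# rule.match, rule.get_new_command, ... are then plain module attributes.
```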
zimeon/iiif
|
iiif/static.py
|
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L214-L261
|
def generate(self, src=None, identifier=None):
"""Generate static files for one source image."""
self.src = src
self.identifier = identifier
# Get image details and calculate tiles
im = self.manipulator_klass()
im.srcfile = self.src
im.set_max_image_pixels(self.max_image_pixels)
im.do_first()
width = im.width
height = im.height
scale_factors = im.scale_factors(self.tilesize)
# Setup destination and IIIF identifier
self.setup_destination()
# Write out images
for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors):
self.generate_tile(region, size)
sizes = []
for size in static_full_sizes(width, height, self.tilesize):
# See https://github.com/zimeon/iiif/issues/9
sizes.append({'width': size[0], 'height': size[1]})
self.generate_tile('full', size)
for request in self.extras:
request.identifier = self.identifier
if (request.is_scaled_full_image()):
sizes.append({'width': request.size_wh[0],
'height': request.size_wh[1]})
self.generate_file(request)
# Write info.json
qualities = ['default'] if (self.api_version > '1.1') else ['native']
info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier,
width=width, height=height, scale_factors=scale_factors,
tile_width=self.tilesize, tile_height=self.tilesize,
formats=['jpg'], qualities=qualities, sizes=sizes,
api_version=self.api_version)
json_file = os.path.join(self.dst, self.identifier, 'info.json')
if (self.dryrun):
self.logger.warning(
"dryrun mode, would write the following files:")
self.logger.warning("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
else:
with open(json_file, 'w') as f:
f.write(info.as_json())
f.close()
self.logger.info("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
self.logger.debug("Written %s" % (json_file))
|
[
"def",
"generate",
"(",
"self",
",",
"src",
"=",
"None",
",",
"identifier",
"=",
"None",
")",
":",
"self",
".",
"src",
"=",
"src",
"self",
".",
"identifier",
"=",
"identifier",
"# Get image details and calculate tiles",
"im",
"=",
"self",
".",
"manipulator_klass",
"(",
")",
"im",
".",
"srcfile",
"=",
"self",
".",
"src",
"im",
".",
"set_max_image_pixels",
"(",
"self",
".",
"max_image_pixels",
")",
"im",
".",
"do_first",
"(",
")",
"width",
"=",
"im",
".",
"width",
"height",
"=",
"im",
".",
"height",
"scale_factors",
"=",
"im",
".",
"scale_factors",
"(",
"self",
".",
"tilesize",
")",
"# Setup destination and IIIF identifier",
"self",
".",
"setup_destination",
"(",
")",
"# Write out images",
"for",
"(",
"region",
",",
"size",
")",
"in",
"static_partial_tile_sizes",
"(",
"width",
",",
"height",
",",
"self",
".",
"tilesize",
",",
"scale_factors",
")",
":",
"self",
".",
"generate_tile",
"(",
"region",
",",
"size",
")",
"sizes",
"=",
"[",
"]",
"for",
"size",
"in",
"static_full_sizes",
"(",
"width",
",",
"height",
",",
"self",
".",
"tilesize",
")",
":",
"# See https://github.com/zimeon/iiif/issues/9",
"sizes",
".",
"append",
"(",
"{",
"'width'",
":",
"size",
"[",
"0",
"]",
",",
"'height'",
":",
"size",
"[",
"1",
"]",
"}",
")",
"self",
".",
"generate_tile",
"(",
"'full'",
",",
"size",
")",
"for",
"request",
"in",
"self",
".",
"extras",
":",
"request",
".",
"identifier",
"=",
"self",
".",
"identifier",
"if",
"(",
"request",
".",
"is_scaled_full_image",
"(",
")",
")",
":",
"sizes",
".",
"append",
"(",
"{",
"'width'",
":",
"request",
".",
"size_wh",
"[",
"0",
"]",
",",
"'height'",
":",
"request",
".",
"size_wh",
"[",
"1",
"]",
"}",
")",
"self",
".",
"generate_file",
"(",
"request",
")",
"# Write info.json",
"qualities",
"=",
"[",
"'default'",
"]",
"if",
"(",
"self",
".",
"api_version",
">",
"'1.1'",
")",
"else",
"[",
"'native'",
"]",
"info",
"=",
"IIIFInfo",
"(",
"level",
"=",
"0",
",",
"server_and_prefix",
"=",
"self",
".",
"prefix",
",",
"identifier",
"=",
"self",
".",
"identifier",
",",
"width",
"=",
"width",
",",
"height",
"=",
"height",
",",
"scale_factors",
"=",
"scale_factors",
",",
"tile_width",
"=",
"self",
".",
"tilesize",
",",
"tile_height",
"=",
"self",
".",
"tilesize",
",",
"formats",
"=",
"[",
"'jpg'",
"]",
",",
"qualities",
"=",
"qualities",
",",
"sizes",
"=",
"sizes",
",",
"api_version",
"=",
"self",
".",
"api_version",
")",
"json_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"dst",
",",
"self",
".",
"identifier",
",",
"'info.json'",
")",
"if",
"(",
"self",
".",
"dryrun",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"dryrun mode, would write the following files:\"",
")",
"self",
".",
"logger",
".",
"warning",
"(",
"\"%s / %s/%s\"",
"%",
"(",
"self",
".",
"dst",
",",
"self",
".",
"identifier",
",",
"'info.json'",
")",
")",
"else",
":",
"with",
"open",
"(",
"json_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"info",
".",
"as_json",
"(",
")",
")",
"f",
".",
"close",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"%s / %s/%s\"",
"%",
"(",
"self",
".",
"dst",
",",
"self",
".",
"identifier",
",",
"'info.json'",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Written %s\"",
"%",
"(",
"json_file",
")",
")"
] |
Generate static files for one source image.
|
[
"Generate",
"static",
"files",
"for",
"one",
"source",
"image",
"."
] |
python
|
train
| 48.333333 |
openstax/cnx-publishing
|
cnxpublishing/views/publishing.py
|
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/views/publishing.py#L77-L108
|
def get_accept_license(request):
"""This produces JSON data for a user (at ``uid``) to view the license(s)
they have accepted or will need to accept for a publication (at ``id``).
"""
publication_id = request.matchdict['id']
user_id = request.matchdict['uid']
# FIXME Is this an active publication?
# TODO Verify the accepting user is the one making the request.
# For each pending document, accept the license.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""
SELECT row_to_json(combined_rows) FROM (
SELECT
pd.uuid AS id,
ident_hash(pd.uuid, pd.major_version, pd.minor_version) \
AS ident_hash,
accepted AS is_accepted
FROM
pending_documents AS pd
NATURAL JOIN license_acceptances AS la
WHERE pd.publication_id = %s AND user_id = %s
) as combined_rows;""",
(publication_id, user_id))
user_documents = [r[0] for r in cursor.fetchall()]
return {'publication_id': publication_id,
'user_id': user_id,
'documents': user_documents,
}
|
[
"def",
"get_accept_license",
"(",
"request",
")",
":",
"publication_id",
"=",
"request",
".",
"matchdict",
"[",
"'id'",
"]",
"user_id",
"=",
"request",
".",
"matchdict",
"[",
"'uid'",
"]",
"# FIXME Is this an active publication?",
"# TODO Verify the accepting user is the one making the request.",
"# For each pending document, accept the license.",
"with",
"db_connect",
"(",
")",
"as",
"db_conn",
":",
"with",
"db_conn",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"\"\"\"\nSELECT row_to_json(combined_rows) FROM (\nSELECT\n pd.uuid AS id,\n ident_hash(pd.uuid, pd.major_version, pd.minor_version) \\\n AS ident_hash,\n accepted AS is_accepted\nFROM\n pending_documents AS pd\n NATURAL JOIN license_acceptances AS la\nWHERE pd.publication_id = %s AND user_id = %s\n) as combined_rows;\"\"\"",
",",
"(",
"publication_id",
",",
"user_id",
")",
")",
"user_documents",
"=",
"[",
"r",
"[",
"0",
"]",
"for",
"r",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]",
"return",
"{",
"'publication_id'",
":",
"publication_id",
",",
"'user_id'",
":",
"user_id",
",",
"'documents'",
":",
"user_documents",
",",
"}"
] |
This produces JSON data for a user (at ``uid``) to view the license(s)
they have accepted or will need to accept for a publication (at ``id``).
|
[
"This",
"produces",
"JSON",
"data",
"for",
"a",
"user",
"(",
"at",
"uid",
")",
"to",
"view",
"the",
"license",
"(",
"s",
")",
"they",
"have",
"accepted",
"or",
"will",
"need",
"to",
"accept",
"for",
"a",
"publication",
"(",
"at",
"id",
")",
"."
] |
python
|
valid
| 33.84375 |