repo (string, 7-55 chars) | path (string, 4-223 chars) | url (string, 87-315 chars) | code (string, 75-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (string, 1 class) | partition (string, 3 classes) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---
south-coast-science/scs_core
|
src/scs_core/position/nmea/gpdatetime.py
|
https://github.com/south-coast-science/scs_core/blob/a4152b0bbed6acbbf257e1bba6a912f6ebe578e5/src/scs_core/position/nmea/gpdatetime.py#L30-L38
|
def as_iso8601(self):
"""
example: 2016-08-13T00:38:05.210+00:00
"""
if self.__date is None or self.__time is None:
return None
return "20%s-%s-%sT%s:%s:%s0Z" % \
(self.__date[4:], self.__date[2:4], self.__date[:2], self.__time[:2], self.__time[2:4], self.__time[4:])
|
[
"def",
"as_iso8601",
"(",
"self",
")",
":",
"if",
"self",
".",
"__date",
"is",
"None",
"or",
"self",
".",
"__time",
"is",
"None",
":",
"return",
"None",
"return",
"\"20%s-%s-%sT%s:%s:%s0Z\"",
"%",
"(",
"self",
".",
"__date",
"[",
"4",
":",
"]",
",",
"self",
".",
"__date",
"[",
"2",
":",
"4",
"]",
",",
"self",
".",
"__date",
"[",
":",
"2",
"]",
",",
"self",
".",
"__time",
"[",
":",
"2",
"]",
",",
"self",
".",
"__time",
"[",
"2",
":",
"4",
"]",
",",
"self",
".",
"__time",
"[",
"4",
":",
"]",
")"
] |
example: 2016-08-13T00:38:05.210+00:00
|
[
"example",
":",
"2016",
"-",
"08",
"-",
"13T00",
":",
"38",
":",
"05",
".",
"210",
"+",
"00",
":",
"00"
] |
python
|
train
| 36.333333 |
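A minimal standalone sketch of the reconstruction above, assuming NMEA-style fields (`date` as ddmmyy, `time` as hhmmss.ss). Note the code emits a `Z` suffix while the docstring's example shows `+00:00`; both denote UTC.

```python
def as_iso8601(date, time):
    # date is ddmmyy, time is hhmmss.ss (NMEA field layout, assumed here)
    if date is None or time is None:
        return None
    return "20%s-%s-%sT%s:%s:%s0Z" % (date[4:], date[2:4], date[:2],
                                      time[:2], time[2:4], time[4:])

print(as_iso8601("130816", "003805.21"))  # 2016-08-13T00:38:05.210Z
```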
Azure/azure-event-hubs-python
|
azure/eventprocessorhost/eph.py
|
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/eph.py#L49-L55
|
async def open_async(self):
"""
Starts the host.
"""
if not self.loop:
self.loop = asyncio.get_event_loop()
await self.partition_manager.start_async()
|
[
"async",
"def",
"open_async",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"loop",
":",
"self",
".",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"await",
"self",
".",
"partition_manager",
".",
"start_async",
"(",
")"
] |
Starts the host.
|
[
"Starts",
"the",
"host",
"."
] |
python
|
train
| 28 |
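A hedged usage sketch of the open pattern above; `DemoHost` and the `asyncio.sleep(0)` stand-in for `partition_manager.start_async()` are assumptions, and only the lazy loop acquisition mirrors the snippet.

```python
import asyncio

class DemoHost:
    def __init__(self):
        self.loop = None

    async def open_async(self):
        # Lazily acquire the loop, as the snippet does
        if not self.loop:
            self.loop = asyncio.get_event_loop()
        await asyncio.sleep(0)  # hypothetical stand-in for start_async()

asyncio.run(DemoHost().open_async())
```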
joke2k/faker
|
faker/providers/date_time/__init__.py
|
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/date_time/__init__.py#L1589-L1602
|
def future_datetime(self, end_date='+30d', tzinfo=None):
"""
Get a DateTime object based on a random date between 1 second form now
and a given date.
Accepts date strings that can be recognized by strtotime().
:param end_date Defaults to "+30d"
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
"""
return self.date_time_between(
start_date='+1s', end_date=end_date, tzinfo=tzinfo,
)
|
[
"def",
"future_datetime",
"(",
"self",
",",
"end_date",
"=",
"'+30d'",
",",
"tzinfo",
"=",
"None",
")",
":",
"return",
"self",
".",
"date_time_between",
"(",
"start_date",
"=",
"'+1s'",
",",
"end_date",
"=",
"end_date",
",",
"tzinfo",
"=",
"tzinfo",
",",
")"
] |
Get a DateTime object based on a random date between 1 second form now
and a given date.
Accepts date strings that can be recognized by strtotime().
:param end_date Defaults to "+30d"
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
|
[
"Get",
"a",
"DateTime",
"object",
"based",
"on",
"a",
"random",
"date",
"between",
"1",
"second",
"form",
"now",
"and",
"a",
"given",
"date",
".",
"Accepts",
"date",
"strings",
"that",
"can",
"be",
"recognized",
"by",
"strtotime",
"()",
"."
] |
python
|
train
| 38.642857 |
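Usage sketch through Faker's public API (assumes the `faker` package is installed; the sample output is illustrative only).

```python
from faker import Faker

fake = Faker()
dt = fake.future_datetime(end_date='+30d')  # between 1 second and 30 days from now
print(repr(dt))  # e.g. datetime.datetime(2024, 5, 17, 9, 41, 3)
```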
gagneurlab/concise
|
concise/legacy/kmer.py
|
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/kmer.py#L28-L102
|
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1,
seq_align="start", trim_seq_len=None):
"""
Find best k-mers for CONCISE initialization.
Args:
dt (pd.DataFrame): Table containing response variable and sequence.
response (str): Name of the column used as the reponse variable.
sequence (str): Name of the column storing the DNA/RNA sequences.
k (int): Desired k-mer length.
n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG?
seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
Returns:
string list: Best set of motifs for this dataset sorted with respect to
confidence (best candidate occuring first).
Details:
First a lasso model gets fitted to get a set of initial motifs. Next, the best
subset of unrelated motifs is selected by stepwise selection.
"""
y = dt[response]
seq = dt[sequence]
if trim_seq_len is not None:
seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)
seq = [s.replace("N", "") for s in seq]
dt_kmer = kmer_count(seq, k)
Xsp = csc_matrix(dt_kmer)
en = ElasticNet(alpha=1, standardize=False, n_splits=3)
en.fit(Xsp, y)
# which coefficients are nonzero?=
nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist()
# perform stepwise selection
#
# TODO - how do we deal with the intercept?
# largest number of motifs where they don't differ by more than 1 k-mer
def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True):
"""
perform stepwise model selection while preventing to add a motif similar to the
already selected motifs.
"""
F, pval = f_regression(dt_kmer[to_be_selected_kmers], y)
kmer = to_be_selected_kmers.pop(pval.argmin())
selected_kmers.append(kmer)
def select_criterion(s1, s2, consider_shift=True):
if hamming_distance(s1, s2) <= 1:
return False
if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
return False
if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
return False
return True
to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers
if select_criterion(ckmer, kmer, consider_shift)]
if len(to_be_selected_kmers) == 0:
return selected_kmers
else:
# regress out the new feature
lm = LinearRegression()
lm.fit(dt_kmer[selected_kmers], y)
y_new = y - lm.predict(dt_kmer[selected_kmers])
return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift)
selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift)
return selected_kmers
|
[
"def",
"best_kmers",
"(",
"dt",
",",
"response",
",",
"sequence",
",",
"k",
"=",
"6",
",",
"consider_shift",
"=",
"True",
",",
"n_cores",
"=",
"1",
",",
"seq_align",
"=",
"\"start\"",
",",
"trim_seq_len",
"=",
"None",
")",
":",
"y",
"=",
"dt",
"[",
"response",
"]",
"seq",
"=",
"dt",
"[",
"sequence",
"]",
"if",
"trim_seq_len",
"is",
"not",
"None",
":",
"seq",
"=",
"pad_sequences",
"(",
"seq",
",",
"align",
"=",
"seq_align",
",",
"maxlen",
"=",
"trim_seq_len",
")",
"seq",
"=",
"[",
"s",
".",
"replace",
"(",
"\"N\"",
",",
"\"\"",
")",
"for",
"s",
"in",
"seq",
"]",
"dt_kmer",
"=",
"kmer_count",
"(",
"seq",
",",
"k",
")",
"Xsp",
"=",
"csc_matrix",
"(",
"dt_kmer",
")",
"en",
"=",
"ElasticNet",
"(",
"alpha",
"=",
"1",
",",
"standardize",
"=",
"False",
",",
"n_splits",
"=",
"3",
")",
"en",
".",
"fit",
"(",
"Xsp",
",",
"y",
")",
"# which coefficients are nonzero?=",
"nonzero_kmers",
"=",
"dt_kmer",
".",
"columns",
".",
"values",
"[",
"en",
".",
"coef_",
"!=",
"0",
"]",
".",
"tolist",
"(",
")",
"# perform stepwise selection",
"#",
"# TODO - how do we deal with the intercept?",
"# largest number of motifs where they don't differ by more than 1 k-mer",
"def",
"find_next_best",
"(",
"dt_kmer",
",",
"y",
",",
"selected_kmers",
",",
"to_be_selected_kmers",
",",
"consider_shift",
"=",
"True",
")",
":",
"\"\"\"\n perform stepwise model selection while preventing to add a motif similar to the\n already selected motifs.\n \"\"\"",
"F",
",",
"pval",
"=",
"f_regression",
"(",
"dt_kmer",
"[",
"to_be_selected_kmers",
"]",
",",
"y",
")",
"kmer",
"=",
"to_be_selected_kmers",
".",
"pop",
"(",
"pval",
".",
"argmin",
"(",
")",
")",
"selected_kmers",
".",
"append",
"(",
"kmer",
")",
"def",
"select_criterion",
"(",
"s1",
",",
"s2",
",",
"consider_shift",
"=",
"True",
")",
":",
"if",
"hamming_distance",
"(",
"s1",
",",
"s2",
")",
"<=",
"1",
":",
"return",
"False",
"if",
"consider_shift",
"and",
"hamming_distance",
"(",
"s1",
"[",
"1",
":",
"]",
",",
"s2",
"[",
":",
"-",
"1",
"]",
")",
"==",
"0",
":",
"return",
"False",
"if",
"consider_shift",
"and",
"hamming_distance",
"(",
"s1",
"[",
":",
"-",
"1",
"]",
",",
"s2",
"[",
"1",
":",
"]",
")",
"==",
"0",
":",
"return",
"False",
"return",
"True",
"to_be_selected_kmers",
"=",
"[",
"ckmer",
"for",
"ckmer",
"in",
"to_be_selected_kmers",
"if",
"select_criterion",
"(",
"ckmer",
",",
"kmer",
",",
"consider_shift",
")",
"]",
"if",
"len",
"(",
"to_be_selected_kmers",
")",
"==",
"0",
":",
"return",
"selected_kmers",
"else",
":",
"# regress out the new feature",
"lm",
"=",
"LinearRegression",
"(",
")",
"lm",
".",
"fit",
"(",
"dt_kmer",
"[",
"selected_kmers",
"]",
",",
"y",
")",
"y_new",
"=",
"y",
"-",
"lm",
".",
"predict",
"(",
"dt_kmer",
"[",
"selected_kmers",
"]",
")",
"return",
"find_next_best",
"(",
"dt_kmer",
",",
"y_new",
",",
"selected_kmers",
",",
"to_be_selected_kmers",
",",
"consider_shift",
")",
"selected_kmers",
"=",
"find_next_best",
"(",
"dt_kmer",
",",
"y",
",",
"[",
"]",
",",
"nonzero_kmers",
",",
"consider_shift",
")",
"return",
"selected_kmers"
] |
Find best k-mers for CONCISE initialization.
Args:
dt (pd.DataFrame): Table containing response variable and sequence.
response (str): Name of the column used as the reponse variable.
sequence (str): Name of the column storing the DNA/RNA sequences.
k (int): Desired k-mer length.
n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG?
seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
Returns:
string list: Best set of motifs for this dataset sorted with respect to
confidence (best candidate occuring first).
Details:
First a lasso model gets fitted to get a set of initial motifs. Next, the best
subset of unrelated motifs is selected by stepwise selection.
|
[
"Find",
"best",
"k",
"-",
"mers",
"for",
"CONCISE",
"initialization",
"."
] |
python
|
train
| 43.693333 |
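The docstring asks whether TATTTA is similar to ATTTAG; a standalone sketch of the inner `select_criterion` answers it, with `hamming_distance` reimplemented here as an assumption about the helper the snippet imports.

```python
def hamming_distance(a, b):
    # Assumed behavior of the imported helper: positionwise mismatches
    return sum(x != y for x, y in zip(a, b))

def select_criterion(s1, s2, consider_shift=True):
    if hamming_distance(s1, s2) <= 1:   # near-identical k-mers are rejected
        return False
    if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
        return False                    # one k-mer is a one-base shift of the other
    if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
        return False                    # shift in the other direction
    return True

print(select_criterion("TATTTA", "ATTTAG"))  # False: one is a 1-base shift of the other
```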
ncclient/ncclient
|
ncclient/transport/session.py
|
https://github.com/ncclient/ncclient/blob/2b75f2c6a06bd2a5d1be67b01bb65c5ffd2e2d7a/ncclient/transport/session.py#L248-L264
|
def build(capabilities, device_handler):
"Given a list of capability URI's returns <hello> message XML string"
if device_handler:
# This is used as kwargs dictionary for lxml's Element() function.
# Therefore the arg-name ("nsmap") is used as key here.
xml_namespace_kwargs = { "nsmap" : device_handler.get_xml_base_namespace_dict() }
else:
xml_namespace_kwargs = {}
hello = new_ele("hello", **xml_namespace_kwargs)
caps = sub_ele(hello, "capabilities")
def fun(uri): sub_ele(caps, "capability").text = uri
#python3 changes
if sys.version < '3':
map(fun, capabilities)
else:
list(map(fun, capabilities))
return to_xml(hello)
|
[
"def",
"build",
"(",
"capabilities",
",",
"device_handler",
")",
":",
"if",
"device_handler",
":",
"# This is used as kwargs dictionary for lxml's Element() function.",
"# Therefore the arg-name (\"nsmap\") is used as key here.",
"xml_namespace_kwargs",
"=",
"{",
"\"nsmap\"",
":",
"device_handler",
".",
"get_xml_base_namespace_dict",
"(",
")",
"}",
"else",
":",
"xml_namespace_kwargs",
"=",
"{",
"}",
"hello",
"=",
"new_ele",
"(",
"\"hello\"",
",",
"*",
"*",
"xml_namespace_kwargs",
")",
"caps",
"=",
"sub_ele",
"(",
"hello",
",",
"\"capabilities\"",
")",
"def",
"fun",
"(",
"uri",
")",
":",
"sub_ele",
"(",
"caps",
",",
"\"capability\"",
")",
".",
"text",
"=",
"uri",
"#python3 changes",
"if",
"sys",
".",
"version",
"<",
"'3'",
":",
"map",
"(",
"fun",
",",
"capabilities",
")",
"else",
":",
"list",
"(",
"map",
"(",
"fun",
",",
"capabilities",
")",
")",
"return",
"to_xml",
"(",
"hello",
")"
] |
Given a list of capability URI's returns <hello> message XML string
|
[
"Given",
"a",
"list",
"of",
"capability",
"URI",
"s",
"returns",
"<hello",
">",
"message",
"XML",
"string"
] |
python
|
train
| 44.705882 |
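A self-contained sketch of the same `<hello>` construction using lxml directly; `new_ele`/`sub_ele`/`to_xml` are ncclient helpers whose internals are assumed to behave roughly like this. The snippet's `sys.version` branch exists because Python 3's `map()` is lazy; a plain loop sidesteps it.

```python
from lxml import etree

def build(capabilities):
    hello = etree.Element("hello")
    caps = etree.SubElement(hello, "capabilities")
    for uri in capabilities:  # plain loop avoids the py2/py3 map() dance
        etree.SubElement(caps, "capability").text = uri
    return etree.tostring(hello, encoding="unicode")

print(build(["urn:ietf:params:netconf:base:1.0"]))
# <hello><capabilities><capability>urn:ietf:params:netconf:base:1.0</capability></capabilities></hello>
```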
apache/incubator-heron
|
heron/common/src/python/utils/log.py
|
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/common/src/python/utils/log.py#L93-L104
|
def set_logging_level(cl_args):
"""simply set verbose level based on command-line args
:param cl_args: CLI arguments
:type cl_args: dict
:return: None
:rtype: None
"""
if 'verbose' in cl_args and cl_args['verbose']:
configure(logging.DEBUG)
else:
configure(logging.INFO)
|
[
"def",
"set_logging_level",
"(",
"cl_args",
")",
":",
"if",
"'verbose'",
"in",
"cl_args",
"and",
"cl_args",
"[",
"'verbose'",
"]",
":",
"configure",
"(",
"logging",
".",
"DEBUG",
")",
"else",
":",
"configure",
"(",
"logging",
".",
"INFO",
")"
] |
simply set verbose level based on command-line args
:param cl_args: CLI arguments
:type cl_args: dict
:return: None
:rtype: None
|
[
"simply",
"set",
"verbose",
"level",
"based",
"on",
"command",
"-",
"line",
"args"
] |
python
|
valid
| 23.666667 |
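A hedged standalone variant of the verbosity switch above; `configure` is the module's own helper, stood in for here by `logging.basicConfig`.

```python
import logging

def set_logging_level(cl_args):
    # 'verbose' missing or falsy -> INFO, matching the snippet's branch
    level = logging.DEBUG if cl_args.get('verbose') else logging.INFO
    logging.basicConfig(level=level)  # stand-in for the module's configure()

set_logging_level({'verbose': True})
logging.debug("visible only because verbose was set")
```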
boriel/zxbasic
|
asm.py
|
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asm.py#L115-L126
|
def argval(self):
""" Returns the value of the arg (if any) or None.
If the arg. is not an integer, an error be triggered.
"""
if self.arg is None or any(x is None for x in self.arg):
return None
for x in self.arg:
if not isinstance(x, int):
raise InvalidArgError(self.arg)
return self.arg
|
[
"def",
"argval",
"(",
"self",
")",
":",
"if",
"self",
".",
"arg",
"is",
"None",
"or",
"any",
"(",
"x",
"is",
"None",
"for",
"x",
"in",
"self",
".",
"arg",
")",
":",
"return",
"None",
"for",
"x",
"in",
"self",
".",
"arg",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"int",
")",
":",
"raise",
"InvalidArgError",
"(",
"self",
".",
"arg",
")",
"return",
"self",
".",
"arg"
] |
Returns the value of the arg (if any) or None.
If the arg. is not an integer, an error be triggered.
|
[
"Returns",
"the",
"value",
"of",
"the",
"arg",
"(",
"if",
"any",
")",
"or",
"None",
".",
"If",
"the",
"arg",
".",
"is",
"not",
"an",
"integer",
"an",
"error",
"be",
"triggered",
"."
] |
python
|
train
| 30.666667 |
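A standalone sketch of the `argval` contract (None if any part is unresolved, the argument if all parts are integers, an error otherwise), with `InvalidArgError` modeled as a plain exception.

```python
class InvalidArgError(Exception):
    pass

def argval(arg):
    if arg is None or any(x is None for x in arg):
        return None
    for x in arg:
        if not isinstance(x, int):
            raise InvalidArgError(arg)
    return arg

print(argval([1, 2]))     # [1, 2]
print(argval([1, None]))  # None
```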
IdentityPython/SATOSA
|
src/satosa/state.py
|
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/state.py#L235-L253
|
def urlstate(self, encryption_key):
"""
Will return a url safe representation of the state.
:type encryption_key: Key used for encryption.
:rtype: str
:return: Url representation av of the state.
"""
lzma = LZMACompressor()
urlstate_data = json.dumps(self._state_dict)
urlstate_data = lzma.compress(urlstate_data.encode("UTF-8"))
urlstate_data += lzma.flush()
urlstate_data = _AESCipher(encryption_key).encrypt(urlstate_data)
lzma = LZMACompressor()
urlstate_data = lzma.compress(urlstate_data)
urlstate_data += lzma.flush()
urlstate_data = base64.urlsafe_b64encode(urlstate_data)
return urlstate_data.decode("utf-8")
|
[
"def",
"urlstate",
"(",
"self",
",",
"encryption_key",
")",
":",
"lzma",
"=",
"LZMACompressor",
"(",
")",
"urlstate_data",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"_state_dict",
")",
"urlstate_data",
"=",
"lzma",
".",
"compress",
"(",
"urlstate_data",
".",
"encode",
"(",
"\"UTF-8\"",
")",
")",
"urlstate_data",
"+=",
"lzma",
".",
"flush",
"(",
")",
"urlstate_data",
"=",
"_AESCipher",
"(",
"encryption_key",
")",
".",
"encrypt",
"(",
"urlstate_data",
")",
"lzma",
"=",
"LZMACompressor",
"(",
")",
"urlstate_data",
"=",
"lzma",
".",
"compress",
"(",
"urlstate_data",
")",
"urlstate_data",
"+=",
"lzma",
".",
"flush",
"(",
")",
"urlstate_data",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"urlstate_data",
")",
"return",
"urlstate_data",
".",
"decode",
"(",
"\"utf-8\"",
")"
] |
Will return a url safe representation of the state.
:type encryption_key: Key used for encryption.
:rtype: str
:return: Url representation av of the state.
|
[
"Will",
"return",
"a",
"url",
"safe",
"representation",
"of",
"the",
"state",
"."
] |
python
|
train
| 38.368421 |
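A pipeline sketch of `urlstate`: LZMA-compress the JSON state, encrypt, compress again, then urlsafe-base64 encode. The cipher is stubbed with an identity function; `_AESCipher` in the real code is SATOSA's own AES wrapper.

```python
import base64
import json
from lzma import LZMACompressor

def urlstate(state_dict, encrypt=lambda data: data):  # identity stub for _AESCipher
    lz = LZMACompressor()
    blob = lz.compress(json.dumps(state_dict).encode("UTF-8")) + lz.flush()
    blob = encrypt(blob)                  # AES encryption in the real code
    lz = LZMACompressor()                 # a compressor cannot be reused after flush()
    blob = lz.compress(blob) + lz.flush()
    return base64.urlsafe_b64encode(blob).decode("utf-8")

print(urlstate({"session": "abc123"}))
```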
Nukesor/pueue
|
pueue/daemon/daemon.py
|
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/daemon.py#L319-L344
|
def send_status(self, payload):
"""Send the daemon status and the current queue for displaying."""
answer = {}
data = []
# Get daemon status
if self.paused:
answer['status'] = 'paused'
else:
answer['status'] = 'running'
# Add current queue or a message, that queue is empty
if len(self.queue) > 0:
data = deepcopy(self.queue.queue)
# Remove stderr and stdout output for transfer
# Some outputs are way to big for the socket buffer
# and this is not needed by the client
for key, item in data.items():
if 'stderr' in item:
del item['stderr']
if 'stdout' in item:
del item['stdout']
else:
data = 'Queue is empty'
answer['data'] = data
return answer
|
[
"def",
"send_status",
"(",
"self",
",",
"payload",
")",
":",
"answer",
"=",
"{",
"}",
"data",
"=",
"[",
"]",
"# Get daemon status",
"if",
"self",
".",
"paused",
":",
"answer",
"[",
"'status'",
"]",
"=",
"'paused'",
"else",
":",
"answer",
"[",
"'status'",
"]",
"=",
"'running'",
"# Add current queue or a message, that queue is empty",
"if",
"len",
"(",
"self",
".",
"queue",
")",
">",
"0",
":",
"data",
"=",
"deepcopy",
"(",
"self",
".",
"queue",
".",
"queue",
")",
"# Remove stderr and stdout output for transfer",
"# Some outputs are way to big for the socket buffer",
"# and this is not needed by the client",
"for",
"key",
",",
"item",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"'stderr'",
"in",
"item",
":",
"del",
"item",
"[",
"'stderr'",
"]",
"if",
"'stdout'",
"in",
"item",
":",
"del",
"item",
"[",
"'stdout'",
"]",
"else",
":",
"data",
"=",
"'Queue is empty'",
"answer",
"[",
"'data'",
"]",
"=",
"data",
"return",
"answer"
] |
Send the daemon status and the current queue for displaying.
|
[
"Send",
"the",
"daemon",
"status",
"and",
"the",
"current",
"queue",
"for",
"displaying",
"."
] |
python
|
train
| 33.807692 |
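A sketch of the output-stripping step above: large stdout/stderr payloads are dropped from a deep copy of the queue before it crosses the socket. The `{id: entry-dict}` queue layout is an assumption.

```python
from copy import deepcopy

queue = {0: {'command': 'sleep 60', 'stdout': 'x' * 10**6, 'stderr': ''}}

data = deepcopy(queue)  # never mutate the live queue
for key, item in data.items():
    item.pop('stderr', None)  # same effect as the `if ... in item: del ...` pairs
    item.pop('stdout', None)

print(data)  # {0: {'command': 'sleep 60'}}
```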
ozgurgunes/django-manifest
|
manifest/accounts/models.py
|
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/models.py#L273-L300
|
def get_full_name_or_username(self):
"""
Returns the full name of the user, or if none is supplied will return
the username.
Also looks at ``ACCOUNTS_WITHOUT_USERNAMES`` settings to define if it
should return the username or email address when the full name is not
supplied.
:return:
``String`` containing the full name of the user. If no name is
supplied it will return the username or email address depending on
the ``ACCOUNTS_WITHOUT_USERNAMES`` setting.
"""
if self.first_name or self.last_name:
# We will return this as translated string. Maybe there are some
# countries that first display the last name.
name = _(u"%(first_name)s %(last_name)s") % \
{'first_name': self.first_name,
'last_name': self.last_name}
else:
# Fallback to the username if usernames are used
if not defaults.ACCOUNTS_WITHOUT_USERNAMES:
name = "%(username)s" % {'username': self.username}
else:
name = "%(email)s" % {'email': self.email}
return name.strip()
|
[
"def",
"get_full_name_or_username",
"(",
"self",
")",
":",
"if",
"self",
".",
"first_name",
"or",
"self",
".",
"last_name",
":",
"# We will return this as translated string. Maybe there are some",
"# countries that first display the last name.",
"name",
"=",
"_",
"(",
"u\"%(first_name)s %(last_name)s\"",
")",
"%",
"{",
"'first_name'",
":",
"self",
".",
"first_name",
",",
"'last_name'",
":",
"self",
".",
"last_name",
"}",
"else",
":",
"# Fallback to the username if usernames are used",
"if",
"not",
"defaults",
".",
"ACCOUNTS_WITHOUT_USERNAMES",
":",
"name",
"=",
"\"%(username)s\"",
"%",
"{",
"'username'",
":",
"self",
".",
"username",
"}",
"else",
":",
"name",
"=",
"\"%(email)s\"",
"%",
"{",
"'email'",
":",
"self",
".",
"email",
"}",
"return",
"name",
".",
"strip",
"(",
")"
] |
Returns the full name of the user, or if none is supplied will return
the username.
Also looks at ``ACCOUNTS_WITHOUT_USERNAMES`` settings to define if it
should return the username or email address when the full name is not
supplied.
:return:
``String`` containing the full name of the user. If no name is
supplied it will return the username or email address depending on
the ``ACCOUNTS_WITHOUT_USERNAMES`` setting.
|
[
"Returns",
"the",
"full",
"name",
"of",
"the",
"user",
"or",
"if",
"none",
"is",
"supplied",
"will",
"return",
"the",
"username",
"."
] |
python
|
train
| 41.928571 |
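A hedged sketch of the fallback logic above, with the gettext call and the `ACCOUNTS_WITHOUT_USERNAMES` setting reduced to plain arguments.

```python
def full_name_or_username(first_name, last_name, username, email,
                          without_usernames=False):
    if first_name or last_name:
        # The real method wraps this template in gettext for translation
        name = u"%(first_name)s %(last_name)s" % {'first_name': first_name,
                                                  'last_name': last_name}
    else:
        name = email if without_usernames else username
    return name.strip()

print(full_name_or_username("Ada", "Lovelace", "ada", "ada@example.com"))  # Ada Lovelace
print(full_name_or_username("", "", "ada", "ada@example.com"))             # ada
```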
spdx/tools-python
|
spdx/parsers/tagvaluebuilders.py
|
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvaluebuilders.py#L788-L802
|
def set_pkg_desc(self, doc, text):
"""Set's the package's description.
Raises SPDXValueError if text is not free form text.
Raises CardinalityError if description already set.
Raises OrderError if no package previously defined.
"""
self.assert_package_exists()
if not self.package_desc_set:
self.package_desc_set = True
if validations.validate_pkg_desc(text):
doc.package.description = str_from_text(text)
else:
raise SPDXValueError('Package::Description')
else:
raise CardinalityError('Package::Description')
|
[
"def",
"set_pkg_desc",
"(",
"self",
",",
"doc",
",",
"text",
")",
":",
"self",
".",
"assert_package_exists",
"(",
")",
"if",
"not",
"self",
".",
"package_desc_set",
":",
"self",
".",
"package_desc_set",
"=",
"True",
"if",
"validations",
".",
"validate_pkg_desc",
"(",
"text",
")",
":",
"doc",
".",
"package",
".",
"description",
"=",
"str_from_text",
"(",
"text",
")",
"else",
":",
"raise",
"SPDXValueError",
"(",
"'Package::Description'",
")",
"else",
":",
"raise",
"CardinalityError",
"(",
"'Package::Description'",
")"
] |
Set's the package's description.
Raises SPDXValueError if text is not free form text.
Raises CardinalityError if description already set.
Raises OrderError if no package previously defined.
|
[
"Set",
"s",
"the",
"package",
"s",
"description",
".",
"Raises",
"SPDXValueError",
"if",
"text",
"is",
"not",
"free",
"form",
"text",
".",
"Raises",
"CardinalityError",
"if",
"description",
"already",
"set",
".",
"Raises",
"OrderError",
"if",
"no",
"package",
"previously",
"defined",
"."
] |
python
|
valid
| 42.6 |
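A sketch of the guard ordering above (cardinality first, then value validation), with the `assert_package_exists` order check elided and `validate` standing in for `validations.validate_pkg_desc`.

```python
from types import SimpleNamespace

class SPDXValueError(Exception): pass
class CardinalityError(Exception): pass

class PackageBuilder:
    def __init__(self):
        self.package_desc_set = False

    def set_pkg_desc(self, doc, text, validate=lambda t: bool(t.strip())):
        if self.package_desc_set:                  # cardinality guard
            raise CardinalityError('Package::Description')
        self.package_desc_set = True
        if not validate(text):                     # value guard
            raise SPDXValueError('Package::Description')
        doc.package.description = text

doc = SimpleNamespace(package=SimpleNamespace())
PackageBuilder().set_pkg_desc(doc, "A sample package.")
print(doc.package.description)  # A sample package.
```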
saltstack/salt
|
salt/states/boto_ec2.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L589-L988
|
def instance_present(name, instance_name=None, instance_id=None, image_id=None,
image_name=None, tags=None, key_name=None,
security_groups=None, user_data=None, instance_type=None,
placement=None, kernel_id=None, ramdisk_id=None,
vpc_id=None, vpc_name=None, monitoring_enabled=None,
subnet_id=None, subnet_name=None, private_ip_address=None,
block_device_map=None, disable_api_termination=None,
instance_initiated_shutdown_behavior=None,
placement_group=None, client_token=None,
security_group_ids=None, security_group_names=None,
additional_info=None, tenancy=None,
instance_profile_arn=None, instance_profile_name=None,
ebs_optimized=None, network_interfaces=None,
network_interface_name=None,
network_interface_id=None,
attributes=None, target_state=None, public_ip=None,
allocation_id=None, allocate_eip=False, region=None,
key=None, keyid=None, profile=None):
### TODO - implement 'target_state={running, stopped}'
'''
Ensure an EC2 instance is running with the given attributes and state.
name
(string) - The name of the state definition. Recommended that this
match the instance_name attribute (generally the FQDN of the instance).
instance_name
(string) - The name of the instance, generally its FQDN. Exclusive with
'instance_id'.
instance_id
(string) - The ID of the instance (if known). Exclusive with
'instance_name'.
image_id
(string) – The ID of the AMI image to run.
image_name
(string) – The name of the AMI image to run.
tags
(dict) - Tags to apply to the instance.
key_name
(string) – The name of the key pair with which to launch instances.
security_groups
(list of strings) – The names of the EC2 classic security groups with
which to associate instances
user_data
(string) – The Base64-encoded MIME user data to be made available to the
instance(s) in this reservation.
instance_type
(string) – The EC2 instance size/type. Note that only certain types are
compatible with HVM based AMIs.
placement
(string) – The Availability Zone to launch the instance into.
kernel_id
(string) – The ID of the kernel with which to launch the instances.
ramdisk_id
(string) – The ID of the RAM disk with which to launch the instances.
vpc_id
(string) - The ID of a VPC to attach the instance to.
vpc_name
(string) - The name of a VPC to attach the instance to.
monitoring_enabled
(bool) – Enable detailed CloudWatch monitoring on the instance.
subnet_id
(string) – The ID of the subnet within which to launch the instances for
VPC.
subnet_name
(string) – The name of the subnet within which to launch the instances
for VPC.
private_ip_address
(string) – If you’re using VPC, you can optionally use this parameter to
assign the instance a specific available IP address from the subnet
(e.g., 10.0.0.25).
block_device_map
(boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping
data structure describing the EBS volumes associated with the Image.
disable_api_termination
(bool) – If True, the instances will be locked and will not be able to
be terminated via the API.
instance_initiated_shutdown_behavior
(string) – Specifies whether the instance stops or terminates on
instance-initiated shutdown. Valid values are:
- 'stop'
- 'terminate'
placement_group
(string) – If specified, this is the name of the placement group in
which the instance(s) will be launched.
client_token
(string) – Unique, case-sensitive identifier you provide to ensure
idempotency of the request. Maximum 64 ASCII characters.
security_group_ids
(list of strings) – The IDs of the VPC security groups with which to
associate instances.
security_group_names
(list of strings) – The names of the VPC security groups with which to
associate instances.
additional_info
(string) – Specifies additional information to make available to the
instance(s).
tenancy
(string) – The tenancy of the instance you want to launch. An instance
with a tenancy of ‘dedicated’ runs on single-tenant hardware and can
only be launched into a VPC. Valid values are:”default” or “dedicated”.
NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well.
instance_profile_arn
(string) – The Amazon resource name (ARN) of the IAM Instance Profile
(IIP) to associate with the instances.
instance_profile_name
(string) – The name of the IAM Instance Profile (IIP) to associate with
the instances.
ebs_optimized
(bool) – Whether the instance is optimized for EBS I/O. This
optimization provides dedicated throughput to Amazon EBS and a tuned
configuration stack to provide optimal EBS I/O performance. This
optimization isn’t available with all instance types.
network_interfaces
(boto.ec2.networkinterface.NetworkInterfaceCollection) – A
NetworkInterfaceCollection data structure containing the ENI
specifications for the instance.
network_interface_name
(string) - The name of Elastic Network Interface to attach
.. versionadded:: 2016.11.0
network_interface_id
(string) - The id of Elastic Network Interface to attach
.. versionadded:: 2016.11.0
attributes
(dict) - Instance attributes and value to be applied to the instance.
Available options are:
- instanceType - A valid instance type (m1.small)
- kernel - Kernel ID (None)
- ramdisk - Ramdisk ID (None)
- userData - Base64 encoded String (None)
- disableApiTermination - Boolean (true)
- instanceInitiatedShutdownBehavior - stop|terminate
- blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’]
- sourceDestCheck - Boolean (true)
- groupSet - Set of Security Groups or IDs
- ebsOptimized - Boolean (false)
- sriovNetSupport - String - ie: ‘simple’
target_state
(string) - The desired target state of the instance. Available options
are:
- running
- stopped
Note that this option is currently UNIMPLEMENTED.
public_ip:
(string) - The IP of a previously allocated EIP address, which will be
attached to the instance. EC2 Classic instances ONLY - for VCP pass in
an allocation_id instead.
allocation_id:
(string) - The ID of a previously allocated EIP address, which will be
attached to the instance. VPC instances ONLY - for Classic pass in
a public_ip instead.
allocate_eip:
(bool) - Allocate and attach an EIP on-the-fly for this instance. Note
you'll want to releaase this address when terminating the instance,
either manually or via the 'release_eip' flag to 'instance_absent'.
region
(string) - Region to connect to.
key
(string) - Secret key to be used.
keyid
(string) - Access key to be used.
profile
(variable) - A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
_create = False
running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
changed_attrs = {}
if not salt.utils.data.exactly_one((image_id, image_name)):
raise SaltInvocationError('Exactly one of image_id OR '
'image_name must be provided.')
if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)):
raise SaltInvocationError('At most one of public_ip, allocation_id OR '
'allocate_eip may be provided.')
if instance_id:
exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key,
keyid=keyid, profile=profile, in_states=running_states)
if not exists:
_create = True
else:
instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name,
region=region, key=key, keyid=keyid, profile=profile,
in_states=running_states)
if not instances:
_create = True
elif len(instances) > 1:
log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id')
instance_id = None # No way to know, we'll just have to bail later....
else:
instance_id = instances[0]
if _create:
if __opts__['test']:
ret['comment'] = 'The instance {0} is set to be created.'.format(name)
ret['result'] = None
return ret
if image_name:
args = {'ami_name': image_name, 'region': region, 'key': key,
'keyid': keyid, 'profile': profile}
image_ids = __salt__['boto_ec2.find_images'](**args)
if image_ids:
image_id = image_ids[0]
else:
image_id = image_name
r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name,
tags=tags, key_name=key_name,
security_groups=security_groups, user_data=user_data,
instance_type=instance_type, placement=placement,
kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id,
vpc_name=vpc_name, monitoring_enabled=monitoring_enabled,
subnet_id=subnet_id, subnet_name=subnet_name,
private_ip_address=private_ip_address,
block_device_map=block_device_map,
disable_api_termination=disable_api_termination,
instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
placement_group=placement_group, client_token=client_token,
security_group_ids=security_group_ids,
security_group_names=security_group_names,
additional_info=additional_info, tenancy=tenancy,
instance_profile_arn=instance_profile_arn,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized, network_interfaces=network_interfaces,
network_interface_name=network_interface_name,
network_interface_id=network_interface_id,
region=region, key=key, keyid=keyid, profile=profile)
if not r or 'instance_id' not in r:
ret['result'] = False
ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name)
return ret
instance_id = r['instance_id']
ret['changes'] = {'old': {}, 'new': {}}
ret['changes']['old']['instance_id'] = None
ret['changes']['new']['instance_id'] = instance_id
# To avoid issues we only allocate new EIPs at instance creation.
# This might miss situations where an instance is initially created
# created without and one is added later, but the alternative is the
# risk of EIPs allocated at every state run.
if allocate_eip:
if __opts__['test']:
ret['comment'] = 'New EIP would be allocated.'
ret['result'] = None
return ret
domain = 'vpc' if vpc_id or vpc_name else None
r = __salt__['boto_ec2.allocate_eip_address'](
domain=domain, region=region, key=key, keyid=keyid,
profile=profile)
if not r:
ret['result'] = False
ret['comment'] = 'Failed to allocate new EIP.'
return ret
allocation_id = r['allocation_id']
log.info("New EIP with address %s allocated.", r['public_ip'])
else:
log.info("EIP not requested.")
if public_ip or allocation_id:
# This can take a bit to show up, give it a chance to...
tries = 10
secs = 3
for t in range(tries):
r = __salt__['boto_ec2.get_eip_address_info'](
addresses=public_ip, allocation_ids=allocation_id,
region=region, key=key, keyid=keyid, profile=profile)
if r:
break
else:
log.info(
'Waiting up to %s secs for new EIP %s to become available',
tries * secs, public_ip or allocation_id
)
time.sleep(secs)
if not r:
ret['result'] = False
ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id)
return ret
ip = r[0]['public_ip']
if r[0].get('instance_id'):
if r[0]['instance_id'] != instance_id:
ret['result'] = False
ret['comment'] = ('EIP {0} is already associated with instance '
'{1}.'.format(public_ip if public_ip else
allocation_id, r[0]['instance_id']))
return ret
else:
if __opts__['test']:
ret['comment'] = 'Instance {0} to be updated.'.format(name)
ret['result'] = None
return ret
r = __salt__['boto_ec2.associate_eip_address'](
instance_id=instance_id, public_ip=public_ip,
allocation_id=allocation_id, region=region, key=key,
keyid=keyid, profile=profile)
if r:
if 'new' not in ret['changes']:
ret['changes']['new'] = {}
ret['changes']['new']['public_ip'] = ip
else:
ret['result'] = False
ret['comment'] = 'Failed to attach EIP to instance {0}.'.format(
instance_name if instance_name else name)
return ret
if attributes:
for k, v in six.iteritems(attributes):
curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key,
keyid=keyid, profile=profile)
curr = {} if not isinstance(curr, dict) else curr
if curr.get(k) == v:
continue
else:
if __opts__['test']:
changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format(
k, curr.get(k), v)
continue
try:
r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v,
instance_id=instance_id, region=region,
key=key, keyid=keyid, profile=profile)
except SaltInvocationError as e:
ret['result'] = False
ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name)
return ret
ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}}
ret['changes']['old'][k] = curr.get(k)
ret['changes']['new'][k] = v
if __opts__['test']:
if changed_attrs:
ret['changes']['new'] = changed_attrs
ret['result'] = None
else:
ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name)
ret['result'] = True
if tags and instance_id is not None:
tags = dict(tags)
curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id},
region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {}))
current = set(curr_tags.keys())
desired = set(tags.keys())
remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set...
add = dict([(t, tags[t]) for t in desired - current])
replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)])
# Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative.
add.update(replace)
if add or remove:
if __opts__['test']:
ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
ret['changes']['old']['tags'] = curr_tags
ret['changes']['new']['tags'] = tags
ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if
instance_name else name)
else:
if remove:
if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove,
region=region, key=key, keyid=keyid,
profile=profile):
msg = "Error while deleting tags on instance {0}".format(instance_name if
instance_name else name)
log.error(msg)
ret['comment'] += ' ' + msg
ret['result'] = False
return ret
if add:
if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add,
region=region, key=key, keyid=keyid,
profile=profile):
msg = "Error while creating tags on instance {0}".format(instance_name if
instance_name else name)
log.error(msg)
ret['comment'] += ' ' + msg
ret['result'] = False
return ret
ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
ret['changes']['old']['tags'] = curr_tags
ret['changes']['new']['tags'] = tags
return ret
|
[
"def",
"instance_present",
"(",
"name",
",",
"instance_name",
"=",
"None",
",",
"instance_id",
"=",
"None",
",",
"image_id",
"=",
"None",
",",
"image_name",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"key_name",
"=",
"None",
",",
"security_groups",
"=",
"None",
",",
"user_data",
"=",
"None",
",",
"instance_type",
"=",
"None",
",",
"placement",
"=",
"None",
",",
"kernel_id",
"=",
"None",
",",
"ramdisk_id",
"=",
"None",
",",
"vpc_id",
"=",
"None",
",",
"vpc_name",
"=",
"None",
",",
"monitoring_enabled",
"=",
"None",
",",
"subnet_id",
"=",
"None",
",",
"subnet_name",
"=",
"None",
",",
"private_ip_address",
"=",
"None",
",",
"block_device_map",
"=",
"None",
",",
"disable_api_termination",
"=",
"None",
",",
"instance_initiated_shutdown_behavior",
"=",
"None",
",",
"placement_group",
"=",
"None",
",",
"client_token",
"=",
"None",
",",
"security_group_ids",
"=",
"None",
",",
"security_group_names",
"=",
"None",
",",
"additional_info",
"=",
"None",
",",
"tenancy",
"=",
"None",
",",
"instance_profile_arn",
"=",
"None",
",",
"instance_profile_name",
"=",
"None",
",",
"ebs_optimized",
"=",
"None",
",",
"network_interfaces",
"=",
"None",
",",
"network_interface_name",
"=",
"None",
",",
"network_interface_id",
"=",
"None",
",",
"attributes",
"=",
"None",
",",
"target_state",
"=",
"None",
",",
"public_ip",
"=",
"None",
",",
"allocation_id",
"=",
"None",
",",
"allocate_eip",
"=",
"False",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"### TODO - implement 'target_state={running, stopped}'",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"_create",
"=",
"False",
"running_states",
"=",
"(",
"'pending'",
",",
"'rebooting'",
",",
"'running'",
",",
"'stopping'",
",",
"'stopped'",
")",
"changed_attrs",
"=",
"{",
"}",
"if",
"not",
"salt",
".",
"utils",
".",
"data",
".",
"exactly_one",
"(",
"(",
"image_id",
",",
"image_name",
")",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'Exactly one of image_id OR '",
"'image_name must be provided.'",
")",
"if",
"(",
"public_ip",
"or",
"allocation_id",
"or",
"allocate_eip",
")",
"and",
"not",
"salt",
".",
"utils",
".",
"data",
".",
"exactly_one",
"(",
"(",
"public_ip",
",",
"allocation_id",
",",
"allocate_eip",
")",
")",
":",
"raise",
"SaltInvocationError",
"(",
"'At most one of public_ip, allocation_id OR '",
"'allocate_eip may be provided.'",
")",
"if",
"instance_id",
":",
"exists",
"=",
"__salt__",
"[",
"'boto_ec2.exists'",
"]",
"(",
"instance_id",
"=",
"instance_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
"in_states",
"=",
"running_states",
")",
"if",
"not",
"exists",
":",
"_create",
"=",
"True",
"else",
":",
"instances",
"=",
"__salt__",
"[",
"'boto_ec2.find_instances'",
"]",
"(",
"name",
"=",
"instance_name",
"if",
"instance_name",
"else",
"name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
"in_states",
"=",
"running_states",
")",
"if",
"not",
"instances",
":",
"_create",
"=",
"True",
"elif",
"len",
"(",
"instances",
")",
">",
"1",
":",
"log",
".",
"debug",
"(",
"'Multiple instances matching criteria found - cannot determine a singular instance-id'",
")",
"instance_id",
"=",
"None",
"# No way to know, we'll just have to bail later....",
"else",
":",
"instance_id",
"=",
"instances",
"[",
"0",
"]",
"if",
"_create",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'The instance {0} is set to be created.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"if",
"image_name",
":",
"args",
"=",
"{",
"'ami_name'",
":",
"image_name",
",",
"'region'",
":",
"region",
",",
"'key'",
":",
"key",
",",
"'keyid'",
":",
"keyid",
",",
"'profile'",
":",
"profile",
"}",
"image_ids",
"=",
"__salt__",
"[",
"'boto_ec2.find_images'",
"]",
"(",
"*",
"*",
"args",
")",
"if",
"image_ids",
":",
"image_id",
"=",
"image_ids",
"[",
"0",
"]",
"else",
":",
"image_id",
"=",
"image_name",
"r",
"=",
"__salt__",
"[",
"'boto_ec2.run'",
"]",
"(",
"image_id",
",",
"instance_name",
"if",
"instance_name",
"else",
"name",
",",
"tags",
"=",
"tags",
",",
"key_name",
"=",
"key_name",
",",
"security_groups",
"=",
"security_groups",
",",
"user_data",
"=",
"user_data",
",",
"instance_type",
"=",
"instance_type",
",",
"placement",
"=",
"placement",
",",
"kernel_id",
"=",
"kernel_id",
",",
"ramdisk_id",
"=",
"ramdisk_id",
",",
"vpc_id",
"=",
"vpc_id",
",",
"vpc_name",
"=",
"vpc_name",
",",
"monitoring_enabled",
"=",
"monitoring_enabled",
",",
"subnet_id",
"=",
"subnet_id",
",",
"subnet_name",
"=",
"subnet_name",
",",
"private_ip_address",
"=",
"private_ip_address",
",",
"block_device_map",
"=",
"block_device_map",
",",
"disable_api_termination",
"=",
"disable_api_termination",
",",
"instance_initiated_shutdown_behavior",
"=",
"instance_initiated_shutdown_behavior",
",",
"placement_group",
"=",
"placement_group",
",",
"client_token",
"=",
"client_token",
",",
"security_group_ids",
"=",
"security_group_ids",
",",
"security_group_names",
"=",
"security_group_names",
",",
"additional_info",
"=",
"additional_info",
",",
"tenancy",
"=",
"tenancy",
",",
"instance_profile_arn",
"=",
"instance_profile_arn",
",",
"instance_profile_name",
"=",
"instance_profile_name",
",",
"ebs_optimized",
"=",
"ebs_optimized",
",",
"network_interfaces",
"=",
"network_interfaces",
",",
"network_interface_name",
"=",
"network_interface_name",
",",
"network_interface_id",
"=",
"network_interface_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"r",
"or",
"'instance_id'",
"not",
"in",
"r",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create instance {0}.'",
".",
"format",
"(",
"instance_name",
"if",
"instance_name",
"else",
"name",
")",
"return",
"ret",
"instance_id",
"=",
"r",
"[",
"'instance_id'",
"]",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"'old'",
":",
"{",
"}",
",",
"'new'",
":",
"{",
"}",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"[",
"'instance_id'",
"]",
"=",
"None",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"[",
"'instance_id'",
"]",
"=",
"instance_id",
"# To avoid issues we only allocate new EIPs at instance creation.",
"# This might miss situations where an instance is initially created",
"# created without and one is added later, but the alternative is the",
"# risk of EIPs allocated at every state run.",
"if",
"allocate_eip",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'New EIP would be allocated.'",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"domain",
"=",
"'vpc'",
"if",
"vpc_id",
"or",
"vpc_name",
"else",
"None",
"r",
"=",
"__salt__",
"[",
"'boto_ec2.allocate_eip_address'",
"]",
"(",
"domain",
"=",
"domain",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"not",
"r",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to allocate new EIP.'",
"return",
"ret",
"allocation_id",
"=",
"r",
"[",
"'allocation_id'",
"]",
"log",
".",
"info",
"(",
"\"New EIP with address %s allocated.\"",
",",
"r",
"[",
"'public_ip'",
"]",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"EIP not requested.\"",
")",
"if",
"public_ip",
"or",
"allocation_id",
":",
"# This can take a bit to show up, give it a chance to...",
"tries",
"=",
"10",
"secs",
"=",
"3",
"for",
"t",
"in",
"range",
"(",
"tries",
")",
":",
"r",
"=",
"__salt__",
"[",
"'boto_ec2.get_eip_address_info'",
"]",
"(",
"addresses",
"=",
"public_ip",
",",
"allocation_ids",
"=",
"allocation_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"r",
":",
"break",
"else",
":",
"log",
".",
"info",
"(",
"'Waiting up to %s secs for new EIP %s to become available'",
",",
"tries",
"*",
"secs",
",",
"public_ip",
"or",
"allocation_id",
")",
"time",
".",
"sleep",
"(",
"secs",
")",
"if",
"not",
"r",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to lookup EIP {0}.'",
".",
"format",
"(",
"public_ip",
"or",
"allocation_id",
")",
"return",
"ret",
"ip",
"=",
"r",
"[",
"0",
"]",
"[",
"'public_ip'",
"]",
"if",
"r",
"[",
"0",
"]",
".",
"get",
"(",
"'instance_id'",
")",
":",
"if",
"r",
"[",
"0",
"]",
"[",
"'instance_id'",
"]",
"!=",
"instance_id",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"(",
"'EIP {0} is already associated with instance '",
"'{1}.'",
".",
"format",
"(",
"public_ip",
"if",
"public_ip",
"else",
"allocation_id",
",",
"r",
"[",
"0",
"]",
"[",
"'instance_id'",
"]",
")",
")",
"return",
"ret",
"else",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Instance {0} to be updated.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"r",
"=",
"__salt__",
"[",
"'boto_ec2.associate_eip_address'",
"]",
"(",
"instance_id",
"=",
"instance_id",
",",
"public_ip",
"=",
"public_ip",
",",
"allocation_id",
"=",
"allocation_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"r",
":",
"if",
"'new'",
"not",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"[",
"'public_ip'",
"]",
"=",
"ip",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to attach EIP to instance {0}.'",
".",
"format",
"(",
"instance_name",
"if",
"instance_name",
"else",
"name",
")",
"return",
"ret",
"if",
"attributes",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"attributes",
")",
":",
"curr",
"=",
"__salt__",
"[",
"'boto_ec2.get_attribute'",
"]",
"(",
"k",
",",
"instance_id",
"=",
"instance_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"curr",
"=",
"{",
"}",
"if",
"not",
"isinstance",
"(",
"curr",
",",
"dict",
")",
"else",
"curr",
"if",
"curr",
".",
"get",
"(",
"k",
")",
"==",
"v",
":",
"continue",
"else",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"changed_attrs",
"[",
"k",
"]",
"=",
"'The instance attribute {0} is set to be changed from \\'{1}\\' to \\'{2}\\'.'",
".",
"format",
"(",
"k",
",",
"curr",
".",
"get",
"(",
"k",
")",
",",
"v",
")",
"continue",
"try",
":",
"r",
"=",
"__salt__",
"[",
"'boto_ec2.set_attribute'",
"]",
"(",
"attribute",
"=",
"k",
",",
"attribute_value",
"=",
"v",
",",
"instance_id",
"=",
"instance_id",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"except",
"SaltInvocationError",
"as",
"e",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to set attribute {0} to {1} on instance {2}.'",
".",
"format",
"(",
"k",
",",
"v",
",",
"instance_name",
")",
"return",
"ret",
"ret",
"[",
"'changes'",
"]",
"=",
"ret",
"[",
"'changes'",
"]",
"if",
"ret",
"[",
"'changes'",
"]",
"else",
"{",
"'old'",
":",
"{",
"}",
",",
"'new'",
":",
"{",
"}",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"[",
"k",
"]",
"=",
"curr",
".",
"get",
"(",
"k",
")",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"[",
"k",
"]",
"=",
"v",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"if",
"changed_attrs",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"changed_attrs",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"else",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Instance {0} is in the correct state'",
".",
"format",
"(",
"instance_name",
"if",
"instance_name",
"else",
"name",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"if",
"tags",
"and",
"instance_id",
"is",
"not",
"None",
":",
"tags",
"=",
"dict",
"(",
"tags",
")",
"curr_tags",
"=",
"dict",
"(",
"__salt__",
"[",
"'boto_ec2.get_all_tags'",
"]",
"(",
"filters",
"=",
"{",
"'resource-id'",
":",
"instance_id",
"}",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
".",
"get",
"(",
"instance_id",
",",
"{",
"}",
")",
")",
"current",
"=",
"set",
"(",
"curr_tags",
".",
"keys",
"(",
")",
")",
"desired",
"=",
"set",
"(",
"tags",
".",
"keys",
"(",
")",
")",
"remove",
"=",
"list",
"(",
"current",
"-",
"desired",
")",
"# Boto explicitly requires a list here and can't cope with a set...",
"add",
"=",
"dict",
"(",
"[",
"(",
"t",
",",
"tags",
"[",
"t",
"]",
")",
"for",
"t",
"in",
"desired",
"-",
"current",
"]",
")",
"replace",
"=",
"dict",
"(",
"[",
"(",
"t",
",",
"tags",
"[",
"t",
"]",
")",
"for",
"t",
"in",
"tags",
"if",
"tags",
".",
"get",
"(",
"t",
")",
"!=",
"curr_tags",
".",
"get",
"(",
"t",
")",
"]",
")",
"# Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative.",
"add",
".",
"update",
"(",
"replace",
")",
"if",
"add",
"or",
"remove",
":",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"if",
"'old'",
"in",
"ret",
"[",
"'changes'",
"]",
"else",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"if",
"'new'",
"in",
"ret",
"[",
"'changes'",
"]",
"else",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"[",
"'tags'",
"]",
"=",
"curr_tags",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"[",
"'tags'",
"]",
"=",
"tags",
"ret",
"[",
"'comment'",
"]",
"+=",
"' Tags would be updated on instance {0}.'",
".",
"format",
"(",
"instance_name",
"if",
"instance_name",
"else",
"name",
")",
"else",
":",
"if",
"remove",
":",
"if",
"not",
"__salt__",
"[",
"'boto_ec2.delete_tags'",
"]",
"(",
"resource_ids",
"=",
"instance_id",
",",
"tags",
"=",
"remove",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
":",
"msg",
"=",
"\"Error while deleting tags on instance {0}\"",
".",
"format",
"(",
"instance_name",
"if",
"instance_name",
"else",
"name",
")",
"log",
".",
"error",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"+=",
"' '",
"+",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"if",
"add",
":",
"if",
"not",
"__salt__",
"[",
"'boto_ec2.create_tags'",
"]",
"(",
"resource_ids",
"=",
"instance_id",
",",
"tags",
"=",
"add",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
":",
"msg",
"=",
"\"Error while creating tags on instance {0}\"",
".",
"format",
"(",
"instance_name",
"if",
"instance_name",
"else",
"name",
")",
"log",
".",
"error",
"(",
"msg",
")",
"ret",
"[",
"'comment'",
"]",
"+=",
"' '",
"+",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"return",
"ret",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"=",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"if",
"'old'",
"in",
"ret",
"[",
"'changes'",
"]",
"else",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"if",
"'new'",
"in",
"ret",
"[",
"'changes'",
"]",
"else",
"{",
"}",
"ret",
"[",
"'changes'",
"]",
"[",
"'old'",
"]",
"[",
"'tags'",
"]",
"=",
"curr_tags",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"[",
"'tags'",
"]",
"=",
"tags",
"return",
"ret"
] |
Ensure an EC2 instance is running with the given attributes and state.
name
(string) - The name of the state definition. Recommended that this
match the instance_name attribute (generally the FQDN of the instance).
instance_name
(string) - The name of the instance, generally its FQDN. Exclusive with
'instance_id'.
instance_id
(string) - The ID of the instance (if known). Exclusive with
'instance_name'.
image_id
(string) – The ID of the AMI image to run.
image_name
(string) – The name of the AMI image to run.
tags
(dict) - Tags to apply to the instance.
key_name
(string) – The name of the key pair with which to launch instances.
security_groups
(list of strings) – The names of the EC2 classic security groups with
which to associate instances
user_data
(string) – The Base64-encoded MIME user data to be made available to the
instance(s) in this reservation.
instance_type
(string) – The EC2 instance size/type. Note that only certain types are
compatible with HVM based AMIs.
placement
(string) – The Availability Zone to launch the instance into.
kernel_id
(string) – The ID of the kernel with which to launch the instances.
ramdisk_id
(string) – The ID of the RAM disk with which to launch the instances.
vpc_id
(string) - The ID of a VPC to attach the instance to.
vpc_name
(string) - The name of a VPC to attach the instance to.
monitoring_enabled
(bool) – Enable detailed CloudWatch monitoring on the instance.
subnet_id
(string) – The ID of the subnet within which to launch the instances for
VPC.
subnet_name
(string) – The name of the subnet within which to launch the instances
for VPC.
private_ip_address
(string) – If you’re using VPC, you can optionally use this parameter to
assign the instance a specific available IP address from the subnet
(e.g., 10.0.0.25).
block_device_map
(boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping
data structure describing the EBS volumes associated with the Image.
disable_api_termination
(bool) – If True, the instances will be locked and will not be able to
be terminated via the API.
instance_initiated_shutdown_behavior
(string) – Specifies whether the instance stops or terminates on
instance-initiated shutdown. Valid values are:
- 'stop'
- 'terminate'
placement_group
(string) – If specified, this is the name of the placement group in
which the instance(s) will be launched.
client_token
(string) – Unique, case-sensitive identifier you provide to ensure
idempotency of the request. Maximum 64 ASCII characters.
security_group_ids
(list of strings) – The IDs of the VPC security groups with which to
associate instances.
security_group_names
(list of strings) – The names of the VPC security groups with which to
associate instances.
additional_info
(string) – Specifies additional information to make available to the
instance(s).
tenancy
(string) – The tenancy of the instance you want to launch. An instance
with a tenancy of ‘dedicated’ runs on single-tenant hardware and can
only be launched into a VPC. Valid values are:”default” or “dedicated”.
NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well.
instance_profile_arn
(string) – The Amazon resource name (ARN) of the IAM Instance Profile
(IIP) to associate with the instances.
instance_profile_name
(string) – The name of the IAM Instance Profile (IIP) to associate with
the instances.
ebs_optimized
(bool) – Whether the instance is optimized for EBS I/O. This
optimization provides dedicated throughput to Amazon EBS and a tuned
configuration stack to provide optimal EBS I/O performance. This
optimization isn’t available with all instance types.
network_interfaces
(boto.ec2.networkinterface.NetworkInterfaceCollection) – A
NetworkInterfaceCollection data structure containing the ENI
specifications for the instance.
network_interface_name
(string) - The name of the Elastic Network Interface to attach
.. versionadded:: 2016.11.0
network_interface_id
(string) - The ID of the Elastic Network Interface to attach
.. versionadded:: 2016.11.0
attributes
(dict) - Instance attributes and value to be applied to the instance.
Available options are:
- instanceType - A valid instance type (m1.small)
- kernel - Kernel ID (None)
- ramdisk - Ramdisk ID (None)
- userData - Base64 encoded String (None)
- disableApiTermination - Boolean (true)
- instanceInitiatedShutdownBehavior - stop|terminate
- blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’]
- sourceDestCheck - Boolean (true)
- groupSet - Set of Security Groups or IDs
- ebsOptimized - Boolean (false)
- sriovNetSupport - String - ie: ‘simple’
target_state
(string) - The desired target state of the instance. Available options
are:
- running
- stopped
Note that this option is currently UNIMPLEMENTED.
public_ip:
(string) - The IP of a previously allocated EIP address, which will be
attached to the instance. EC2 Classic instances ONLY - for VPC pass in
an allocation_id instead.
allocation_id:
(string) - The ID of a previously allocated EIP address, which will be
attached to the instance. VPC instances ONLY - for Classic pass in
a public_ip instead.
allocate_eip:
(bool) - Allocate and attach an EIP on-the-fly for this instance. Note
you'll want to release this address when terminating the instance,
either manually or via the 'release_eip' flag to 'instance_absent'.
region
(string) - Region to connect to.
key
(string) - Secret key to be used.
keyid
(string) - Access key to be used.
profile
(variable) - A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
.. versionadded:: 2016.3.0
|
[
"Ensure",
"an",
"EC2",
"instance",
"is",
"running",
"with",
"the",
"given",
"attributes",
"and",
"state",
"."
] |
python
|
train
| 48.9675 |
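A hedged usage sketch for the EC2 state documented above. The function name `instance_present` is inferred from the docstring (the signature is not shown in this record), and every argument value is a placeholder; in practice the state is declared in an SLS file rather than called directly:

def example_instance_present():
    # Hypothetical direct call, for illustration only.
    return instance_present(
        name='web01.example.com',     # state id, matching the instance FQDN
        image_name='my-base-ami',     # resolve the AMI by name instead of image_id
        instance_type='t2.micro',
        key_name='deploy-key',
        subnet_name='apps-subnet',
        allocate_eip=True,            # release it later via instance_absent's release_eip
        region='us-east-1',
    )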
bcbio/bcbio-nextgen
|
bcbio/variation/validateplot.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L230-L244
|
def _check_cats(cats, vtypes, df, prep, callers):
"""Only include categories in the final output if they have values.
"""
out = []
for cat in cats:
all_vals = []
for vtype in vtypes:
vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
all_vals.extend(vals)
if sum(all_vals) / float(len(all_vals)) > 2:
out.append(cat)
if len(out) == 0:
return cats
else:
return out
|
[
"def",
"_check_cats",
"(",
"cats",
",",
"vtypes",
",",
"df",
",",
"prep",
",",
"callers",
")",
":",
"out",
"=",
"[",
"]",
"for",
"cat",
"in",
"cats",
":",
"all_vals",
"=",
"[",
"]",
"for",
"vtype",
"in",
"vtypes",
":",
"vals",
",",
"labels",
",",
"maxval",
"=",
"_get_chart_info",
"(",
"df",
",",
"vtype",
",",
"cat",
",",
"prep",
",",
"callers",
")",
"all_vals",
".",
"extend",
"(",
"vals",
")",
"if",
"sum",
"(",
"all_vals",
")",
"/",
"float",
"(",
"len",
"(",
"all_vals",
")",
")",
">",
"2",
":",
"out",
".",
"append",
"(",
"cat",
")",
"if",
"len",
"(",
"out",
")",
"==",
"0",
":",
"return",
"cats",
"else",
":",
"return",
"out"
] |
Only include categories in the final output if they have values.
|
[
"Only",
"include",
"categories",
"in",
"the",
"final",
"output",
"if",
"they",
"have",
"values",
"."
] |
python
|
train
| 31.2 |
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L241-L255
|
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_wwn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_wwn = ET.SubElement(fcoe_intf_list, "fcoe-intf-wwn")
fcoe_intf_wwn.text = kwargs.pop('fcoe_intf_wwn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_wwn",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"fcoe_get_interface",
"=",
"ET",
".",
"Element",
"(",
"\"fcoe_get_interface\"",
")",
"config",
"=",
"fcoe_get_interface",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"fcoe_get_interface",
",",
"\"output\"",
")",
"fcoe_intf_list",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"fcoe-intf-list\"",
")",
"fcoe_intf_fcoe_port_id_key",
"=",
"ET",
".",
"SubElement",
"(",
"fcoe_intf_list",
",",
"\"fcoe-intf-fcoe-port-id\"",
")",
"fcoe_intf_fcoe_port_id_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'fcoe_intf_fcoe_port_id'",
")",
"fcoe_intf_wwn",
"=",
"ET",
".",
"SubElement",
"(",
"fcoe_intf_list",
",",
"\"fcoe-intf-wwn\"",
")",
"fcoe_intf_wwn",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'fcoe_intf_wwn'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
| 50.466667 |
cdeboever3/cdpybio
|
cdpybio/variants.py
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/variants.py#L32-L81
|
def wasp_snp_directory(vcf, directory, sample_name=None):
"""
Convert VCF file into input for WASP. Only bi-allelic heterozygous sites are
used.
Parameters:
-----------
vcf : str
Path to VCF file.
directory : str
Output directory. This is the directory that will hold the files for
WASP.
sample_name : str
If provided, use this sample name to get heterozygous SNPs from VCF
file.
"""
chrom = []
pos = []
ref = []
alt = []
vcf_reader = pyvcf.Reader(open(vcf, 'r'))
if sample_name:
def condition(record, sample_name):
return sample_name in [x.sample for x in record.get_hets()]
else:
def condition(record, sample_name):
return len(record.get_hets()) > 0
for record in vcf_reader:
if condition(record, sample_name):
if len(record.ALT) == 1:
chrom.append(record.CHROM)
pos.append(record.POS)
ref.append(record.REF)
alt.append(record.ALT[0].sequence)
df = pd.DataFrame([chrom, pos, ref, alt],
index=['chrom', 'position', 'RefAllele', 'AltAllele']).T
if not os.path.exists(directory):
os.makedirs(directory)
for c in set(df.chrom):
tdf = df[df.chrom == c]
if tdf.shape[0] > 0:
f = gzip.open(os.path.join(directory, '{}.snps.txt.gz'.format(c)),
'wb')
lines = (tdf.position.astype(str) + '\t' + tdf.RefAllele + '\t' +
tdf.AltAllele)
f.write('\n'.join(lines) + '\n')
f.close()
|
[
"def",
"wasp_snp_directory",
"(",
"vcf",
",",
"directory",
",",
"sample_name",
"=",
"None",
")",
":",
"chrom",
"=",
"[",
"]",
"pos",
"=",
"[",
"]",
"ref",
"=",
"[",
"]",
"alt",
"=",
"[",
"]",
"vcf_reader",
"=",
"pyvcf",
".",
"Reader",
"(",
"open",
"(",
"vcf",
",",
"'r'",
")",
")",
"if",
"sample_name",
":",
"def",
"condition",
"(",
"record",
",",
"sample_name",
")",
":",
"return",
"sample_name",
"in",
"[",
"x",
".",
"sample",
"for",
"x",
"in",
"record",
".",
"get_hets",
"(",
")",
"]",
"else",
":",
"def",
"condition",
"(",
"record",
",",
"sample_name",
")",
":",
"return",
"len",
"(",
"record",
".",
"get_hets",
"(",
")",
")",
">",
"0",
"for",
"record",
"in",
"vcf_reader",
":",
"if",
"condition",
"(",
"record",
",",
"sample_name",
")",
":",
"if",
"len",
"(",
"record",
".",
"ALT",
")",
"==",
"1",
":",
"chrom",
".",
"append",
"(",
"record",
".",
"CHROM",
")",
"pos",
".",
"append",
"(",
"record",
".",
"POS",
")",
"ref",
".",
"append",
"(",
"record",
".",
"REF",
")",
"alt",
".",
"append",
"(",
"record",
".",
"ALT",
"[",
"0",
"]",
".",
"sequence",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"chrom",
",",
"pos",
",",
"ref",
",",
"alt",
"]",
",",
"index",
"=",
"[",
"'chrom'",
",",
"'position'",
",",
"'RefAllele'",
",",
"'AltAllele'",
"]",
")",
".",
"T",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"os",
".",
"makedirs",
"(",
"directory",
")",
"for",
"c",
"in",
"set",
"(",
"df",
".",
"chrom",
")",
":",
"tdf",
"=",
"df",
"[",
"df",
".",
"chrom",
"==",
"c",
"]",
"if",
"tdf",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"f",
"=",
"gzip",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'{}.snps.txt.gz'",
".",
"format",
"(",
"c",
")",
")",
",",
"'wb'",
")",
"lines",
"=",
"(",
"tdf",
".",
"position",
".",
"astype",
"(",
"str",
")",
"+",
"'\\t'",
"+",
"tdf",
".",
"RefAllele",
"+",
"'\\t'",
"+",
"tdf",
".",
"AltAllele",
")",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
"+",
"'\\n'",
")",
"f",
".",
"close",
"(",
")"
] |
Convert VCF file into input for WASP. Only bi-allelic heterozygous sites are
used.
Parameters:
-----------
vcf : str
Path to VCF file.
directory : str
Output directory. This is the directory that will hold the files for
WASP.
sample_name : str
If provided, use this sample name to get heterozygous SNPs from VCF
file.
|
[
"Convert",
"VCF",
"file",
"into",
"input",
"for",
"WASP",
".",
"Only",
"bi",
"-",
"allelic",
"heterozygous",
"sites",
"are",
"used",
"."
] |
python
|
train
| 32.16 |
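A minimal usage sketch for `wasp_snp_directory`; the VCF path, output directory, and sample name below are placeholders:

wasp_snp_directory('calls.vcf', 'wasp_snps', sample_name='NA12878')
# wasp_snps/ now holds one <chrom>.snps.txt.gz per chromosome, each line being
# position<TAB>ref<TAB>alt for the bi-allelic heterozygous sites of that sample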
pyslackers/slack-sansio
|
slack/io/requests.py
|
https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/io/requests.py#L159-L178
|
def rtm( # type: ignore
self, url: Optional[str] = None, bot_id: Optional[str] = None
) -> Iterator[events.Event]:
"""
Iterate over events from the RTM API
Args:
url: Websocket connection url
bot_id: Connecting bot ID
Returns:
:class:`slack.events.Event` or :class:`slack.events.Message`
"""
while True:
bot_id = bot_id or self._find_bot_id()
url = url or self._find_rtm_url()
for event in self._incoming_from_rtm(url, bot_id):
yield event
url = None
|
[
"def",
"rtm",
"(",
"# type: ignore",
"self",
",",
"url",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"bot_id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Iterator",
"[",
"events",
".",
"Event",
"]",
":",
"while",
"True",
":",
"bot_id",
"=",
"bot_id",
"or",
"self",
".",
"_find_bot_id",
"(",
")",
"url",
"=",
"url",
"or",
"self",
".",
"_find_rtm_url",
"(",
")",
"for",
"event",
"in",
"self",
".",
"_incoming_from_rtm",
"(",
"url",
",",
"bot_id",
")",
":",
"yield",
"event",
"url",
"=",
"None"
] |
Iterate over events from the RTM API
Args:
url: Websocket connection url
bot_id: Connecting bot ID
Returns:
:class:`slack.events.Event` or :class:`slack.events.Message`
|
[
"Iterate",
"over",
"event",
"from",
"the",
"RTM",
"API"
] |
python
|
train
| 29.7 |
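A hedged consumption sketch for `rtm`; construction of the synchronous slack client is assumed from elsewhere in the library:

for event in client.rtm():   # reconnects transparently; url is reset after each drop
    print(event)             # slack.events.Event / slack.events.Message instances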
aio-libs/aiomysql
|
aiomysql/connection.py
|
https://github.com/aio-libs/aiomysql/blob/131fb9f914739ff01a24b402d29bfd719f2d1a8b/aiomysql/connection.py#L295-L300
|
def close(self):
"""Close socket connection"""
if self._writer:
self._writer.transport.close()
self._writer = None
self._reader = None
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_writer",
":",
"self",
".",
"_writer",
".",
"transport",
".",
"close",
"(",
")",
"self",
".",
"_writer",
"=",
"None",
"self",
".",
"_reader",
"=",
"None"
] |
Close socket connection
|
[
"Close",
"socket",
"connection"
] |
python
|
train
| 28.833333 |
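This is the abrupt, synchronous close (the transport is dropped without a graceful quit); a cleanup sketch, with placeholder connection parameters:

conn = await aiomysql.connect(host='127.0.0.1', user='root')
try:
    ...   # run queries
finally:
    conn.close()   # tears down the socket; aiomysql also offers ensure_closed() for a graceful quit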
weld-project/weld
|
python/grizzly/grizzly/grizzly_impl.py
|
https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/grizzly/grizzly/grizzly_impl.py#L195-L247
|
def pivot_filter(pivot_array, predicates, ty=None):
"""
Returns a new array, with each element in the original array satisfying the
passed-in predicate set to `new_value`
Args:
array (WeldObject / Numpy.ndarray): Input array
predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
pivot_array_var = weld_obj.update(pivot_array)
if isinstance(pivot_array, WeldObject):
pivot_array_var = pivot_array.obj_id
weld_obj.dependencies[pivot_array_var] = pivot_array
predicates_var = weld_obj.update(predicates)
if isinstance(predicates, WeldObject):
predicates_var = predicates.obj_id
weld_obj.dependencies[predicates_var] = predicates
weld_template = """
let index_filtered =
result(
for(
zip(%(array)s.$0, %(predicates)s),
appender,
|b, i, e| if (e.$1, merge(b, e.$0), b)
)
);
let pivot_filtered =
map(
%(array)s.$1,
|x|
result(
for(
zip(x, %(predicates)s),
appender,
|b, i, e| if (e.$1, merge(b, e.$0), b)
)
)
);
{index_filtered, pivot_filtered, %(array)s.$2}
"""
weld_obj.weld_code = weld_template % {
"array": pivot_array_var,
"predicates": predicates_var}
return weld_obj
|
[
"def",
"pivot_filter",
"(",
"pivot_array",
",",
"predicates",
",",
"ty",
"=",
"None",
")",
":",
"weld_obj",
"=",
"WeldObject",
"(",
"encoder_",
",",
"decoder_",
")",
"pivot_array_var",
"=",
"weld_obj",
".",
"update",
"(",
"pivot_array",
")",
"if",
"isinstance",
"(",
"pivot_array",
",",
"WeldObject",
")",
":",
"pivot_array_var",
"=",
"pivot_array",
".",
"obj_id",
"weld_obj",
".",
"dependencies",
"[",
"pivot_array_var",
"]",
"=",
"pivot_array",
"predicates_var",
"=",
"weld_obj",
".",
"update",
"(",
"predicates",
")",
"if",
"isinstance",
"(",
"predicates",
",",
"WeldObject",
")",
":",
"predicates_var",
"=",
"predicates",
".",
"obj_id",
"weld_obj",
".",
"dependencies",
"[",
"predicates_var",
"]",
"=",
"predicates",
"weld_template",
"=",
"\"\"\"\n let index_filtered =\n result(\n for(\n zip(%(array)s.$0, %(predicates)s),\n appender,\n |b, i, e| if (e.$1, merge(b, e.$0), b)\n )\n );\n let pivot_filtered =\n map(\n %(array)s.$1,\n |x|\n result(\n for(\n zip(x, %(predicates)s),\n appender,\n |b, i, e| if (e.$1, merge(b, e.$0), b)\n )\n )\n );\n {index_filtered, pivot_filtered, %(array)s.$2}\n \"\"\"",
"weld_obj",
".",
"weld_code",
"=",
"weld_template",
"%",
"{",
"\"array\"",
":",
"pivot_array_var",
",",
"\"predicates\"",
":",
"predicates_var",
"}",
"return",
"weld_obj"
] |
Returns a new array, with each element in the original array satisfying the
passed-in predicate set to `new_value`
Args:
array (WeldObject / Numpy.ndarray): Input array
predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
|
[
"Returns",
"a",
"new",
"array",
"with",
"each",
"element",
"in",
"the",
"original",
"array",
"satisfying",
"the",
"passed",
"-",
"in",
"predicate",
"set",
"to",
"new_value"
] |
python
|
train
| 28.396226 |
LonamiWebs/Telethon
|
telethon_examples/interactive_telegram_client.py
|
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_examples/interactive_telegram_client.py#L16-L23
|
def sprint(string, *args, **kwargs):
"""Safe Print (handle UnicodeEncodeErrors on some terminals)"""
try:
print(string, *args, **kwargs)
except UnicodeEncodeError:
string = string.encode('utf-8', errors='ignore')\
.decode('ascii', errors='ignore')
print(string, *args, **kwargs)
|
[
"def",
"sprint",
"(",
"string",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"print",
"(",
"string",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"UnicodeEncodeError",
":",
"string",
"=",
"string",
".",
"encode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'ignore'",
")",
".",
"decode",
"(",
"'ascii'",
",",
"errors",
"=",
"'ignore'",
")",
"print",
"(",
"string",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Safe Print (handle UnicodeEncodeErrors on some terminals)
|
[
"Safe",
"Print",
"(",
"handle",
"UnicodeEncodeErrors",
"on",
"some",
"terminals",
")"
] |
python
|
train
| 41.25 |
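A quick sketch of the fallback behavior:

sprint('snake: \U0001F40D')   # on a terminal that cannot encode this, the non-ASCII
                              # characters are silently dropped instead of raising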
spyder-ide/spyder
|
spyder/utils/qthelpers.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/qthelpers.py#L44-L48
|
def get_image_label(name, default="not_found.png"):
"""Return image inside a QLabel object"""
label = QLabel()
label.setPixmap(QPixmap(get_image_path(name, default)))
return label
|
[
"def",
"get_image_label",
"(",
"name",
",",
"default",
"=",
"\"not_found.png\"",
")",
":",
"label",
"=",
"QLabel",
"(",
")",
"label",
".",
"setPixmap",
"(",
"QPixmap",
"(",
"get_image_path",
"(",
"name",
",",
"default",
")",
")",
")",
"return",
"label"
] |
Return image inside a QLabel object
|
[
"Return",
"image",
"inside",
"a",
"QLabel",
"object"
] |
python
|
train
| 39 |
SoCo/SoCo
|
soco/services.py
|
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/services.py#L786-L790
|
def GetZoneGroupState(self, *args, **kwargs):
"""Overrides default handling to use the global shared zone group state
cache, unless another cache is specified."""
kwargs['cache'] = kwargs.get('cache', zone_group_state_shared_cache)
return self.send_command('GetZoneGroupState', *args, **kwargs)
|
[
"def",
"GetZoneGroupState",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'cache'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'cache'",
",",
"zone_group_state_shared_cache",
")",
"return",
"self",
".",
"send_command",
"(",
"'GetZoneGroupState'",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Overrides default handling to use the global shared zone group state
cache, unless another cache is specified.
|
[
"Overrides",
"default",
"handling",
"to",
"use",
"the",
"global",
"shared",
"zone",
"group",
"state",
"cache",
"unless",
"another",
"cache",
"is",
"specified",
"."
] |
python
|
train
| 64.4 |
tomduck/pandoc-eqnos
|
pandoc_eqnos.py
|
https://github.com/tomduck/pandoc-eqnos/blob/a0e2b5684d2024ea96049ed2cff3acf4ab47c541/pandoc_eqnos.py#L158-L208
|
def process_equations(key, value, fmt, meta):
"""Processes the attributed equations."""
if key == 'Math' and len(value) == 3:
# Process the equation
eq = _process_equation(value, fmt)
# Get the attributes and label
attrs = eq['attrs']
label = attrs[0]
if eq['is_unreferenceable']:
attrs[0] = '' # The label isn't needed outside this function
# Context-dependent output
if eq['is_unnumbered']: # Unnumbered is also unreferenceable
return None
elif fmt in ['latex', 'beamer']:
return RawInline('tex',
r'\begin{equation}%s\end{equation}'%value[-1])
elif fmt in ('html', 'html5') and LABEL_PATTERN.match(label):
# Present equation and its number in a span
text = str(references[label])
outerspan = RawInline('html',
'<span %s style="display: inline-block; '
'position: relative; width: 100%%">'%(''\
if eq['is_unreferenceable'] \
else 'id="%s"'%label))
innerspan = RawInline('html',
'<span style="position: absolute; '
'right: 0em; top: %s; line-height:0; '
'text-align: right">' %
('0' if text.startswith('$') and
text.endswith('$') else '50%',))
num = Math({"t":"InlineMath"}, '(%s)' % text[1:-1]) \
if text.startswith('$') and text.endswith('$') \
else Str('(%s)' % text)
endspans = RawInline('html', '</span></span>')
return [outerspan, AttrMath(*value), innerspan, num, endspans]
elif fmt == 'docx':
# As per http://officeopenxml.com/WPhyperlink.php
bookmarkstart = \
RawInline('openxml',
'<w:bookmarkStart w:id="0" w:name="%s"/><w:r><w:t>'
%label)
bookmarkend = \
RawInline('openxml',
'</w:t></w:r><w:bookmarkEnd w:id="0"/>')
return [bookmarkstart, AttrMath(*value), bookmarkend]
return None
|
[
"def",
"process_equations",
"(",
"key",
",",
"value",
",",
"fmt",
",",
"meta",
")",
":",
"if",
"key",
"==",
"'Math'",
"and",
"len",
"(",
"value",
")",
"==",
"3",
":",
"# Process the equation",
"eq",
"=",
"_process_equation",
"(",
"value",
",",
"fmt",
")",
"# Get the attributes and label",
"attrs",
"=",
"eq",
"[",
"'attrs'",
"]",
"label",
"=",
"attrs",
"[",
"0",
"]",
"if",
"eq",
"[",
"'is_unreferenceable'",
"]",
":",
"attrs",
"[",
"0",
"]",
"=",
"''",
"# The label isn't needed outside this function",
"# Context-dependent output",
"if",
"eq",
"[",
"'is_unnumbered'",
"]",
":",
"# Unnumbered is also unreferenceable",
"return",
"None",
"elif",
"fmt",
"in",
"[",
"'latex'",
",",
"'beamer'",
"]",
":",
"return",
"RawInline",
"(",
"'tex'",
",",
"r'\\begin{equation}%s\\end{equation}'",
"%",
"value",
"[",
"-",
"1",
"]",
")",
"elif",
"fmt",
"in",
"(",
"'html'",
",",
"'html5'",
")",
"and",
"LABEL_PATTERN",
".",
"match",
"(",
"label",
")",
":",
"# Present equation and its number in a span",
"text",
"=",
"str",
"(",
"references",
"[",
"label",
"]",
")",
"outerspan",
"=",
"RawInline",
"(",
"'html'",
",",
"'<span %s style=\"display: inline-block; '",
"'position: relative; width: 100%%\">'",
"%",
"(",
"''",
"if",
"eq",
"[",
"'is_unreferenceable'",
"]",
"else",
"'id=\"%s\"'",
"%",
"label",
")",
")",
"innerspan",
"=",
"RawInline",
"(",
"'html'",
",",
"'<span style=\"position: absolute; '",
"'right: 0em; top: %s; line-height:0; '",
"'text-align: right\">'",
"%",
"(",
"'0'",
"if",
"text",
".",
"startswith",
"(",
"'$'",
")",
"and",
"text",
".",
"endswith",
"(",
"'$'",
")",
"else",
"'50%'",
",",
")",
")",
"num",
"=",
"Math",
"(",
"{",
"\"t\"",
":",
"\"InlineMath\"",
"}",
",",
"'(%s)'",
"%",
"text",
"[",
"1",
":",
"-",
"1",
"]",
")",
"if",
"text",
".",
"startswith",
"(",
"'$'",
")",
"and",
"text",
".",
"endswith",
"(",
"'$'",
")",
"else",
"Str",
"(",
"'(%s)'",
"%",
"text",
")",
"endspans",
"=",
"RawInline",
"(",
"'html'",
",",
"'</span></span>'",
")",
"return",
"[",
"outerspan",
",",
"AttrMath",
"(",
"*",
"value",
")",
",",
"innerspan",
",",
"num",
",",
"endspans",
"]",
"elif",
"fmt",
"==",
"'docx'",
":",
"# As per http://officeopenxml.com/WPhyperlink.php",
"bookmarkstart",
"=",
"RawInline",
"(",
"'openxml'",
",",
"'<w:bookmarkStart w:id=\"0\" w:name=\"%s\"/><w:r><w:t>'",
"%",
"label",
")",
"bookmarkend",
"=",
"RawInline",
"(",
"'openxml'",
",",
"'</w:t></w:r><w:bookmarkEnd w:id=\"0\"/>'",
")",
"return",
"[",
"bookmarkstart",
",",
"AttrMath",
"(",
"*",
"value",
")",
",",
"bookmarkend",
"]",
"return",
"None"
] |
Processes the attributed equations.
|
[
"Processes",
"the",
"attributed",
"equations",
"."
] |
python
|
train
| 44.745098 |
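The four-argument signature matches the pandocfilters action protocol, so a filter built on this function could be wired up roughly as below (a sketch; the project's real entry point may differ):

from pandocfilters import toJSONFilter

if __name__ == '__main__':
    toJSONFilter(process_equations)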
assemblerflow/flowcraft
|
flowcraft/templates/assembly_report.py
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L143-L181
|
def _parse_assembly(self, assembly_file):
"""Parse an assembly file in fasta format.
This is a Fasta parsing method that populates the
:py:attr:`Assembly.contigs` attribute with data for each contig in the
assembly.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
"""
with open(assembly_file) as fh:
header = None
logger.debug("Starting iteration of assembly file: {}".format(
assembly_file))
for line in fh:
# Skip empty lines
if not line.strip():
continue
if line.startswith(">"):
# Add contig header to contig dictionary
header = line[1:].strip()
self.contigs[header] = []
else:
# Add sequence string for the current contig
self.contigs[header].append(line.strip())
# After populating the contigs dictionary, convert the values
# list into a string sequence
self.contigs = OrderedDict(
(header, "".join(seq)) for header, seq in self.contigs.items())
|
[
"def",
"_parse_assembly",
"(",
"self",
",",
"assembly_file",
")",
":",
"with",
"open",
"(",
"assembly_file",
")",
"as",
"fh",
":",
"header",
"=",
"None",
"logger",
".",
"debug",
"(",
"\"Starting iteration of assembly file: {}\"",
".",
"format",
"(",
"assembly_file",
")",
")",
"for",
"line",
"in",
"fh",
":",
"# Skip empty lines",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"\">\"",
")",
":",
"# Add contig header to contig dictionary",
"header",
"=",
"line",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"self",
".",
"contigs",
"[",
"header",
"]",
"=",
"[",
"]",
"else",
":",
"# Add sequence string for the current contig",
"self",
".",
"contigs",
"[",
"header",
"]",
".",
"append",
"(",
"line",
".",
"strip",
"(",
")",
")",
"# After populating the contigs dictionary, convert the values",
"# list into a string sequence",
"self",
".",
"contigs",
"=",
"OrderedDict",
"(",
"(",
"header",
",",
"\"\"",
".",
"join",
"(",
"seq",
")",
")",
"for",
"header",
",",
"seq",
"in",
"self",
".",
"contigs",
".",
"items",
"(",
")",
")"
] |
Parse an assembly file in fasta format.
This is a Fasta parsing method that populates the
:py:attr:`Assembly.contigs` attribute with data for each contig in the
assembly.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
|
[
"Parse",
"an",
"assembly",
"file",
"in",
"fasta",
"format",
"."
] |
python
|
test
| 31.384615 |
androguard/androguard
|
androguard/core/bytecodes/dvm.py
|
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L2294-L2303
|
def get_class_name(self):
"""
Return the class name of the field
:rtype: string
"""
if self.class_idx_value is None:
self.class_idx_value = self.CM.get_type(self.class_idx)
return self.class_idx_value
|
[
"def",
"get_class_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"class_idx_value",
"is",
"None",
":",
"self",
".",
"class_idx_value",
"=",
"self",
".",
"CM",
".",
"get_type",
"(",
"self",
".",
"class_idx",
")",
"return",
"self",
".",
"class_idx_value"
] |
Return the class name of the field
:rtype: string
|
[
"Return",
"the",
"class",
"name",
"of",
"the",
"field"
] |
python
|
train
| 25.3 |
RedHatInsights/insights-core
|
insights/client/mount.py
|
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L112-L118
|
def _get_fs(thin_pathname):
"""
Returns the file system type (xfs, ext4) of a given device
"""
cmd = ['lsblk', '-o', 'FSTYPE', '-n', thin_pathname]
fs_return = util.subp(cmd)
return fs_return.stdout.strip()
|
[
"def",
"_get_fs",
"(",
"thin_pathname",
")",
":",
"cmd",
"=",
"[",
"'lsblk'",
",",
"'-o'",
",",
"'FSTYPE'",
",",
"'-n'",
",",
"thin_pathname",
"]",
"fs_return",
"=",
"util",
".",
"subp",
"(",
"cmd",
")",
"return",
"fs_return",
".",
"stdout",
".",
"strip",
"(",
")"
] |
Returns the file system type (xfs, ext4) of a given device
|
[
"Returns",
"the",
"file",
"system",
"type",
"(",
"xfs",
"ext4",
")",
"of",
"a",
"given",
"device"
] |
python
|
train
| 35.428571 |
hamelsmu/ktext
|
ktext/preprocess.py
|
https://github.com/hamelsmu/ktext/blob/221f09f5b1762705075fd1bd914881c0724d5e02/ktext/preprocess.py#L60-L78
|
def apply_parallel(func: Callable,
data: List[Any],
cpu_cores: int = None) -> List[Any]:
"""
Apply function to list of elements.
Automatically determines the chunk size.
"""
if not cpu_cores:
cpu_cores = cpu_count()
try:
chunk_size = ceil(len(data) / cpu_cores)
pool = Pool(cpu_cores)
transformed_data = pool.map(func, chunked(data, chunk_size), chunksize=1)
finally:
pool.close()
pool.join()
return transformed_data
|
[
"def",
"apply_parallel",
"(",
"func",
":",
"Callable",
",",
"data",
":",
"List",
"[",
"Any",
"]",
",",
"cpu_cores",
":",
"int",
"=",
"None",
")",
"->",
"List",
"[",
"Any",
"]",
":",
"if",
"not",
"cpu_cores",
":",
"cpu_cores",
"=",
"cpu_count",
"(",
")",
"try",
":",
"chunk_size",
"=",
"ceil",
"(",
"len",
"(",
"data",
")",
"/",
"cpu_cores",
")",
"pool",
"=",
"Pool",
"(",
"cpu_cores",
")",
"transformed_data",
"=",
"pool",
".",
"map",
"(",
"func",
",",
"chunked",
"(",
"data",
",",
"chunk_size",
")",
",",
"chunksize",
"=",
"1",
")",
"finally",
":",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"return",
"transformed_data"
] |
Apply function to list of elements.
Automatically determines the chunk size.
|
[
"Apply",
"function",
"to",
"list",
"of",
"elements",
"."
] |
python
|
test
| 27.473684 |
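A minimal sketch for `apply_parallel`; note that `func` receives a whole chunk (via `chunked`) and must be a picklable top-level callable:

def double_chunk(chunk):
    return [x * 2 for x in chunk]

# one result per chunk, e.g. [[0, 2, 4, 6, 8], [10, 12, 14, 16, 18]] with 2 cores
results = apply_parallel(double_chunk, list(range(10)), cpu_cores=2)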
shad7/tvrenamer
|
tasks.py
|
https://github.com/shad7/tvrenamer/blob/7fb59cb02669357e73b7acb92dcb6d74fdff4654/tasks.py#L220-L234
|
def clean(all=False, docs=False, dist=False, extra=None):
"""Clean up build files"""
run('find . -type f -name "*.py[co]" -delete')
run('find . -type d -name "__pycache__" -delete')
patterns = ['build', '*.egg-info/']
if all or docs:
patterns.append('doc/build/*')
if all or dist:
patterns.append('dist')
if extra:
patterns.append(extra)
for pattern in patterns:
run('rm -rf {}'.format(pattern))
|
[
"def",
"clean",
"(",
"all",
"=",
"False",
",",
"docs",
"=",
"False",
",",
"dist",
"=",
"False",
",",
"extra",
"=",
"None",
")",
":",
"run",
"(",
"'find . -type f -name \"*.py[co]\" -delete'",
")",
"run",
"(",
"'find . -type d -name \"__pycache__\" -delete'",
")",
"patterns",
"=",
"[",
"'build'",
",",
"'*.egg-info/'",
"]",
"if",
"all",
"or",
"docs",
":",
"patterns",
".",
"append",
"(",
"'doc/build/*'",
")",
"if",
"all",
"or",
"dist",
":",
"patterns",
".",
"append",
"(",
"'dist'",
")",
"if",
"extra",
":",
"patterns",
".",
"append",
"(",
"extra",
")",
"for",
"pattern",
"in",
"patterns",
":",
"run",
"(",
"'rm -rf {}'",
".",
"format",
"(",
"pattern",
")",
")"
] |
Clean up build files
|
[
"Clean",
"up",
"build",
"files"
] |
python
|
train
| 29.8 |
pybel/pybel
|
src/pybel/parser/parse_bel.py
|
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_bel.py#L720-L741
|
def _add_qualified_edge(self, u, v, relation, annotations, subject_modifier, object_modifier) -> str:
"""Add an edge, then adds the opposite direction edge if it should."""
sha512 = self._add_qualified_edge_helper(
u,
v,
relation=relation,
annotations=annotations,
subject_modifier=subject_modifier,
object_modifier=object_modifier,
)
if relation in TWO_WAY_RELATIONS:
self._add_qualified_edge_helper(
v,
u,
relation=relation,
annotations=annotations,
object_modifier=subject_modifier,
subject_modifier=object_modifier,
)
return sha512
|
[
"def",
"_add_qualified_edge",
"(",
"self",
",",
"u",
",",
"v",
",",
"relation",
",",
"annotations",
",",
"subject_modifier",
",",
"object_modifier",
")",
"->",
"str",
":",
"sha512",
"=",
"self",
".",
"_add_qualified_edge_helper",
"(",
"u",
",",
"v",
",",
"relation",
"=",
"relation",
",",
"annotations",
"=",
"annotations",
",",
"subject_modifier",
"=",
"subject_modifier",
",",
"object_modifier",
"=",
"object_modifier",
",",
")",
"if",
"relation",
"in",
"TWO_WAY_RELATIONS",
":",
"self",
".",
"_add_qualified_edge_helper",
"(",
"v",
",",
"u",
",",
"relation",
"=",
"relation",
",",
"annotations",
"=",
"annotations",
",",
"object_modifier",
"=",
"subject_modifier",
",",
"subject_modifier",
"=",
"object_modifier",
",",
")",
"return",
"sha512"
] |
Add an edge, then add the opposite direction edge if it should.
|
[
"Add",
"an",
"edge",
"then",
"adds",
"the",
"opposite",
"direction",
"edge",
"if",
"it",
"should",
"."
] |
python
|
train
| 34 |
genialis/resolwe
|
resolwe/flow/signals.py
|
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/signals.py#L48-L61
|
def delete_relation(sender, instance, **kwargs):
"""Delete the Relation object when the last Entity is removed."""
def process_signal(relation_id):
"""Get the relation and delete it if it has no entities left."""
try:
relation = Relation.objects.get(pk=relation_id)
except Relation.DoesNotExist:
return
if relation.entities.count() == 0:
relation.delete()
# Wait for partitions to be recreated.
transaction.on_commit(lambda: process_signal(instance.relation_id))
|
[
"def",
"delete_relation",
"(",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"process_signal",
"(",
"relation_id",
")",
":",
"\"\"\"Get the relation and delete it if it has no entities left.\"\"\"",
"try",
":",
"relation",
"=",
"Relation",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"relation_id",
")",
"except",
"Relation",
".",
"DoesNotExist",
":",
"return",
"if",
"relation",
".",
"entities",
".",
"count",
"(",
")",
"==",
"0",
":",
"relation",
".",
"delete",
"(",
")",
"# Wait for partitions to be recreated.",
"transaction",
".",
"on_commit",
"(",
"lambda",
":",
"process_signal",
"(",
"instance",
".",
"relation_id",
")",
")"
] |
Delete the Relation object when the last Entity is removed.
|
[
"Delete",
"the",
"Relation",
"object",
"when",
"the",
"last",
"Entity",
"is",
"removed",
"."
] |
python
|
train
| 38.214286 |
googleapis/google-cloud-python
|
oslogin/google/cloud/oslogin_v1/gapic/os_login_service_client.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/oslogin/google/cloud/oslogin_v1/gapic/os_login_service_client.py#L195-L248
|
def delete_posix_account(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a POSIX account.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.project_path('[USER]', '[PROJECT]')
>>>
>>> client.delete_posix_account(name)
Args:
name (str): A reference to the POSIX account to update. POSIX accounts are
identified by the project ID they are associated with. A reference to
the POSIX account is in format ``users/{user}/projects/{project}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_posix_account" not in self._inner_api_calls:
self._inner_api_calls[
"delete_posix_account"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_posix_account,
default_retry=self._method_configs["DeletePosixAccount"].retry,
default_timeout=self._method_configs["DeletePosixAccount"].timeout,
client_info=self._client_info,
)
request = oslogin_pb2.DeletePosixAccountRequest(name=name)
self._inner_api_calls["delete_posix_account"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
[
"def",
"delete_posix_account",
"(",
"self",
",",
"name",
",",
"retry",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"timeout",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"DEFAULT",
",",
"metadata",
"=",
"None",
",",
")",
":",
"# Wrap the transport method to add retry and timeout logic.",
"if",
"\"delete_posix_account\"",
"not",
"in",
"self",
".",
"_inner_api_calls",
":",
"self",
".",
"_inner_api_calls",
"[",
"\"delete_posix_account\"",
"]",
"=",
"google",
".",
"api_core",
".",
"gapic_v1",
".",
"method",
".",
"wrap_method",
"(",
"self",
".",
"transport",
".",
"delete_posix_account",
",",
"default_retry",
"=",
"self",
".",
"_method_configs",
"[",
"\"DeletePosixAccount\"",
"]",
".",
"retry",
",",
"default_timeout",
"=",
"self",
".",
"_method_configs",
"[",
"\"DeletePosixAccount\"",
"]",
".",
"timeout",
",",
"client_info",
"=",
"self",
".",
"_client_info",
",",
")",
"request",
"=",
"oslogin_pb2",
".",
"DeletePosixAccountRequest",
"(",
"name",
"=",
"name",
")",
"self",
".",
"_inner_api_calls",
"[",
"\"delete_posix_account\"",
"]",
"(",
"request",
",",
"retry",
"=",
"retry",
",",
"timeout",
"=",
"timeout",
",",
"metadata",
"=",
"metadata",
")"
] |
Deletes a POSIX account.
Example:
>>> from google.cloud import oslogin_v1
>>>
>>> client = oslogin_v1.OsLoginServiceClient()
>>>
>>> name = client.project_path('[USER]', '[PROJECT]')
>>>
>>> client.delete_posix_account(name)
Args:
name (str): A reference to the POSIX account to update. POSIX accounts are
identified by the project ID they are associated with. A reference to
the POSIX account is in format ``users/{user}/projects/{project}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
[
"Deletes",
"a",
"POSIX",
"account",
"."
] |
python
|
train
| 44.166667 |
franciscogarate/pyliferisk
|
pyliferisk/__init__.py
|
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L272-L279
|
def Mx(mt, x):
""" Return the Mx """
n = len(mt.Cx)
sum1 = 0
for j in range(x, n):
k = mt.Cx[j]
sum1 += k
return sum1
|
[
"def",
"Mx",
"(",
"mt",
",",
"x",
")",
":",
"n",
"=",
"len",
"(",
"mt",
".",
"Cx",
")",
"sum1",
"=",
"0",
"for",
"j",
"in",
"range",
"(",
"x",
",",
"n",
")",
":",
"k",
"=",
"mt",
".",
"Cx",
"[",
"j",
"]",
"sum1",
"+=",
"k",
"return",
"sum1"
] |
Return the Mx
|
[
"Return",
"the",
"Mx"
] |
python
|
train
| 18.25 |
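In commutation-function terms this is the tail sum of the C column; in LaTeX:

M_x = \sum_{j=x}^{n-1} C_j, \qquad n = \mathrm{len}(C_x)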
peterwittek/ncpol2sdpa
|
ncpol2sdpa/nc_utils.py
|
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/nc_utils.py#L604-L614
|
def convert_relational(relational):
"""Convert all inequalities to >=0 form.
"""
rel = relational.rel_op
if rel in ['==', '>=', '>']:
return relational.lhs-relational.rhs
elif rel in ['<=', '<']:
return relational.rhs-relational.lhs
else:
raise Exception("The relational operation ' + rel + ' is not "
"implemented!")
|
[
"def",
"convert_relational",
"(",
"relational",
")",
":",
"rel",
"=",
"relational",
".",
"rel_op",
"if",
"rel",
"in",
"[",
"'=='",
",",
"'>='",
",",
"'>'",
"]",
":",
"return",
"relational",
".",
"lhs",
"-",
"relational",
".",
"rhs",
"elif",
"rel",
"in",
"[",
"'<='",
",",
"'<'",
"]",
":",
"return",
"relational",
".",
"rhs",
"-",
"relational",
".",
"lhs",
"else",
":",
"raise",
"Exception",
"(",
"\"The relational operation ' + rel + ' is not \"",
"\"implemented!\"",
")"
] |
Convert all inequalities to >=0 form.
|
[
"Convert",
"all",
"inequalities",
"to",
">",
"=",
"0",
"form",
"."
] |
python
|
train
| 34.454545 |
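A quick sketch with SymPy relationals, which is what this helper expects:

from sympy import symbols

x, y = symbols('x y')
convert_relational(x >= y)   # -> x - y  (read as x - y >= 0)
convert_relational(x < y)    # -> y - x  (read as y - x >= 0)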
pandas-dev/pandas
|
pandas/core/frame.py
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3659-L3708
|
def lookup(self, row_labels, col_labels):
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = [df.get_value(row, col)
for row, col in zip(row_labels, col_labels)]
Returns
-------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
|
[
"def",
"lookup",
"(",
"self",
",",
"row_labels",
",",
"col_labels",
")",
":",
"n",
"=",
"len",
"(",
"row_labels",
")",
"if",
"n",
"!=",
"len",
"(",
"col_labels",
")",
":",
"raise",
"ValueError",
"(",
"'Row labels must have same size as column labels'",
")",
"thresh",
"=",
"1000",
"if",
"not",
"self",
".",
"_is_mixed_type",
"or",
"n",
">",
"thresh",
":",
"values",
"=",
"self",
".",
"values",
"ridx",
"=",
"self",
".",
"index",
".",
"get_indexer",
"(",
"row_labels",
")",
"cidx",
"=",
"self",
".",
"columns",
".",
"get_indexer",
"(",
"col_labels",
")",
"if",
"(",
"ridx",
"==",
"-",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'One or more row labels was not found'",
")",
"if",
"(",
"cidx",
"==",
"-",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'One or more column labels was not found'",
")",
"flat_index",
"=",
"ridx",
"*",
"len",
"(",
"self",
".",
"columns",
")",
"+",
"cidx",
"result",
"=",
"values",
".",
"flat",
"[",
"flat_index",
"]",
"else",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"n",
",",
"dtype",
"=",
"'O'",
")",
"for",
"i",
",",
"(",
"r",
",",
"c",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"row_labels",
",",
"col_labels",
")",
")",
":",
"result",
"[",
"i",
"]",
"=",
"self",
".",
"_get_value",
"(",
"r",
",",
"c",
")",
"if",
"is_object_dtype",
"(",
"result",
")",
":",
"result",
"=",
"lib",
".",
"maybe_convert_objects",
"(",
"result",
")",
"return",
"result"
] |
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = [df.get_value(row, col)
for row, col in zip(row_labels, col_labels)]
Returns
-------
values : ndarray
The found values
|
[
"Label",
"-",
"based",
"fancy",
"indexing",
"function",
"for",
"DataFrame",
"."
] |
python
|
train
| 32.44 |
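A small example of the row/column pairing:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['r0', 'r1'])
df.lookup(['r0', 'r1'], ['b', 'a'])   # array([3, 2]): values at (r0, b) and (r1, a)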
tornadoweb/tornado
|
demos/blog/blog.py
|
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/demos/blog/blog.py#L99-L112
|
async def query(self, stmt, *args):
"""Query for a list of results.
Typical usage::
results = await self.query(...)
Or::
for row in await self.query(...)
"""
with (await self.application.db.cursor()) as cur:
await cur.execute(stmt, args)
return [self.row_to_obj(row, cur) for row in await cur.fetchall()]
|
[
"async",
"def",
"query",
"(",
"self",
",",
"stmt",
",",
"*",
"args",
")",
":",
"with",
"(",
"await",
"self",
".",
"application",
".",
"db",
".",
"cursor",
"(",
")",
")",
"as",
"cur",
":",
"await",
"cur",
".",
"execute",
"(",
"stmt",
",",
"args",
")",
"return",
"[",
"self",
".",
"row_to_obj",
"(",
"row",
",",
"cur",
")",
"for",
"row",
"in",
"await",
"cur",
".",
"fetchall",
"(",
")",
"]"
] |
Query for a list of results.
Typical usage::
results = await self.query(...)
Or::
for row in await self.query(...)
|
[
"Query",
"for",
"a",
"list",
"of",
"results",
"."
] |
python
|
train
| 27.357143 |
EwilDawe/typy
|
typy/keyboard.py
|
https://github.com/EwilDawe/typy/blob/0349e7176567a4dbef318e75d9b3d6868950a1a9/typy/keyboard.py#L11-L21
|
def press(*keys):
"""
Simulates a key-press for all the keys passed to the function
:param keys: list of keys to be pressed
:return: None
"""
for key in keys:
win32api.keybd_event(codes[key], 0, 0, 0)
release(key)
|
[
"def",
"press",
"(",
"*",
"keys",
")",
":",
"for",
"key",
"in",
"keys",
":",
"win32api",
".",
"keybd_event",
"(",
"codes",
"[",
"key",
"]",
",",
"0",
",",
"0",
",",
"0",
")",
"release",
"(",
"key",
")"
] |
Simulates a key-press for all the keys passed to the function
:param keys: list of keys to be pressed
:return: None
|
[
"Simulates",
"a",
"key",
"-",
"press",
"for",
"all",
"the",
"keys",
"passed",
"to",
"the",
"function"
] |
python
|
train
| 22.272727 |
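A hedged usage sketch; which key names work depends entirely on the module's `codes` mapping, so 'enter' below is an assumption:

press('h', 'i', 'enter')   # taps each key in order, releasing it after the key-down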
DLR-RM/RAFCON
|
source/rafcon/gui/models/container_state.py
|
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/container_state.py#L100-L104
|
def _load_transition_models(self):
""" Adds models for each transition of the state """
self.transitions = []
for transition in self.state.transitions.values():
self._add_model(self.transitions, transition, TransitionModel)
|
[
"def",
"_load_transition_models",
"(",
"self",
")",
":",
"self",
".",
"transitions",
"=",
"[",
"]",
"for",
"transition",
"in",
"self",
".",
"state",
".",
"transitions",
".",
"values",
"(",
")",
":",
"self",
".",
"_add_model",
"(",
"self",
".",
"transitions",
",",
"transition",
",",
"TransitionModel",
")"
] |
Adds models for each transition of the state
|
[
"Adds",
"models",
"for",
"each",
"transition",
"of",
"the",
"state"
] |
python
|
train
| 51 |
data61/clkhash
|
clkhash/cli.py
|
https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/cli.py#L182-L202
|
def create(server, name, project, apikey, output, threshold, verbose):
"""Create a new run on an entity matching server.
See entity matching service documentation for details on threshold.
Returns details for the created run.
"""
if verbose:
log("Entity Matching Server: {}".format(server))
if threshold is None:
raise ValueError("Please provide a threshold")
# Create a new run
try:
response = run_create(server, project, apikey, threshold, name)
except ServiceError as e:
log("Unexpected response with status {}".format(e.status_code))
log(e.text)
else:
json.dump(response, output)
|
[
"def",
"create",
"(",
"server",
",",
"name",
",",
"project",
",",
"apikey",
",",
"output",
",",
"threshold",
",",
"verbose",
")",
":",
"if",
"verbose",
":",
"log",
"(",
"\"Entity Matching Server: {}\"",
".",
"format",
"(",
"server",
")",
")",
"if",
"threshold",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Please provide a threshold\"",
")",
"# Create a new run",
"try",
":",
"response",
"=",
"run_create",
"(",
"server",
",",
"project",
",",
"apikey",
",",
"threshold",
",",
"name",
")",
"except",
"ServiceError",
"as",
"e",
":",
"log",
"(",
"\"Unexpected response with status {}\"",
".",
"format",
"(",
"e",
".",
"status_code",
")",
")",
"log",
"(",
"e",
".",
"text",
")",
"else",
":",
"json",
".",
"dump",
"(",
"response",
",",
"output",
")"
] |
Create a new run on an entity matching server.
See entity matching service documentation for details on threshold.
Returns details for the created run.
|
[
"Create",
"a",
"new",
"run",
"on",
"an",
"entity",
"matching",
"server",
"."
] |
python
|
train
| 31.190476 |
maxalbert/tohu
|
tohu/v2/custom_generator_NEW.py
|
https://github.com/maxalbert/tohu/blob/43380162fadec99cdd5c5c3152dd6b7d3a9d39a8/tohu/v2/custom_generator_NEW.py#L129-L136
|
def make_item_class_for_custom_generator_class(cls):
"""
cls:
The custom generator class for which to create an item-class
"""
clsname = cls.__tohu_items_name__
attr_names = cls.field_gens.keys()
return make_item_class(clsname, attr_names)
|
[
"def",
"make_item_class_for_custom_generator_class",
"(",
"cls",
")",
":",
"clsname",
"=",
"cls",
".",
"__tohu_items_name__",
"attr_names",
"=",
"cls",
".",
"field_gens",
".",
"keys",
"(",
")",
"return",
"make_item_class",
"(",
"clsname",
",",
"attr_names",
")"
] |
cls:
The custom generator class for which to create an item-class
|
[
"cls",
":",
"The",
"custom",
"generator",
"class",
"for",
"which",
"to",
"create",
"an",
"item",
"-",
"class"
] |
python
|
train
| 33 |
neherlab/treetime
|
treetime/node_interpolator.py
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/node_interpolator.py#L137-L155
|
def _evaluate_convolution(t_val, f, g, n_integral = 100, inverse_time=None, return_log=False):
"""
Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau
"""
FG = _convolution_integrand(t_val, f, g, inverse_time, return_log)
#integrate the interpolation object, return log, make neg_log
#print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax))
if (return_log and FG == ttconf.BIG_NUMBER) or \
(not return_log and FG == 0.0): # distributions do not overlap
        res = ttconf.BIG_NUMBER # we integrate log functions
else:
res = -FG.integrate(a=FG.xmin, b=FG.xmax, n=n_integral, return_log=True)
if return_log:
return res, -1
else:
return np.exp(-res), -1
|
[
"def",
"_evaluate_convolution",
"(",
"t_val",
",",
"f",
",",
"g",
",",
"n_integral",
"=",
"100",
",",
"inverse_time",
"=",
"None",
",",
"return_log",
"=",
"False",
")",
":",
"FG",
"=",
"_convolution_integrand",
"(",
"t_val",
",",
"f",
",",
"g",
",",
"inverse_time",
",",
"return_log",
")",
"#integrate the interpolation object, return log, make neg_log",
"#print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax))",
"if",
"(",
"return_log",
"and",
"FG",
"==",
"ttconf",
".",
"BIG_NUMBER",
")",
"or",
"(",
"not",
"return_log",
"and",
"FG",
"==",
"0.0",
")",
":",
"# distributions do not overlap",
"res",
"=",
"ttconf",
".",
"BIG_NUMBER",
"# we integrate log funcitons",
"else",
":",
"res",
"=",
"-",
"FG",
".",
"integrate",
"(",
"a",
"=",
"FG",
".",
"xmin",
",",
"b",
"=",
"FG",
".",
"xmax",
",",
"n",
"=",
"n_integral",
",",
"return_log",
"=",
"True",
")",
"if",
"return_log",
":",
"return",
"res",
",",
"-",
"1",
"else",
":",
"return",
"np",
".",
"exp",
"(",
"-",
"res",
")",
",",
"-",
"1"
] |
Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau
|
[
"Calculate",
"convolution",
"F",
"(",
"t",
")",
"=",
"int",
"{",
"f",
"(",
"tau",
")",
"g",
"(",
"t",
"-",
"tau",
")",
"}",
"dtau"
] |
python
|
test
| 37.842105 |
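The quantity being evaluated, written out (the code carries negative-log values internally, hence the sign juggling):

F(t) = \int f(\tau)\, g(t - \tau)\, d\tau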
calmjs/calmjs
|
src/calmjs/toolchain.py
|
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L1225-L1244
|
def compile_loaderplugin_entry(self, spec, entry):
"""
Generic loader plugin entry handler.
The default implementation assumes that everything up to the
first '!' symbol resolves to some known loader plugin within
the registry.
The registry instance responsible for the resolution of the
loader plugin handlers must be available in the spec under
CALMJS_LOADERPLUGIN_REGISTRY
"""
modname, source, target, modpath = entry
handler = spec[CALMJS_LOADERPLUGIN_REGISTRY].get(modname)
if handler:
return handler(self, spec, modname, source, target, modpath)
logger.warning(
"no loaderplugin handler found for plugin entry '%s'", modname)
return {}, {}, []
|
[
"def",
"compile_loaderplugin_entry",
"(",
"self",
",",
"spec",
",",
"entry",
")",
":",
"modname",
",",
"source",
",",
"target",
",",
"modpath",
"=",
"entry",
"handler",
"=",
"spec",
"[",
"CALMJS_LOADERPLUGIN_REGISTRY",
"]",
".",
"get",
"(",
"modname",
")",
"if",
"handler",
":",
"return",
"handler",
"(",
"self",
",",
"spec",
",",
"modname",
",",
"source",
",",
"target",
",",
"modpath",
")",
"logger",
".",
"warning",
"(",
"\"no loaderplugin handler found for plugin entry '%s'\"",
",",
"modname",
")",
"return",
"{",
"}",
",",
"{",
"}",
",",
"[",
"]"
] |
Generic loader plugin entry handler.
The default implementation assumes that everything up to the
first '!' symbol resolves to some known loader plugin within
the registry.
The registry instance responsible for the resolution of the
loader plugin handlers must be available in the spec under
CALMJS_LOADERPLUGIN_REGISTRY
|
[
"Generic",
"loader",
"plugin",
"entry",
"handler",
"."
] |
python
|
train
| 38.45 |
summa-tx/riemann
|
riemann/encoding/cashaddr.py
|
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/encoding/cashaddr.py#L48-L66
|
def decode(data):
'''
str -> bytes
'''
if riemann.network.CASHADDR_PREFIX is None:
raise ValueError('Network {} does not support cashaddresses.'
.format(riemann.get_current_network_name()))
if data.find(riemann.network.CASHADDR_PREFIX) != 0:
raise ValueError('Malformed cashaddr. Cannot locate prefix: {}'
                         .format(riemann.network.CASHADDR_PREFIX))
# the data is everything after the colon
prefix, data = data.split(':')
decoded = b32decode(data)
if not verify_checksum(prefix, decoded):
raise ValueError('Bad cash address checksum')
converted = convertbits(decoded, 5, 8)
return bytes(converted[:-6])
|
[
"def",
"decode",
"(",
"data",
")",
":",
"if",
"riemann",
".",
"network",
".",
"CASHADDR_PREFIX",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Network {} does not support cashaddresses.'",
".",
"format",
"(",
"riemann",
".",
"get_current_network_name",
"(",
")",
")",
")",
"if",
"data",
".",
"find",
"(",
"riemann",
".",
"network",
".",
"CASHADDR_PREFIX",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'Malformed cashaddr. Cannot locate prefix: {}'",
".",
"format",
"(",
"riemann",
".",
"netowrk",
".",
"CASHADDR_PREFIX",
")",
")",
"# the data is everything after the colon",
"prefix",
",",
"data",
"=",
"data",
".",
"split",
"(",
"':'",
")",
"decoded",
"=",
"b32decode",
"(",
"data",
")",
"if",
"not",
"verify_checksum",
"(",
"prefix",
",",
"decoded",
")",
":",
"raise",
"ValueError",
"(",
"'Bad cash address checksum'",
")",
"converted",
"=",
"convertbits",
"(",
"decoded",
",",
"5",
",",
"8",
")",
"return",
"bytes",
"(",
"converted",
"[",
":",
"-",
"6",
"]",
")"
] |
str -> bytes
|
[
"str",
"-",
">",
"bytes"
] |
python
|
train
| 36.947368 |
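A hedged decoding sketch; a cashaddr-capable network must be selected first, and both the network name and the address below are placeholders:

import riemann
riemann.select_network('bitcoin_cash_main')   # assumed network name; may differ
payload = decode('bitcoincash:qpm2qsznhks23z7629mms6s4cwef74vcwvy22gdx6a')
# payload is the raw address bytes with the checksum symbols stripped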
sony/nnabla
|
python/src/nnabla/utils/learning_rate_scheduler.py
|
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/learning_rate_scheduler.py#L118-L129
|
def get_learning_rate(self, iter):
'''
Get learning rate with exponential decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate
'''
return self.init_lr * (self.gamma ** (iter // self.iter_interval))
|
[
"def",
"get_learning_rate",
"(",
"self",
",",
"iter",
")",
":",
"return",
"self",
".",
"init_lr",
"*",
"(",
"self",
".",
"gamma",
"**",
"(",
"iter",
"//",
"self",
".",
"iter_interval",
")",
")"
] |
Get learning rate with exponential decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate
|
[
"Get",
"learning",
"rate",
"with",
"exponential",
"decay",
"based",
"on",
"current",
"iteration",
"."
] |
python
|
train
| 27.25 |
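Worked numbers for the decay, assuming a scheduler built with init_lr=0.1, gamma=0.5, iter_interval=1000:

scheduler.get_learning_rate(0)      # 0.1   (0 // 1000 == 0)
scheduler.get_learning_rate(999)    # 0.1
scheduler.get_learning_rate(1000)   # 0.05  (1000 // 1000 == 1)
scheduler.get_learning_rate(2500)   # 0.025 (0.1 * 0.5 ** 2)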
cimatosa/progression
|
progression/progress.py
|
https://github.com/cimatosa/progression/blob/82cf74a25a47f9bda96157cc2c88e5975c20b41d/progression/progress.py#L608-L638
|
def _show_stat_wrapper_multi_Progress(count, last_count, start_time, max_count, speed_calc_cycles,
width, q, last_speed, prepend, show_stat_function, len_,
add_args, lock, info_line, no_move_up=False):
"""
call the static method show_stat_wrapper for each process
"""
# print(ESC_BOLD, end='')
# sys.stdout.flush()
for i in range(len_):
_show_stat_wrapper_Progress(count[i], last_count[i], start_time[i], max_count[i], speed_calc_cycles,
width, q[i], last_speed[i], prepend[i], show_stat_function,
add_args, i, lock[i])
n = len_
if info_line is not None:
s = info_line.value.decode('utf-8')
s = s.split('\n')
n += len(s)
for si in s:
if width == 'auto':
width = get_terminal_width()
if len(si) > width:
si = si[:width]
print("{0:<{1}}".format(si, width))
if no_move_up:
n = 0
# this is only a hack to find the end
# of the message in a stream
# so ESC_HIDDEN+ESC_NO_CHAR_ATTR is a magic ending
print(terminal.ESC_MOVE_LINE_UP(n) + terminal.ESC_MY_MAGIC_ENDING, end='')
sys.stdout.flush()
|
[
"def",
"_show_stat_wrapper_multi_Progress",
"(",
"count",
",",
"last_count",
",",
"start_time",
",",
"max_count",
",",
"speed_calc_cycles",
",",
"width",
",",
"q",
",",
"last_speed",
",",
"prepend",
",",
"show_stat_function",
",",
"len_",
",",
"add_args",
",",
"lock",
",",
"info_line",
",",
"no_move_up",
"=",
"False",
")",
":",
"# print(ESC_BOLD, end='')",
"# sys.stdout.flush()",
"for",
"i",
"in",
"range",
"(",
"len_",
")",
":",
"_show_stat_wrapper_Progress",
"(",
"count",
"[",
"i",
"]",
",",
"last_count",
"[",
"i",
"]",
",",
"start_time",
"[",
"i",
"]",
",",
"max_count",
"[",
"i",
"]",
",",
"speed_calc_cycles",
",",
"width",
",",
"q",
"[",
"i",
"]",
",",
"last_speed",
"[",
"i",
"]",
",",
"prepend",
"[",
"i",
"]",
",",
"show_stat_function",
",",
"add_args",
",",
"i",
",",
"lock",
"[",
"i",
"]",
")",
"n",
"=",
"len_",
"if",
"info_line",
"is",
"not",
"None",
":",
"s",
"=",
"info_line",
".",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
"s",
"=",
"s",
".",
"split",
"(",
"'\\n'",
")",
"n",
"+=",
"len",
"(",
"s",
")",
"for",
"si",
"in",
"s",
":",
"if",
"width",
"==",
"'auto'",
":",
"width",
"=",
"get_terminal_width",
"(",
")",
"if",
"len",
"(",
"si",
")",
">",
"width",
":",
"si",
"=",
"si",
"[",
":",
"width",
"]",
"print",
"(",
"\"{0:<{1}}\"",
".",
"format",
"(",
"si",
",",
"width",
")",
")",
"if",
"no_move_up",
":",
"n",
"=",
"0",
"# this is only a hack to find the end",
"# of the message in a stream",
"# so ESC_HIDDEN+ESC_NO_CHAR_ATTR is a magic ending",
"print",
"(",
"terminal",
".",
"ESC_MOVE_LINE_UP",
"(",
"n",
")",
"+",
"terminal",
".",
"ESC_MY_MAGIC_ENDING",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
call the static method show_stat_wrapper for each process
|
[
"call",
"the",
"static",
"method",
"show_stat_wrapper",
"for",
"each",
"process"
] |
python
|
train
| 44.548387 |
asweigart/pyautogui
|
pyautogui/__init__.py
|
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L778-L827
|
def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True):
"""Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen, relative to its current position.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
xOffset (int, float, None, tuple, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
mouseDownUp (True, False): When false, the mouseUp/Down actions are not performed,
which allows dragging over multiple (small) actions. 'True' by default.
Returns:
None
"""
if xOffset is None:
xOffset = 0
if yOffset is None:
yOffset = 0
if type(xOffset) in (tuple, list):
xOffset, yOffset = xOffset[0], xOffset[1]
if xOffset == 0 and yOffset == 0:
return # no-op case
_failSafeCheck()
mousex, mousey = platformModule._position()
if mouseDownUp:
mouseDown(button=button, _pause=False)
_mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button)
if mouseDownUp:
mouseUp(button=button, _pause=False)
_autoPause(pause, _pause)
|
[
"def",
"dragRel",
"(",
"xOffset",
"=",
"0",
",",
"yOffset",
"=",
"0",
",",
"duration",
"=",
"0.0",
",",
"tween",
"=",
"linear",
",",
"button",
"=",
"'left'",
",",
"pause",
"=",
"None",
",",
"_pause",
"=",
"True",
",",
"mouseDownUp",
"=",
"True",
")",
":",
"if",
"xOffset",
"is",
"None",
":",
"xOffset",
"=",
"0",
"if",
"yOffset",
"is",
"None",
":",
"yOffset",
"=",
"0",
"if",
"type",
"(",
"xOffset",
")",
"in",
"(",
"tuple",
",",
"list",
")",
":",
"xOffset",
",",
"yOffset",
"=",
"xOffset",
"[",
"0",
"]",
",",
"xOffset",
"[",
"1",
"]",
"if",
"xOffset",
"==",
"0",
"and",
"yOffset",
"==",
"0",
":",
"return",
"# no-op case",
"_failSafeCheck",
"(",
")",
"mousex",
",",
"mousey",
"=",
"platformModule",
".",
"_position",
"(",
")",
"if",
"mouseDownUp",
":",
"mouseDown",
"(",
"button",
"=",
"button",
",",
"_pause",
"=",
"False",
")",
"_mouseMoveDrag",
"(",
"'drag'",
",",
"mousex",
",",
"mousey",
",",
"xOffset",
",",
"yOffset",
",",
"duration",
",",
"tween",
",",
"button",
")",
"if",
"mouseDownUp",
":",
"mouseUp",
"(",
"button",
"=",
"button",
",",
"_pause",
"=",
"False",
")",
"_autoPause",
"(",
"pause",
",",
"_pause",
")"
] |
Performs a mouse drag (mouse movement while a button is held down) to a
point on the screen, relative to its current position.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
xOffset (int, float, None, tuple, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
mouseDownUp (True, False): When false, the mouseUp/Down actions are not performed,
which allows dragging over multiple (small) actions. 'True' by default.
Returns:
None
|
[
"Performs",
"a",
"mouse",
"drag",
"(",
"mouse",
"movement",
"while",
"a",
"button",
"is",
"held",
"down",
")",
"to",
"a",
"point",
"on",
"the",
"screen",
"relative",
"to",
"its",
"current",
"position",
"."
] |
python
|
train
| 42.18 |
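A minimal usage sketch for dragRel, assuming a pyautogui installation and an active desktop session; the offsets and duration below are illustrative:

import pyautogui

# Hold the left button and drag 100 px right, 50 px down over half a second.
pyautogui.dragRel(100, 50, duration=0.5, button='left')

# A tuple works too: it is unpacked into xOffset and yOffset.
pyautogui.dragRel((100, 50), duration=0.5)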
dpkp/kafka-python
|
kafka/client_async.py
|
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client_async.py#L772-L827
|
def _maybe_refresh_metadata(self, wakeup=False):
"""Send a metadata request if needed.
Returns:
int: milliseconds until next refresh
"""
ttl = self.cluster.ttl()
wait_for_in_progress_ms = self.config['request_timeout_ms'] if self._metadata_refresh_in_progress else 0
metadata_timeout = max(ttl, wait_for_in_progress_ms)
if metadata_timeout > 0:
return metadata_timeout
# Beware that the behavior of this method and the computation of
# timeouts for poll() are highly dependent on the behavior of
# least_loaded_node()
node_id = self.least_loaded_node()
if node_id is None:
log.debug("Give up sending metadata request since no node is available");
return self.config['reconnect_backoff_ms']
if self._can_send_request(node_id):
topics = list(self._topics)
if not topics and self.cluster.is_bootstrap(node_id):
topics = list(self.config['bootstrap_topics_filter'])
if self.cluster.need_all_topic_metadata or not topics:
topics = [] if self.config['api_version'] < (0, 10) else None
api_version = 0 if self.config['api_version'] < (0, 10) else 1
request = MetadataRequest[api_version](topics)
log.debug("Sending metadata request %s to node %s", request, node_id)
future = self.send(node_id, request, wakeup=wakeup)
future.add_callback(self.cluster.update_metadata)
future.add_errback(self.cluster.failed_update)
self._metadata_refresh_in_progress = True
def refresh_done(val_or_error):
self._metadata_refresh_in_progress = False
future.add_callback(refresh_done)
future.add_errback(refresh_done)
return self.config['request_timeout_ms']
# If there's any connection establishment underway, wait until it completes. This prevents
# the client from unnecessarily connecting to additional nodes while a previous connection
# attempt has not been completed.
if self._connecting:
return self.config['reconnect_backoff_ms']
if self.maybe_connect(node_id, wakeup=wakeup):
log.debug("Initializing connection to node %s for metadata request", node_id)
return self.config['reconnect_backoff_ms']
# connected but can't send more, OR connecting
# In either case we just need to wait for a network event
# to let us know the selected connection might be usable again.
return float('inf')
|
[
"def",
"_maybe_refresh_metadata",
"(",
"self",
",",
"wakeup",
"=",
"False",
")",
":",
"ttl",
"=",
"self",
".",
"cluster",
".",
"ttl",
"(",
")",
"wait_for_in_progress_ms",
"=",
"self",
".",
"config",
"[",
"'request_timeout_ms'",
"]",
"if",
"self",
".",
"_metadata_refresh_in_progress",
"else",
"0",
"metadata_timeout",
"=",
"max",
"(",
"ttl",
",",
"wait_for_in_progress_ms",
")",
"if",
"metadata_timeout",
">",
"0",
":",
"return",
"metadata_timeout",
"# Beware that the behavior of this method and the computation of",
"# timeouts for poll() are highly dependent on the behavior of",
"# least_loaded_node()",
"node_id",
"=",
"self",
".",
"least_loaded_node",
"(",
")",
"if",
"node_id",
"is",
"None",
":",
"log",
".",
"debug",
"(",
"\"Give up sending metadata request since no node is available\"",
")",
"return",
"self",
".",
"config",
"[",
"'reconnect_backoff_ms'",
"]",
"if",
"self",
".",
"_can_send_request",
"(",
"node_id",
")",
":",
"topics",
"=",
"list",
"(",
"self",
".",
"_topics",
")",
"if",
"not",
"topics",
"and",
"self",
".",
"cluster",
".",
"is_bootstrap",
"(",
"node_id",
")",
":",
"topics",
"=",
"list",
"(",
"self",
".",
"config",
"[",
"'bootstrap_topics_filter'",
"]",
")",
"if",
"self",
".",
"cluster",
".",
"need_all_topic_metadata",
"or",
"not",
"topics",
":",
"topics",
"=",
"[",
"]",
"if",
"self",
".",
"config",
"[",
"'api_version'",
"]",
"<",
"(",
"0",
",",
"10",
")",
"else",
"None",
"api_version",
"=",
"0",
"if",
"self",
".",
"config",
"[",
"'api_version'",
"]",
"<",
"(",
"0",
",",
"10",
")",
"else",
"1",
"request",
"=",
"MetadataRequest",
"[",
"api_version",
"]",
"(",
"topics",
")",
"log",
".",
"debug",
"(",
"\"Sending metadata request %s to node %s\"",
",",
"request",
",",
"node_id",
")",
"future",
"=",
"self",
".",
"send",
"(",
"node_id",
",",
"request",
",",
"wakeup",
"=",
"wakeup",
")",
"future",
".",
"add_callback",
"(",
"self",
".",
"cluster",
".",
"update_metadata",
")",
"future",
".",
"add_errback",
"(",
"self",
".",
"cluster",
".",
"failed_update",
")",
"self",
".",
"_metadata_refresh_in_progress",
"=",
"True",
"def",
"refresh_done",
"(",
"val_or_error",
")",
":",
"self",
".",
"_metadata_refresh_in_progress",
"=",
"False",
"future",
".",
"add_callback",
"(",
"refresh_done",
")",
"future",
".",
"add_errback",
"(",
"refresh_done",
")",
"return",
"self",
".",
"config",
"[",
"'request_timeout_ms'",
"]",
"# If there's any connection establishment underway, wait until it completes. This prevents",
"# the client from unnecessarily connecting to additional nodes while a previous connection",
"# attempt has not been completed.",
"if",
"self",
".",
"_connecting",
":",
"return",
"self",
".",
"config",
"[",
"'reconnect_backoff_ms'",
"]",
"if",
"self",
".",
"maybe_connect",
"(",
"node_id",
",",
"wakeup",
"=",
"wakeup",
")",
":",
"log",
".",
"debug",
"(",
"\"Initializing connection to node %s for metadata request\"",
",",
"node_id",
")",
"return",
"self",
".",
"config",
"[",
"'reconnect_backoff_ms'",
"]",
"# connected but can't send more, OR connecting",
"# In either case we just need to wait for a network event",
"# to let us know the selected connection might be usable again.",
"return",
"float",
"(",
"'inf'",
")"
] |
Send a metadata request if needed.
Returns:
int: milliseconds until next refresh
|
[
"Send",
"a",
"metadata",
"request",
"if",
"needed",
"."
] |
python
|
train
| 46.410714 |
knipknap/SpiffWorkflow
|
SpiffWorkflow/serializer/xml.py
|
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/serializer/xml.py#L211-L224
|
def serialize_operator_not_equal(self, op):
"""
Serializer for :meth:`SpiffWorkflow.operators.NotEqual`.
Example::
<not-equals>
<value>text</value>
<value><attribute>foobar</attribute></value>
<value><path>foobar</path></value>
</not-equals>
"""
elem = etree.Element('not-equals')
return self.serialize_value_list(elem, op.args)
|
[
"def",
"serialize_operator_not_equal",
"(",
"self",
",",
"op",
")",
":",
"elem",
"=",
"etree",
".",
"Element",
"(",
"'not-equals'",
")",
"return",
"self",
".",
"serialize_value_list",
"(",
"elem",
",",
"op",
".",
"args",
")"
] |
Serializer for :meth:`SpiffWorkflow.operators.NotEqual`.
Example::
<not-equals>
<value>text</value>
<value><attribute>foobar</attribute></value>
<value><path>foobar</path></value>
</not-equals>
|
[
"Serializer",
"for",
":",
"meth",
":",
"SpiffWorkflow",
".",
"operators",
".",
"NotEqual",
"."
] |
python
|
valid
| 31.214286 |
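The serializer above leans on lxml's element API; a short standalone sketch, assuming lxml is installed, that builds the <not-equals> shape shown in the docstring with illustrative values:

from lxml import etree

elem = etree.Element('not-equals')
for text in ('text', 'foobar'):
    child = etree.SubElement(elem, 'value')
    child.text = text
print(etree.tostring(elem, pretty_print=True).decode())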
edoburu/sphinxcontrib-django
|
sphinxcontrib_django/docstrings.py
|
https://github.com/edoburu/sphinxcontrib-django/blob/5116ac7f1510a76b1ff58cf7f8d2fab7d8bbe2a9/sphinxcontrib_django/docstrings.py#L113-L118
|
def _improve_class_docs(app, cls, lines):
"""Improve the documentation of a class."""
if issubclass(cls, models.Model):
_add_model_fields_as_params(app, cls, lines)
elif issubclass(cls, forms.Form):
_add_form_fields(cls, lines)
|
[
"def",
"_improve_class_docs",
"(",
"app",
",",
"cls",
",",
"lines",
")",
":",
"if",
"issubclass",
"(",
"cls",
",",
"models",
".",
"Model",
")",
":",
"_add_model_fields_as_params",
"(",
"app",
",",
"cls",
",",
"lines",
")",
"elif",
"issubclass",
"(",
"cls",
",",
"forms",
".",
"Form",
")",
":",
"_add_form_fields",
"(",
"cls",
",",
"lines",
")"
] |
Improve the documentation of a class.
|
[
"Improve",
"the",
"documentation",
"of",
"a",
"class",
"."
] |
python
|
train
| 41.666667 |
timstaley/voevent-parse
|
src/voeventparse/voevent.py
|
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L228-L252
|
def set_author(voevent, title=None, shortName=None, logoURL=None,
contactName=None, contactEmail=None, contactPhone=None,
contributor=None):
"""For setting fields in the detailed author description.
This can optionally be neglected if a well-defined AuthorIVORN is supplied.
.. note:: Unusually for this library,
the args here use CamelCase naming convention,
since there's a direct mapping to the ``Author.*``
attributes to which they will be assigned.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
The rest of the arguments are strings corresponding to child elements.
"""
# We inspect all local variables except the voevent packet,
# Cycling through and assigning them on the Who.Author element.
AuthChildren = locals()
AuthChildren.pop('voevent')
if not voevent.xpath('Who/Author'):
etree.SubElement(voevent.Who, 'Author')
for k, v in AuthChildren.items():
if v is not None:
voevent.Who.Author[k] = v
|
[
"def",
"set_author",
"(",
"voevent",
",",
"title",
"=",
"None",
",",
"shortName",
"=",
"None",
",",
"logoURL",
"=",
"None",
",",
"contactName",
"=",
"None",
",",
"contactEmail",
"=",
"None",
",",
"contactPhone",
"=",
"None",
",",
"contributor",
"=",
"None",
")",
":",
"# We inspect all local variables except the voevent packet,",
"# Cycling through and assigning them on the Who.Author element.",
"AuthChildren",
"=",
"locals",
"(",
")",
"AuthChildren",
".",
"pop",
"(",
"'voevent'",
")",
"if",
"not",
"voevent",
".",
"xpath",
"(",
"'Who/Author'",
")",
":",
"etree",
".",
"SubElement",
"(",
"voevent",
".",
"Who",
",",
"'Author'",
")",
"for",
"k",
",",
"v",
"in",
"AuthChildren",
".",
"items",
"(",
")",
":",
"if",
"v",
"is",
"not",
"None",
":",
"voevent",
".",
"Who",
".",
"Author",
"[",
"k",
"]",
"=",
"v"
] |
For setting fields in the detailed author description.
This can optionally be neglected if a well-defined AuthorIVORN is supplied.
.. note:: Unusually for this library,
the args here use CamelCase naming convention,
since there's a direct mapping to the ``Author.*``
attributes to which they will be assigned.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
The rest of the arguments are strings corresponding to child elements.
|
[
"For",
"setting",
"fields",
"in",
"the",
"detailed",
"author",
"description",
"."
] |
python
|
train
| 41.84 |
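A hedged usage sketch for set_author, assuming a packet created with voeventparse; the stream and contact details are placeholders:

import voeventparse as vp

v = vp.Voevent(stream='example.org/streams/test', stream_id=1,
               role=vp.definitions.roles.test)
vp.set_author(v, title='Example Observatory',
              contactName='Jane Doe', contactEmail='jane@example.org')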
wummel/linkchecker
|
linkcheck/plugins/viruscheck.py
|
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/plugins/viruscheck.py#L86-L110
|
def new_scansock (self):
"""Return a connected socket for sending scan data to it."""
port = None
try:
self.sock.sendall("STREAM")
port = None
for dummy in range(60):
data = self.sock.recv(self.sock_rcvbuf)
i = data.find("PORT")
if i != -1:
port = int(data[i+5:])
break
except socket.error:
self.sock.close()
raise
if port is None:
raise ClamavError(_("clamd is not ready for stream scanning"))
sockinfo = get_sockinfo(self.host, port=port)
wsock = create_socket(socket.AF_INET, socket.SOCK_STREAM)
try:
wsock.connect(sockinfo[0][4])
except socket.error:
wsock.close()
raise
return wsock
|
[
"def",
"new_scansock",
"(",
"self",
")",
":",
"port",
"=",
"None",
"try",
":",
"self",
".",
"sock",
".",
"sendall",
"(",
"\"STREAM\"",
")",
"port",
"=",
"None",
"for",
"dummy",
"in",
"range",
"(",
"60",
")",
":",
"data",
"=",
"self",
".",
"sock",
".",
"recv",
"(",
"self",
".",
"sock_rcvbuf",
")",
"i",
"=",
"data",
".",
"find",
"(",
"\"PORT\"",
")",
"if",
"i",
"!=",
"-",
"1",
":",
"port",
"=",
"int",
"(",
"data",
"[",
"i",
"+",
"5",
":",
"]",
")",
"break",
"except",
"socket",
".",
"error",
":",
"self",
".",
"sock",
".",
"close",
"(",
")",
"raise",
"if",
"port",
"is",
"None",
":",
"raise",
"ClamavError",
"(",
"_",
"(",
"\"clamd is not ready for stream scanning\"",
")",
")",
"sockinfo",
"=",
"get_sockinfo",
"(",
"self",
".",
"host",
",",
"port",
"=",
"port",
")",
"wsock",
"=",
"create_socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"wsock",
".",
"connect",
"(",
"sockinfo",
"[",
"0",
"]",
"[",
"4",
"]",
")",
"except",
"socket",
".",
"error",
":",
"wsock",
".",
"close",
"(",
")",
"raise",
"return",
"wsock"
] |
Return a connected socket for sending scan data to it.
|
[
"Return",
"a",
"connected",
"socket",
"for",
"sending",
"scan",
"data",
"to",
"it",
"."
] |
python
|
train
| 33.56 |
doakey3/DashTable
|
dashtable/html2data/restructify/process_tag.py
|
https://github.com/doakey3/DashTable/blob/744cfb6a717fa75a8092c83ebcd49b2668023681/dashtable/html2data/restructify/process_tag.py#L13-L36
|
def process_tag(node):
"""
Recursively go through a tag's children, converting them, then
convert the tag itself.
"""
text = ''
exceptions = ['table']
for element in node.children:
if isinstance(element, NavigableString):
text += element
elif not node.name in exceptions:
text += process_tag(element)
try:
convert_fn = globals()["convert_%s" % node.name.lower()]
text = convert_fn(node, text)
except KeyError:
pass
return text
|
[
"def",
"process_tag",
"(",
"node",
")",
":",
"text",
"=",
"''",
"exceptions",
"=",
"[",
"'table'",
"]",
"for",
"element",
"in",
"node",
".",
"children",
":",
"if",
"isinstance",
"(",
"element",
",",
"NavigableString",
")",
":",
"text",
"+=",
"element",
"elif",
"not",
"node",
".",
"name",
"in",
"exceptions",
":",
"text",
"+=",
"process_tag",
"(",
"element",
")",
"try",
":",
"convert_fn",
"=",
"globals",
"(",
")",
"[",
"\"convert_%s\"",
"%",
"node",
".",
"name",
".",
"lower",
"(",
")",
"]",
"text",
"=",
"convert_fn",
"(",
"node",
",",
"text",
")",
"except",
"KeyError",
":",
"pass",
"return",
"text"
] |
Recursively go through a tag's children, converting them, then
convert the tag itself.
|
[
"Recursively",
"go",
"through",
"a",
"tag",
"s",
"children",
"converting",
"them",
"then",
"convert",
"the",
"tag",
"itself",
"."
] |
python
|
train
| 21.375 |
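The dispatch-by-tag-name pattern in process_tag can be sketched in isolation; this assumes BeautifulSoup for parsing, and convert_b is a hypothetical handler standing in for the module's converters:

from bs4 import BeautifulSoup, NavigableString

def convert_b(node, text):
    # Hypothetical handler: wrap bold text in asterisks.
    return '**%s**' % text

def walk(node):
    text = ''
    for element in node.children:
        if isinstance(element, NavigableString):
            text += element
        else:
            text += walk(element)
    fn = globals().get('convert_%s' % node.name.lower())
    return fn(node, text) if fn else text

print(walk(BeautifulSoup('<p>a <b>bold</b> word</p>', 'html.parser').p))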
EmbodiedCognition/py-c3d
|
c3d.py
|
https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L447-L465
|
def write(self, group_id, handle):
'''Write this parameter group, with parameters, to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group.
handle : file handle
An open, writable, binary file handle.
'''
name = self.name.encode('utf-8')
desc = self.desc.encode('utf-8')
handle.write(struct.pack('bb', len(name), -group_id))
handle.write(name)
handle.write(struct.pack('<h', 3 + len(desc)))
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
for param in self.params.values():
param.write(group_id, handle)
|
[
"def",
"write",
"(",
"self",
",",
"group_id",
",",
"handle",
")",
":",
"name",
"=",
"self",
".",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
"desc",
"=",
"self",
".",
"desc",
".",
"encode",
"(",
"'utf-8'",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'bb'",
",",
"len",
"(",
"name",
")",
",",
"-",
"group_id",
")",
")",
"handle",
".",
"write",
"(",
"name",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'<h'",
",",
"3",
"+",
"len",
"(",
"desc",
")",
")",
")",
"handle",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"'B'",
",",
"len",
"(",
"desc",
")",
")",
")",
"handle",
".",
"write",
"(",
"desc",
")",
"for",
"param",
"in",
"self",
".",
"params",
".",
"values",
"(",
")",
":",
"param",
".",
"write",
"(",
"group_id",
",",
"handle",
")"
] |
Write this parameter group, with parameters, to a file handle.
Parameters
----------
group_id : int
The numerical ID of the group.
handle : file handle
An open, writable, binary file handle.
|
[
"Write",
"this",
"parameter",
"group",
"with",
"parameters",
"to",
"a",
"file",
"handle",
"."
] |
python
|
train
| 35.526316 |
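The binary layout written above can be checked with the standard library alone; a small sketch with illustrative values:

import io
import struct

name, desc, group_id = b'POINT', b'point data', 1
handle = io.BytesIO()
handle.write(struct.pack('bb', len(name), -group_id))  # name length, negated group id
handle.write(name)
handle.write(struct.pack('<h', 3 + len(desc)))         # little-endian offset field
handle.write(struct.pack('B', len(desc)))
handle.write(desc)
print(handle.getvalue())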
coleifer/walrus
|
walrus/cache.py
|
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/cache.py#L82-L85
|
def delete(self, key):
"""Remove the given key from the cache."""
if not self.debug:
self.database.delete(self.make_key(key))
|
[
"def",
"delete",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"self",
".",
"debug",
":",
"self",
".",
"database",
".",
"delete",
"(",
"self",
".",
"make_key",
"(",
"key",
")",
")"
] |
Remove the given key from the cache.
|
[
"Remove",
"the",
"given",
"key",
"from",
"the",
"cache",
"."
] |
python
|
train
| 37.5 |
msmbuilder/msmbuilder
|
msmbuilder/preprocessing/base.py
|
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/preprocessing/base.py#L146-L160
|
def fit(self, X, y=None):
"""Fit Preprocessing to X.
Parameters
----------
sequence : array-like, [sequence_length, n_features]
A multivariate timeseries.
y : None
Ignored
Returns
-------
self
"""
return self.partial_fit(np.concatenate(X, axis=0))
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"return",
"self",
".",
"partial_fit",
"(",
"np",
".",
"concatenate",
"(",
"X",
",",
"axis",
"=",
"0",
")",
")"
] |
Fit Preprocessing to X.
Parameters
----------
sequence : array-like, [sequence_length, n_features]
A multivariate timeseries.
y : None
Ignored
Returns
-------
self
|
[
"Fit",
"Preprocessing",
"to",
"X",
"."
] |
python
|
train
| 22.6 |
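The concatenation step is easy to verify in isolation; a sketch assuming NumPy, with toy trajectory shapes:

import numpy as np

# Two timeseries with the same feature dimension but different lengths.
X = [np.zeros((100, 3)), np.zeros((250, 3))]
print(np.concatenate(X, axis=0).shape)  # (350, 3): partial_fit sees one long sequence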
ANTsX/ANTsPy
|
ants/viz/plot.py
|
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/viz/plot.py#L127-L342
|
def plot_grid(images, slices=None, axes=2,
# general figure arguments
figsize=1., rpad=0, cpad=0,
# title arguments
title=None, tfontsize=20, title_dx=0, title_dy=0,
# row arguments
rlabels=None, rfontsize=14, rfontcolor='white', rfacecolor='black',
# column arguments
clabels=None, cfontsize=14, cfontcolor='white', cfacecolor='black',
# save arguments
filename=None, dpi=400, transparent=True,
# other args
**kwargs):
"""
Plot a collection of images in an arbitrarily-defined grid
Matplotlib named colors: https://matplotlib.org/examples/color/named_colors.html
Arguments
---------
images : list of ANTsImage types
image(s) to plot.
if one image, this image will be used for all grid locations.
if multiple images, they should be arranged in a list the same
shape as the `gridsize` argument.
slices : integer or list of integers
slice indices to plot
if one integer, this slice index will be used for all images
if multiple integers, they should be arranged in a list the same
shape as the `gridsize` argument
axes : integer or list of integers
axis or axes along which to plot image slices
if one integer, this axis will be used for all images
if multiple integers, they should be arranged in a list the same
shape as the `gridsize` argument
Example
-------
>>> import ants
>>> import numpy as np
>>> mni1 = ants.image_read(ants.get_data('mni'))
>>> mni2 = mni1.smooth_image(1.)
>>> mni3 = mni1.smooth_image(2.)
>>> mni4 = mni1.smooth_image(3.)
>>> images = np.asarray([[mni1, mni2],
... [mni3, mni4]])
>>> slices = np.asarray([[100, 100],
... [100, 100]])
>>> #axes = np.asarray([[2,2],[2,2]])
>>> # standard plotting
>>> ants.plot_grid(images=images, slices=slices, title='2x2 Grid')
>>> ants.plot_grid(images.reshape(1,4), slices.reshape(1,4), title='1x4 Grid')
>>> ants.plot_grid(images.reshape(4,1), slices.reshape(4,1), title='4x1 Grid')
>>> # Padding between rows and/or columns
>>> ants.plot_grid(images, slices, cpad=0.02, title='Col Padding')
>>> ants.plot_grid(images, slices, rpad=0.02, title='Row Padding')
>>> ants.plot_grid(images, slices, rpad=0.02, cpad=0.02, title='Row and Col Padding')
>>> # Adding plain row and/or column labels
>>> ants.plot_grid(images, slices, title='Adding Row Labels', rlabels=['Row #1', 'Row #2'])
>>> ants.plot_grid(images, slices, title='Adding Col Labels', clabels=['Col #1', 'Col #2'])
>>> ants.plot_grid(images, slices, title='Row and Col Labels',
rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2'])
>>> # Making a publication-quality image
>>> images = np.asarray([[mni1, mni2, mni2],
... [mni3, mni4, mni4]])
>>> slices = np.asarray([[100, 100, 100],
... [100, 100, 100]])
>>> axes = np.asarray([[0, 1, 2],
[0, 1, 2]])
>>> ants.plot_grid(images, slices, axes, title='Publication Figures with ANTsPy',
tfontsize=20, title_dy=0.03, title_dx=-0.04,
rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2', 'Col 3'],
rfontsize=16, cfontsize=16)
"""
def mirror_matrix(x):
return x[::-1,:]
def rotate270_matrix(x):
return mirror_matrix(x.T)
def rotate180_matrix(x):
return x[::-1,:]
def rotate90_matrix(x):
return mirror_matrix(x).T
def flip_matrix(x):
return mirror_matrix(rotate180_matrix(x))
def reorient_slice(x, axis):
if (axis != 1):
x = rotate90_matrix(x)
if (axis == 1):
x = rotate90_matrix(x)
x = mirror_matrix(x)
return x
def slice_image(img, axis, idx):
if axis == 0:
return img[idx,:,:]
elif axis == 1:
return img[:,idx,:]
elif axis == 2:
return img[:,:,idx]
elif axis == -1:
return img[:,:,idx]
elif axis == -2:
return img[:,idx,:]
elif axis == -3:
return img[idx,:,:]
else:
raise ValueError('axis %i not valid' % axis)
if isinstance(images, np.ndarray):
images = images.tolist()
if not isinstance(images, list):
raise ValueError('images argument must be of type list')
if not isinstance(images[0], list):
images = [images]
if isinstance(slices, int):
one_slice = True
if isinstance(slices, np.ndarray):
slices = slices.tolist()
if isinstance(slices, list):
one_slice = False
if not isinstance(slices[0], list):
slices = [slices]
nslicerow = len(slices)
nslicecol = len(slices[0])
nrow = len(images)
ncol = len(images[0])
if rlabels is None:
rlabels = [None]*nrow
if clabels is None:
clabels = [None]*ncol
if (not one_slice):
if (nrow != nslicerow) or (ncol != nslicecol):
raise ValueError('`images` arg shape (%i,%i) must equal `slices` arg shape (%i,%i)!' % (nrow,ncol,nslicerow,nslicecol))
fig = plt.figure(figsize=((ncol+1)*2.5*figsize, (nrow+1)*2.5*figsize))
if title is not None:
basex = 0.5
basey = 0.9 if clabels[0] is None else 0.95
fig.suptitle(title, fontsize=tfontsize, x=basex+title_dx, y=basey+title_dy)
if (cpad > 0) and (rpad > 0):
bothgridpad = max(cpad, rpad)
cpad = 0
rpad = 0
else:
bothgridpad = 0.0
gs = gridspec.GridSpec(nrow, ncol, wspace=bothgridpad, hspace=0.0,
top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1) + cpad,
left=0.5/(ncol+1) + rpad, right=1-0.5/(ncol+1))
for rowidx in range(nrow):
for colidx in range(ncol):
ax = plt.subplot(gs[rowidx, colidx])
if colidx == 0:
if rlabels[rowidx] is not None:
bottom, height = .25, .5
top = bottom + height
# add label text
ax.text(-0.07, 0.5*(bottom+top), rlabels[rowidx],
horizontalalignment='right', verticalalignment='center',
rotation='vertical', transform=ax.transAxes,
color=rfontcolor, fontsize=rfontsize)
# add label background
extra = 0.3 if rowidx == 0 else 0.0
rect = patches.Rectangle((-0.3, 0), 0.3, 1.0+extra,
facecolor=rfacecolor,
alpha=1., transform=ax.transAxes, clip_on=False)
ax.add_patch(rect)
if rowidx == 0:
if clabels[colidx] is not None:
bottom, height = .25, .5
left, width = .25, .5
right = left + width
top = bottom + height
ax.text(0.5*(left+right), 0.09+top+bottom, clabels[colidx],
horizontalalignment='center', verticalalignment='center',
rotation='horizontal', transform=ax.transAxes,
color=cfontcolor, fontsize=cfontsize)
# add label background
rect = patches.Rectangle((0, 1.), 1.0, 0.3,
facecolor=cfacecolor,
alpha=1., transform=ax.transAxes, clip_on=False)
ax.add_patch(rect)
tmpimg = images[rowidx][colidx]
if isinstance(axes, int):
tmpaxis = axes
else:
tmpaxis = axes[rowidx][colidx]
sliceidx = slices[rowidx][colidx] if not one_slice else slices
tmpslice = slice_image(tmpimg, tmpaxis, sliceidx)
tmpslice = reorient_slice(tmpslice, tmpaxis)
ax.imshow(tmpslice, cmap='Greys_r', aspect='auto')
ax.axis('off')
if filename is not None:
filename = os.path.expanduser(filename)
plt.savefig(filename, dpi=dpi, transparent=transparent, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
|
[
"def",
"plot_grid",
"(",
"images",
",",
"slices",
"=",
"None",
",",
"axes",
"=",
"2",
",",
"# general figure arguments",
"figsize",
"=",
"1.",
",",
"rpad",
"=",
"0",
",",
"cpad",
"=",
"0",
",",
"# title arguments",
"title",
"=",
"None",
",",
"tfontsize",
"=",
"20",
",",
"title_dx",
"=",
"0",
",",
"title_dy",
"=",
"0",
",",
"# row arguments",
"rlabels",
"=",
"None",
",",
"rfontsize",
"=",
"14",
",",
"rfontcolor",
"=",
"'white'",
",",
"rfacecolor",
"=",
"'black'",
",",
"# column arguments ",
"clabels",
"=",
"None",
",",
"cfontsize",
"=",
"14",
",",
"cfontcolor",
"=",
"'white'",
",",
"cfacecolor",
"=",
"'black'",
",",
"# save arguments",
"filename",
"=",
"None",
",",
"dpi",
"=",
"400",
",",
"transparent",
"=",
"True",
",",
"# other args",
"*",
"*",
"kwargs",
")",
":",
"def",
"mirror_matrix",
"(",
"x",
")",
":",
"return",
"x",
"[",
":",
":",
"-",
"1",
",",
":",
"]",
"def",
"rotate270_matrix",
"(",
"x",
")",
":",
"return",
"mirror_matrix",
"(",
"x",
".",
"T",
")",
"def",
"rotate180_matrix",
"(",
"x",
")",
":",
"return",
"x",
"[",
":",
":",
"-",
"1",
",",
":",
"]",
"def",
"rotate90_matrix",
"(",
"x",
")",
":",
"return",
"mirror_matrix",
"(",
"x",
")",
".",
"T",
"def",
"flip_matrix",
"(",
"x",
")",
":",
"return",
"mirror_matrix",
"(",
"rotate180_matrix",
"(",
"x",
")",
")",
"def",
"reorient_slice",
"(",
"x",
",",
"axis",
")",
":",
"if",
"(",
"axis",
"!=",
"1",
")",
":",
"x",
"=",
"rotate90_matrix",
"(",
"x",
")",
"if",
"(",
"axis",
"==",
"1",
")",
":",
"x",
"=",
"rotate90_matrix",
"(",
"x",
")",
"x",
"=",
"mirror_matrix",
"(",
"x",
")",
"return",
"x",
"def",
"slice_image",
"(",
"img",
",",
"axis",
",",
"idx",
")",
":",
"if",
"axis",
"==",
"0",
":",
"return",
"img",
"[",
"idx",
",",
":",
",",
":",
"]",
"elif",
"axis",
"==",
"1",
":",
"return",
"img",
"[",
":",
",",
"idx",
",",
":",
"]",
"elif",
"axis",
"==",
"2",
":",
"return",
"img",
"[",
":",
",",
":",
",",
"idx",
"]",
"elif",
"axis",
"==",
"-",
"1",
":",
"return",
"img",
"[",
":",
",",
":",
",",
"idx",
"]",
"elif",
"axis",
"==",
"-",
"2",
":",
"return",
"img",
"[",
":",
",",
"idx",
",",
":",
"]",
"elif",
"axis",
"==",
"-",
"3",
":",
"return",
"img",
"[",
"idx",
",",
":",
",",
":",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'axis %i not valid'",
"%",
"axis",
")",
"if",
"isinstance",
"(",
"images",
",",
"np",
".",
"ndarray",
")",
":",
"images",
"=",
"images",
".",
"tolist",
"(",
")",
"if",
"not",
"isinstance",
"(",
"images",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"'images argument must be of type list'",
")",
"if",
"not",
"isinstance",
"(",
"images",
"[",
"0",
"]",
",",
"list",
")",
":",
"images",
"=",
"[",
"images",
"]",
"if",
"isinstance",
"(",
"slices",
",",
"int",
")",
":",
"one_slice",
"=",
"True",
"if",
"isinstance",
"(",
"slices",
",",
"np",
".",
"ndarray",
")",
":",
"slices",
"=",
"slices",
".",
"tolist",
"(",
")",
"if",
"isinstance",
"(",
"slices",
",",
"list",
")",
":",
"one_slice",
"=",
"False",
"if",
"not",
"isinstance",
"(",
"slices",
"[",
"0",
"]",
",",
"list",
")",
":",
"slices",
"=",
"[",
"slices",
"]",
"nslicerow",
"=",
"len",
"(",
"slices",
")",
"nslicecol",
"=",
"len",
"(",
"slices",
"[",
"0",
"]",
")",
"nrow",
"=",
"len",
"(",
"images",
")",
"ncol",
"=",
"len",
"(",
"images",
"[",
"0",
"]",
")",
"if",
"rlabels",
"is",
"None",
":",
"rlabels",
"=",
"[",
"None",
"]",
"*",
"nrow",
"if",
"clabels",
"is",
"None",
":",
"clabels",
"=",
"[",
"None",
"]",
"*",
"ncol",
"if",
"(",
"not",
"one_slice",
")",
":",
"if",
"(",
"nrow",
"!=",
"nslicerow",
")",
"or",
"(",
"ncol",
"!=",
"nslicecol",
")",
":",
"raise",
"ValueError",
"(",
"'`images` arg shape (%i,%i) must equal `slices` arg shape (%i,%i)!'",
"%",
"(",
"nrow",
",",
"ncol",
",",
"nslicerow",
",",
"nslicecol",
")",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"(",
"ncol",
"+",
"1",
")",
"*",
"2.5",
"*",
"figsize",
",",
"(",
"nrow",
"+",
"1",
")",
"*",
"2.5",
"*",
"figsize",
")",
")",
"if",
"title",
"is",
"not",
"None",
":",
"basex",
"=",
"0.5",
"basey",
"=",
"0.9",
"if",
"clabels",
"[",
"0",
"]",
"is",
"None",
"else",
"0.95",
"fig",
".",
"suptitle",
"(",
"title",
",",
"fontsize",
"=",
"tfontsize",
",",
"x",
"=",
"basex",
"+",
"title_dx",
",",
"y",
"=",
"basey",
"+",
"title_dy",
")",
"if",
"(",
"cpad",
">",
"0",
")",
"and",
"(",
"rpad",
">",
"0",
")",
":",
"bothgridpad",
"=",
"max",
"(",
"cpad",
",",
"rpad",
")",
"cpad",
"=",
"0",
"rpad",
"=",
"0",
"else",
":",
"bothgridpad",
"=",
"0.0",
"gs",
"=",
"gridspec",
".",
"GridSpec",
"(",
"nrow",
",",
"ncol",
",",
"wspace",
"=",
"bothgridpad",
",",
"hspace",
"=",
"0.0",
",",
"top",
"=",
"1.",
"-",
"0.5",
"/",
"(",
"nrow",
"+",
"1",
")",
",",
"bottom",
"=",
"0.5",
"/",
"(",
"nrow",
"+",
"1",
")",
"+",
"cpad",
",",
"left",
"=",
"0.5",
"/",
"(",
"ncol",
"+",
"1",
")",
"+",
"rpad",
",",
"right",
"=",
"1",
"-",
"0.5",
"/",
"(",
"ncol",
"+",
"1",
")",
")",
"for",
"rowidx",
"in",
"range",
"(",
"nrow",
")",
":",
"for",
"colidx",
"in",
"range",
"(",
"ncol",
")",
":",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"gs",
"[",
"rowidx",
",",
"colidx",
"]",
")",
"if",
"colidx",
"==",
"0",
":",
"if",
"rlabels",
"[",
"rowidx",
"]",
"is",
"not",
"None",
":",
"bottom",
",",
"height",
"=",
".25",
",",
".5",
"top",
"=",
"bottom",
"+",
"height",
"# add label text",
"ax",
".",
"text",
"(",
"-",
"0.07",
",",
"0.5",
"*",
"(",
"bottom",
"+",
"top",
")",
",",
"rlabels",
"[",
"rowidx",
"]",
",",
"horizontalalignment",
"=",
"'right'",
",",
"verticalalignment",
"=",
"'center'",
",",
"rotation",
"=",
"'vertical'",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"color",
"=",
"rfontcolor",
",",
"fontsize",
"=",
"rfontsize",
")",
"# add label background",
"extra",
"=",
"0.3",
"if",
"rowidx",
"==",
"0",
"else",
"0.0",
"rect",
"=",
"patches",
".",
"Rectangle",
"(",
"(",
"-",
"0.3",
",",
"0",
")",
",",
"0.3",
",",
"1.0",
"+",
"extra",
",",
"facecolor",
"=",
"rfacecolor",
",",
"alpha",
"=",
"1.",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"clip_on",
"=",
"False",
")",
"ax",
".",
"add_patch",
"(",
"rect",
")",
"if",
"rowidx",
"==",
"0",
":",
"if",
"clabels",
"[",
"colidx",
"]",
"is",
"not",
"None",
":",
"bottom",
",",
"height",
"=",
".25",
",",
".5",
"left",
",",
"width",
"=",
".25",
",",
".5",
"right",
"=",
"left",
"+",
"width",
"top",
"=",
"bottom",
"+",
"height",
"ax",
".",
"text",
"(",
"0.5",
"*",
"(",
"left",
"+",
"right",
")",
",",
"0.09",
"+",
"top",
"+",
"bottom",
",",
"clabels",
"[",
"colidx",
"]",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"rotation",
"=",
"'horizontal'",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"color",
"=",
"cfontcolor",
",",
"fontsize",
"=",
"cfontsize",
")",
"# add label background",
"rect",
"=",
"patches",
".",
"Rectangle",
"(",
"(",
"0",
",",
"1.",
")",
",",
"1.0",
",",
"0.3",
",",
"facecolor",
"=",
"cfacecolor",
",",
"alpha",
"=",
"1.",
",",
"transform",
"=",
"ax",
".",
"transAxes",
",",
"clip_on",
"=",
"False",
")",
"ax",
".",
"add_patch",
"(",
"rect",
")",
"tmpimg",
"=",
"images",
"[",
"rowidx",
"]",
"[",
"colidx",
"]",
"if",
"isinstance",
"(",
"axes",
",",
"int",
")",
":",
"tmpaxis",
"=",
"axes",
"else",
":",
"tmpaxis",
"=",
"axes",
"[",
"rowidx",
"]",
"[",
"colidx",
"]",
"sliceidx",
"=",
"slices",
"[",
"rowidx",
"]",
"[",
"colidx",
"]",
"if",
"not",
"one_slice",
"else",
"slices",
"tmpslice",
"=",
"slice_image",
"(",
"tmpimg",
",",
"tmpaxis",
",",
"sliceidx",
")",
"tmpslice",
"=",
"reorient_slice",
"(",
"tmpslice",
",",
"tmpaxis",
")",
"ax",
".",
"imshow",
"(",
"tmpslice",
",",
"cmap",
"=",
"'Greys_r'",
",",
"aspect",
"=",
"'auto'",
")",
"ax",
".",
"axis",
"(",
"'off'",
")",
"if",
"filename",
"is",
"not",
"None",
":",
"filename",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
"plt",
".",
"savefig",
"(",
"filename",
",",
"dpi",
"=",
"dpi",
",",
"transparent",
"=",
"transparent",
",",
"bbox_inches",
"=",
"'tight'",
")",
"plt",
".",
"close",
"(",
"fig",
")",
"else",
":",
"plt",
".",
"show",
"(",
")"
] |
Plot a collection of images in an arbitrarily-defined grid
Matplotlib named colors: https://matplotlib.org/examples/color/named_colors.html
Arguments
---------
images : list of ANTsImage types
image(s) to plot.
if one image, this image will be used for all grid locations.
if multiple images, they should be arranged in a list the same
shape as the `gridsize` argument.
slices : integer or list of integers
slice indices to plot
if one integer, this slice index will be used for all images
if multiple integers, they should be arranged in a list the same
shape as the `gridsize` argument
axes : integer or list of integers
axis or axes along which to plot image slices
if one integer, this axis will be used for all images
if multiple integers, they should be arranged in a list the same
shape as the `gridsize` argument
Example
-------
>>> import ants
>>> import numpy as np
>>> mni1 = ants.image_read(ants.get_data('mni'))
>>> mni2 = mni1.smooth_image(1.)
>>> mni3 = mni1.smooth_image(2.)
>>> mni4 = mni1.smooth_image(3.)
>>> images = np.asarray([[mni1, mni2],
... [mni3, mni4]])
>>> slices = np.asarray([[100, 100],
... [100, 100]])
>>> #axes = np.asarray([[2,2],[2,2]])
>>> # standard plotting
>>> ants.plot_grid(images=images, slices=slices, title='2x2 Grid')
>>> ants.plot_grid(images.reshape(1,4), slices.reshape(1,4), title='1x4 Grid')
>>> ants.plot_grid(images.reshape(4,1), slices.reshape(4,1), title='4x1 Grid')
>>> # Padding between rows and/or columns
>>> ants.plot_grid(images, slices, cpad=0.02, title='Col Padding')
>>> ants.plot_grid(images, slices, rpad=0.02, title='Row Padding')
>>> ants.plot_grid(images, slices, rpad=0.02, cpad=0.02, title='Row and Col Padding')
>>> # Adding plain row and/or column labels
>>> ants.plot_grid(images, slices, title='Adding Row Labels', rlabels=['Row #1', 'Row #2'])
>>> ants.plot_grid(images, slices, title='Adding Col Labels', clabels=['Col #1', 'Col #2'])
>>> ants.plot_grid(images, slices, title='Row and Col Labels',
rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2'])
>>> # Making a publication-quality image
>>> images = np.asarray([[mni1, mni2, mni2],
... [mni3, mni4, mni4]])
>>> slices = np.asarray([[100, 100, 100],
... [100, 100, 100]])
>>> axes = np.asarray([[0, 1, 2],
[0, 1, 2]])
>>> ants.plot_grid(images, slices, axes, title='Publication Figures with ANTsPy',
tfontsize=20, title_dy=0.03, title_dx=-0.04,
rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2', 'Col 3'],
rfontsize=16, cfontsize=16)
|
[
"Plot",
"a",
"collection",
"of",
"images",
"in",
"an",
"arbitrarily",
"-",
"defined",
"grid",
"Matplotlib",
"named",
"colors",
":",
"https",
":",
"//",
"matplotlib",
".",
"org",
"/",
"examples",
"/",
"color",
"/",
"named_colors",
".",
"html"
] |
python
|
train
| 37.74537 |
ahmontero/dop
|
dop/client.py
|
https://github.com/ahmontero/dop/blob/40354ac6feefe92a7555fe2d1834138c9a03e518/dop/client.py#L630-L636
|
def destroy_ssh_key(self, ssh_key_id):
"""
This method will delete the SSH key from your account.
"""
json = self.request('/ssh_keys/%s/destroy' % ssh_key_id, method='GET')
status = json.get('status')
return status
|
[
"def",
"destroy_ssh_key",
"(",
"self",
",",
"ssh_key_id",
")",
":",
"json",
"=",
"self",
".",
"request",
"(",
"'/ssh_keys/%s/destroy'",
"%",
"ssh_key_id",
",",
"method",
"=",
"'GET'",
")",
"status",
"=",
"json",
".",
"get",
"(",
"'status'",
")",
"return",
"status"
] |
This method will delete the SSH key from your account.
|
[
"This",
"method",
"will",
"delete",
"the",
"SSH",
"key",
"from",
"your",
"account",
"."
] |
python
|
train
| 36.571429 |
pwaller/pyprof2calltree
|
pyprof2calltree.py
|
https://github.com/pwaller/pyprof2calltree/blob/62b99c7b366ad317d3d5e21fb73466c8baea670e/pyprof2calltree.py#L204-L211
|
def output(self, out_file):
"""Write the converted entries to out_file"""
self.out_file = out_file
out_file.write('event: ns : Nanoseconds\n')
out_file.write('events: ns\n')
self._output_summary()
for entry in sorted(self.entries, key=_entry_sort_key):
self._output_entry(entry)
|
[
"def",
"output",
"(",
"self",
",",
"out_file",
")",
":",
"self",
".",
"out_file",
"=",
"out_file",
"out_file",
".",
"write",
"(",
"'event: ns : Nanoseconds\\n'",
")",
"out_file",
".",
"write",
"(",
"'events: ns\\n'",
")",
"self",
".",
"_output_summary",
"(",
")",
"for",
"entry",
"in",
"sorted",
"(",
"self",
".",
"entries",
",",
"key",
"=",
"_entry_sort_key",
")",
":",
"self",
".",
"_output_entry",
"(",
"entry",
")"
] |
Write the converted entries to out_file
|
[
"Write",
"the",
"converted",
"entries",
"to",
"out_file"
] |
python
|
train
| 41.375 |
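A hedged end-to-end sketch using pyprof2calltree's documented convert helper; the profiled expression is a placeholder:

import cProfile
from pyprof2calltree import convert

profiler = cProfile.Profile()
profiler.run('sum(range(10000))')
convert(profiler.getstats(), 'example.calltree')  # writes a kcachegrind-readable file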
mozilla/crontabber
|
crontabber/app.py
|
https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/app.py#L264-L286
|
def items(self):
"""return all the app_names and their values as tuples"""
sql = """
SELECT
app_name,
next_run,
first_run,
last_run,
last_success,
depends_on,
error_count,
last_error
FROM crontabber"""
columns = (
'app_name',
'next_run', 'first_run', 'last_run', 'last_success',
'depends_on', 'error_count', 'last_error'
)
items = []
for record in self.transaction_executor(execute_query_fetchall, sql):
row = dict(zip(columns, record))
items.append((row.pop('app_name'), row))
return items
|
[
"def",
"items",
"(",
"self",
")",
":",
"sql",
"=",
"\"\"\"\n SELECT\n app_name,\n next_run,\n first_run,\n last_run,\n last_success,\n depends_on,\n error_count,\n last_error\n FROM crontabber\"\"\"",
"columns",
"=",
"(",
"'app_name'",
",",
"'next_run'",
",",
"'first_run'",
",",
"'last_run'",
",",
"'last_success'",
",",
"'depends_on'",
",",
"'error_count'",
",",
"'last_error'",
")",
"items",
"=",
"[",
"]",
"for",
"record",
"in",
"self",
".",
"transaction_executor",
"(",
"execute_query_fetchall",
",",
"sql",
")",
":",
"row",
"=",
"dict",
"(",
"zip",
"(",
"columns",
",",
"record",
")",
")",
"items",
".",
"append",
"(",
"(",
"row",
".",
"pop",
"(",
"'app_name'",
")",
",",
"row",
")",
")",
"return",
"items"
] |
return all the app_names and their values as tuples
|
[
"return",
"all",
"the",
"app_names",
"and",
"their",
"values",
"as",
"tuples"
] |
python
|
train
| 32 |
f3at/feat
|
src/feat/agents/base/agent.py
|
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agents/base/agent.py#L363-L380
|
def substitute_partner(self, state, partners_recp, recp, alloc_id):
'''
Establish the partnership to recp and, when it is successful
remove partner with recipient partners_recp.
Use with caution: The partner which we are removing is not notified
in any way, so it still keeps the link in its description. The correct
usage of this method requires calling it from two agents which are
divorcing.
'''
partner = state.partners.find(recipient.IRecipient(partners_recp))
if not partner:
msg = 'substitute_partner() did not find the partner %r' %\
partners_recp
self.error(msg)
return fiber.fail(partners.FindPartnerError(msg))
return self.establish_partnership(recp, partner.allocation_id,
alloc_id, substitute=partner)
|
[
"def",
"substitute_partner",
"(",
"self",
",",
"state",
",",
"partners_recp",
",",
"recp",
",",
"alloc_id",
")",
":",
"partner",
"=",
"state",
".",
"partners",
".",
"find",
"(",
"recipient",
".",
"IRecipient",
"(",
"partners_recp",
")",
")",
"if",
"not",
"partner",
":",
"msg",
"=",
"'subsitute_partner() did not find the partner %r'",
"%",
"partners_recp",
"self",
".",
"error",
"(",
"msg",
")",
"return",
"fiber",
".",
"fail",
"(",
"partners",
".",
"FindPartnerError",
"(",
"msg",
")",
")",
"return",
"self",
".",
"establish_partnership",
"(",
"recp",
",",
"partner",
".",
"allocation_id",
",",
"alloc_id",
",",
"substitute",
"=",
"partner",
")"
] |
Establish the partnership to recp and, when it is successful
remove partner with recipient partners_recp.
Use with caution: The partner which we are removing is not notified
in any way, so it still keeps the link in its description. The correct
usage of this method requires calling it from two agents which are
divorcing.
|
[
"Establish",
"the",
"partnership",
"to",
"recp",
"and",
"when",
"it",
"is",
"successfull",
"remove",
"partner",
"with",
"recipient",
"partners_recp",
"."
] |
python
|
train
| 48.777778 |
cloudmesh-cmd3/cmd3
|
cmd3/plugins/info.py
|
https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/cmd3/plugins/info.py#L22-L42
|
def do_info(self, arg, arguments):
"""
::
Usage:
info [--all]
Options:
--all -a more extensive information
Prints some internal information about the shell
"""
if arguments["--all"]:
Console.ok(70 * "-")
Console.ok('DIR')
Console.ok(70 * "-")
for element in dir(self):
Console.ok(str(element))
Console.ok(70 * "-")
self.print_info()
|
[
"def",
"do_info",
"(",
"self",
",",
"arg",
",",
"arguments",
")",
":",
"if",
"arguments",
"[",
"\"--all\"",
"]",
":",
"Console",
".",
"ok",
"(",
"70",
"*",
"\"-\"",
")",
"Console",
".",
"ok",
"(",
"'DIR'",
")",
"Console",
".",
"ok",
"(",
"70",
"*",
"\"-\"",
")",
"for",
"element",
"in",
"dir",
"(",
"self",
")",
":",
"Console",
".",
"ok",
"(",
"str",
"(",
"element",
")",
")",
"Console",
".",
"ok",
"(",
"70",
"*",
"\"-\"",
")",
"self",
".",
"print_info",
"(",
")"
] |
::
Usage:
info [--all]
Options:
--all -a more extensive information
Prints some internal information about the shell
|
[
"::"
] |
python
|
train
| 24.285714 |
mfcloud/python-zvm-sdk
|
zvmsdk/smtclient.py
|
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/smtclient.py#L346-L355
|
def get_power_state(self, userid):
"""Get power status of a z/VM instance."""
LOG.debug('Querying power stat of %s' % userid)
requestData = "PowerVM " + userid + " status"
action = "query power state of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(requestData)
with zvmutils.expect_invalid_resp_data(results):
status = results['response'][0].partition(': ')[2]
return status
|
[
"def",
"get_power_state",
"(",
"self",
",",
"userid",
")",
":",
"LOG",
".",
"debug",
"(",
"'Querying power stat of %s'",
"%",
"userid",
")",
"requestData",
"=",
"\"PowerVM \"",
"+",
"userid",
"+",
"\" status\"",
"action",
"=",
"\"query power state of '%s'\"",
"%",
"userid",
"with",
"zvmutils",
".",
"log_and_reraise_smt_request_failed",
"(",
"action",
")",
":",
"results",
"=",
"self",
".",
"_request",
"(",
"requestData",
")",
"with",
"zvmutils",
".",
"expect_invalid_resp_data",
"(",
"results",
")",
":",
"status",
"=",
"results",
"[",
"'response'",
"]",
"[",
"0",
"]",
".",
"partition",
"(",
"': '",
")",
"[",
"2",
"]",
"return",
"status"
] |
Get power status of a z/VM instance.
|
[
"Get",
"power",
"status",
"of",
"a",
"z",
"/",
"VM",
"instance",
"."
] |
python
|
train
| 49.7 |
santoshphilip/eppy
|
eppy/hvacbuilder.py
|
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L72-L78
|
def makepipecomponent(idf, pname):
"""make a pipe component
generate inlet outlet names"""
apipe = idf.newidfobject("Pipe:Adiabatic".upper(), Name=pname)
apipe.Inlet_Node_Name = "%s_inlet" % (pname,)
apipe.Outlet_Node_Name = "%s_outlet" % (pname,)
return apipe
|
[
"def",
"makepipecomponent",
"(",
"idf",
",",
"pname",
")",
":",
"apipe",
"=",
"idf",
".",
"newidfobject",
"(",
"\"Pipe:Adiabatic\"",
".",
"upper",
"(",
")",
",",
"Name",
"=",
"pname",
")",
"apipe",
".",
"Inlet_Node_Name",
"=",
"\"%s_inlet\"",
"%",
"(",
"pname",
",",
")",
"apipe",
".",
"Outlet_Node_Name",
"=",
"\"%s_outlet\"",
"%",
"(",
"pname",
",",
")",
"return",
"apipe"
] |
make a pipe component
generate inlet outlet names
|
[
"make",
"a",
"pipe",
"component",
"generate",
"inlet",
"outlet",
"names"
] |
python
|
train
| 39.714286 |
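A usage sketch, assuming eppy's hvacbuilder module and local IDD/IDF files; the paths and pipe name are placeholders:

from eppy.modeleditor import IDF
from eppy import hvacbuilder

IDF.setiddname('/path/to/Energy+.idd')   # placeholder path
idf = IDF('/path/to/model.idf')          # placeholder path
pipe = hvacbuilder.makepipecomponent(idf, 'supply_pipe')
print(pipe.Inlet_Node_Name)   # supply_pipe_inlet
print(pipe.Outlet_Node_Name)  # supply_pipe_outlet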
python-wink/python-wink
|
src/pywink/devices/cloud_clock.py
|
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/cloud_clock.py#L183-L187
|
def update_state(self):
""" Update state with latest info from Wink API. """
response = self.api_interface.get_device_state(self, id_override=self.parent_id(),
type_override=self.parent_object_type())
self._update_state_from_response(response)
|
[
"def",
"update_state",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"api_interface",
".",
"get_device_state",
"(",
"self",
",",
"id_override",
"=",
"self",
".",
"parent_id",
"(",
")",
",",
"type_override",
"=",
"self",
".",
"parent_object_type",
"(",
")",
")",
"self",
".",
"_update_state_from_response",
"(",
"response",
")"
] |
Update state with latest info from Wink API.
|
[
"Update",
"state",
"with",
"latest",
"info",
"from",
"Wink",
"API",
"."
] |
python
|
train
| 64.4 |
saltstack/salt
|
salt/modules/postfix.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postfix.py#L284-L308
|
def set_main(key, value, path=MAIN_CF):
'''
Set a single config value in the main.cf file. If the value does not already
exist, it will be appended to the end.
CLI Example:
salt <minion> postfix.set_main mailq_path /usr/bin/mailq
'''
pairs, conf_list = _parse_main(path)
new_conf = []
key_line_match = re.compile("^{0}([\\s=]|$)".format(re.escape(key)))
if key in pairs:
for line in conf_list:
if re.match(key_line_match, line):
new_conf.append('{0} = {1}'.format(key, value))
else:
new_conf.append(line)
else:
conf_list.append('{0} = {1}'.format(key, value))
new_conf = conf_list
_write_conf(new_conf, path)
return new_conf
|
[
"def",
"set_main",
"(",
"key",
",",
"value",
",",
"path",
"=",
"MAIN_CF",
")",
":",
"pairs",
",",
"conf_list",
"=",
"_parse_main",
"(",
"path",
")",
"new_conf",
"=",
"[",
"]",
"key_line_match",
"=",
"re",
".",
"compile",
"(",
"\"^{0}([\\\\s=]|$)\"",
".",
"format",
"(",
"re",
".",
"escape",
"(",
"key",
")",
")",
")",
"if",
"key",
"in",
"pairs",
":",
"for",
"line",
"in",
"conf_list",
":",
"if",
"re",
".",
"match",
"(",
"key_line_match",
",",
"line",
")",
":",
"new_conf",
".",
"append",
"(",
"'{0} = {1}'",
".",
"format",
"(",
"key",
",",
"value",
")",
")",
"else",
":",
"new_conf",
".",
"append",
"(",
"line",
")",
"else",
":",
"conf_list",
".",
"append",
"(",
"'{0} = {1}'",
".",
"format",
"(",
"key",
",",
"value",
")",
")",
"new_conf",
"=",
"conf_list",
"_write_conf",
"(",
"new_conf",
",",
"path",
")",
"return",
"new_conf"
] |
Set a single config value in the main.cf file. If the value does not already
exist, it will be appended to the end.
CLI Example:
salt <minion> postfix.set_main mailq_path /usr/bin/mailq
|
[
"Set",
"a",
"single",
"config",
"value",
"in",
"the",
"main",
".",
"cf",
"file",
".",
"If",
"the",
"value",
"does",
"not",
"already",
"exist",
"it",
"will",
"be",
"appended",
"to",
"the",
"end",
"."
] |
python
|
train
| 29.6 |
lrq3000/pyFileFixity
|
pyFileFixity/lib/profilers/visual/pympler/process.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/process.py#L118-L152
|
def update(self):
"""
Get virtual size of current process by reading the process' stat file.
This should work for Linux.
"""
try:
stat = open('/proc/self/stat')
status = open('/proc/self/status')
except IOError: # pragma: no cover
return False
else:
stats = stat.read().split()
self.vsz = int( stats[22] )
self.rss = int( stats[23] ) * self.pagesize
self.pagefaults = int( stats[11] )
for entry in status.readlines():
key, value = entry.split(':')
size_in_bytes = lambda x: int(x.split()[0]) * 1024
if key == 'VmData':
self.data_segment = size_in_bytes(value)
elif key == 'VmExe':
self.code_segment = size_in_bytes(value)
elif key == 'VmLib':
self.shared_segment = size_in_bytes(value)
elif key == 'VmStk':
self.stack_segment = size_in_bytes(value)
key = self.key_map.get(key)
if key:
self.os_specific.append((key, value.strip()))
stat.close()
status.close()
return True
|
[
"def",
"update",
"(",
"self",
")",
":",
"try",
":",
"stat",
"=",
"open",
"(",
"'/proc/self/stat'",
")",
"status",
"=",
"open",
"(",
"'/proc/self/status'",
")",
"except",
"IOError",
":",
"# pragma: no cover",
"return",
"False",
"else",
":",
"stats",
"=",
"stat",
".",
"read",
"(",
")",
".",
"split",
"(",
")",
"self",
".",
"vsz",
"=",
"int",
"(",
"stats",
"[",
"22",
"]",
")",
"self",
".",
"rss",
"=",
"int",
"(",
"stats",
"[",
"23",
"]",
")",
"*",
"self",
".",
"pagesize",
"self",
".",
"pagefaults",
"=",
"int",
"(",
"stats",
"[",
"11",
"]",
")",
"for",
"entry",
"in",
"status",
".",
"readlines",
"(",
")",
":",
"key",
",",
"value",
"=",
"entry",
".",
"split",
"(",
"':'",
")",
"size_in_bytes",
"=",
"lambda",
"x",
":",
"int",
"(",
"x",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"*",
"1024",
"if",
"key",
"==",
"'VmData'",
":",
"self",
".",
"data_segment",
"=",
"size_in_bytes",
"(",
"value",
")",
"elif",
"key",
"==",
"'VmExe'",
":",
"self",
".",
"code_segment",
"=",
"size_in_bytes",
"(",
"value",
")",
"elif",
"key",
"==",
"'VmLib'",
":",
"self",
".",
"shared_segment",
"=",
"size_in_bytes",
"(",
"value",
")",
"elif",
"key",
"==",
"'VmStk'",
":",
"self",
".",
"stack_segment",
"=",
"size_in_bytes",
"(",
"value",
")",
"key",
"=",
"self",
".",
"key_map",
".",
"get",
"(",
"key",
")",
"if",
"key",
":",
"self",
".",
"os_specific",
".",
"append",
"(",
"(",
"key",
",",
"value",
".",
"strip",
"(",
")",
")",
")",
"stat",
".",
"close",
"(",
")",
"status",
".",
"close",
"(",
")",
"return",
"True"
] |
Get virtual size of current process by reading the process' stat file.
This should work for Linux.
|
[
"Get",
"virtual",
"size",
"of",
"current",
"process",
"by",
"reading",
"the",
"process",
"stat",
"file",
".",
"This",
"should",
"work",
"for",
"Linux",
"."
] |
python
|
train
| 35.857143 |
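The /proc parsing above can be reproduced standalone on Linux; a minimal sketch assuming the same stat field layout:

import os

with open('/proc/self/stat') as stat:
    stats = stat.read().split()
vsz = int(stats[22])                               # virtual size, bytes
rss = int(stats[23]) * os.sysconf('SC_PAGE_SIZE')  # resident pages -> bytes
print(vsz, rss)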
razor-x/scipy-data_fitting
|
scipy_data_fitting/figure/plot.py
|
https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/figure/plot.py#L88-L92
|
def plot_fit(self):
"""
Add the fit to the plot.
"""
self.plt.plot(*self.fit.fit, **self.options['fit'])
|
[
"def",
"plot_fit",
"(",
"self",
")",
":",
"self",
".",
"plt",
".",
"plot",
"(",
"*",
"self",
".",
"fit",
".",
"fit",
",",
"*",
"*",
"self",
".",
"options",
"[",
"'fit'",
"]",
")"
] |
Add the fit to the plot.
|
[
"Add",
"the",
"fit",
"to",
"the",
"plot",
"."
] |
python
|
train
| 26.4 |
ereOn/azmq
|
azmq/multiplexer.py
|
https://github.com/ereOn/azmq/blob/9f40d6d721eea7f7659ec6cc668811976db59854/azmq/multiplexer.py#L41-L72
|
async def recv_multipart(self):
"""
Read from all the associated sockets.
:returns: A list of tuples (socket, frames) for each socket that
returned a result.
"""
if not self._sockets:
return []
results = []
async def recv_and_store(socket):
frames = await socket.recv_multipart()
results.append((socket, frames))
tasks = [
asyncio.ensure_future(recv_and_store(socket), loop=self.loop)
for socket in self._sockets
]
try:
await asyncio.wait(
tasks,
return_when=asyncio.FIRST_COMPLETED,
loop=self.loop,
)
finally:
for task in tasks:
task.cancel()
return results
|
[
"async",
"def",
"recv_multipart",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_sockets",
":",
"return",
"[",
"]",
"results",
"=",
"[",
"]",
"async",
"def",
"recv_and_store",
"(",
"socket",
")",
":",
"frames",
"=",
"await",
"socket",
".",
"recv_multipart",
"(",
")",
"results",
".",
"append",
"(",
"(",
"socket",
",",
"frames",
")",
")",
"tasks",
"=",
"[",
"asyncio",
".",
"ensure_future",
"(",
"recv_and_store",
"(",
"socket",
")",
",",
"loop",
"=",
"self",
".",
"loop",
")",
"for",
"socket",
"in",
"self",
".",
"_sockets",
"]",
"try",
":",
"await",
"asyncio",
".",
"wait",
"(",
"tasks",
",",
"return_when",
"=",
"asyncio",
".",
"FIRST_COMPLETED",
",",
"loop",
"=",
"self",
".",
"loop",
",",
")",
"finally",
":",
"for",
"task",
"in",
"tasks",
":",
"task",
".",
"cancel",
"(",
")",
"return",
"results"
] |
Read from all the associated sockets.
:returns: A list of tuples (socket, frames) for each socket that
returned a result.
|
[
"Read",
"from",
"all",
"the",
"associated",
"sockets",
"."
] |
python
|
train
| 25.03125 |
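The first-completed fan-in used above maps onto plain asyncio primitives; a sketch with stub coroutines standing in for sockets:

import asyncio

async def fake_recv(name, delay):
    await asyncio.sleep(delay)
    return name, [b'frame']

async def main():
    tasks = [asyncio.ensure_future(fake_recv(n, d))
             for n, d in (('fast', 0.01), ('slow', 1.0))]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    print([t.result() for t in done])  # [('fast', [b'frame'])]

asyncio.run(main())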
Qiskit/qiskit-terra
|
qiskit/quantum_info/operators/channel/transformations.py
|
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/transformations.py#L61-L72
|
def _to_kraus(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Kraus representation."""
if rep == 'Kraus':
return data
if rep == 'Stinespring':
return _stinespring_to_kraus(data, input_dim, output_dim)
if rep == 'Operator':
return _from_operator('Kraus', data, input_dim, output_dim)
# Convert via Choi and Kraus
if rep != 'Choi':
data = _to_choi(rep, data, input_dim, output_dim)
return _choi_to_kraus(data, input_dim, output_dim)
|
[
"def",
"_to_kraus",
"(",
"rep",
",",
"data",
",",
"input_dim",
",",
"output_dim",
")",
":",
"if",
"rep",
"==",
"'Kraus'",
":",
"return",
"data",
"if",
"rep",
"==",
"'Stinespring'",
":",
"return",
"_stinespring_to_kraus",
"(",
"data",
",",
"input_dim",
",",
"output_dim",
")",
"if",
"rep",
"==",
"'Operator'",
":",
"return",
"_from_operator",
"(",
"'Kraus'",
",",
"data",
",",
"input_dim",
",",
"output_dim",
")",
"# Convert via Choi and Kraus",
"if",
"rep",
"!=",
"'Choi'",
":",
"data",
"=",
"_to_choi",
"(",
"rep",
",",
"data",
",",
"input_dim",
",",
"output_dim",
")",
"return",
"_choi_to_kraus",
"(",
"data",
",",
"input_dim",
",",
"output_dim",
")"
] |
Transform a QuantumChannel to the Kraus representation.
|
[
"Transform",
"a",
"QuantumChannel",
"to",
"the",
"Kraus",
"representation",
"."
] |
python
|
test
| 41.916667 |
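A hedged sketch of calling the converter above on a single-qubit identity channel; the Choi matrix is standard, but calling `_to_kraus` directly is an assumption since it is module-private:

import numpy as np

# Choi matrix of the one-qubit identity channel (column-stacking convention).
choi = np.array([[1, 0, 0, 1],
                 [0, 0, 0, 0],
                 [0, 0, 0, 0],
                 [1, 0, 0, 1]])
# rep == 'Choi' skips the re-conversion branch and goes straight to
# _choi_to_kraus, which should yield a single Kraus operator ~ identity.
kraus = _to_kraus('Choi', choi, input_dim=2, output_dim=2)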
limpyd/redis-limpyd
|
limpyd/model.py
|
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/model.py#L278-L289
|
def get_class_field(cls, field_name):
"""
Return the field object with the given name (for the class, the fields
are in the "_redis_attr_%s" form)
"""
if not cls.has_field(field_name):
raise AttributeError('"%s" is not a field for the model "%s"' %
(field_name, cls.__name__))
field = getattr(cls, '_redis_attr_%s' % field_name)
return field
|
[
"def",
"get_class_field",
"(",
"cls",
",",
"field_name",
")",
":",
"if",
"not",
"cls",
".",
"has_field",
"(",
"field_name",
")",
":",
"raise",
"AttributeError",
"(",
"'\"%s\" is not a field for the model \"%s\"'",
"%",
"(",
"field_name",
",",
"cls",
".",
"__name__",
")",
")",
"field",
"=",
"getattr",
"(",
"cls",
",",
"'_redis_attr_%s'",
"%",
"field_name",
")",
"return",
"field"
] |
Return the field object with the given name (for the class, the fields
are in the "_redis_attr_%s" form)
|
[
"Return",
"the",
"field",
"object",
"with",
"the",
"given",
"name",
"(",
"for",
"the",
"class",
"the",
"fields",
"are",
"in",
"the",
"_redis_attr_%s",
"form",
")"
] |
python
|
train
| 36.083333 |
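A hedged sketch, assuming a limpyd model `Person` with a `name` field has already been defined and wired to a database:

# 'name' resolves to the class-level _redis_attr_name field object.
name_field = Person.get_class_field('name')

# An unknown name raises AttributeError, per the guard above.
try:
    Person.get_class_field('nope')
except AttributeError as exc:
    print(exc)   # '"nope" is not a field for the model "Person"'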
inspirehep/refextract
|
refextract/references/text.py
|
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/text.py#L91-L142
|
def get_reference_lines(docbody,
ref_sect_start_line,
ref_sect_end_line,
ref_sect_title,
ref_line_marker_ptn,
title_marker_same_line):
"""After the reference section of a document has been identified, and the
first and last lines of the reference section have been recorded, this
function is called to take the reference lines out of the document body.
The document's reference lines are returned in a list of strings whereby
each string is a reference line. Before this can be done however, the
reference section is passed to another function that rebuilds any broken
reference lines.
@param docbody: (list) of strings - the entire document body.
@param ref_sect_start_line: (integer) - the index in docbody of the first
reference line.
@param ref_sect_end_line: (integer) - the index in docbody of the last
reference line.
@param ref_sect_title: (string) - the title of the reference section
(e.g. "References").
@param ref_line_marker_ptn: (string) - the pattern used to match the
marker for each reference line (e.g., could be used to match lines
with markers of the form [1], [2], etc.)
@param title_marker_same_line: (integer) - a flag to indicate whether
or not the reference section title was on the same line as the first
reference line's marker.
@return: (list) of strings. Each string is a reference line, extracted
from the document.
"""
start_idx = ref_sect_start_line
if title_marker_same_line:
# Title on same line as 1st ref- take title out!
title_start = docbody[start_idx].find(ref_sect_title)
if title_start != -1:
# Set the first line with no title
docbody[start_idx] = docbody[start_idx][title_start +
len(ref_sect_title):]
elif ref_sect_title is not None:
# Set the start of the reference section to be after the title line
start_idx += 1
if ref_sect_end_line is not None:
ref_lines = docbody[start_idx:ref_sect_end_line + 1]
else:
ref_lines = docbody[start_idx:]
if ref_sect_title:
ref_lines = strip_footer(ref_lines, ref_sect_title)
# Now rebuild reference lines:
# (Go through each raw reference line, and format them into a set
# of properly ordered lines based on markers)
return rebuild_reference_lines(ref_lines, ref_line_marker_ptn)
|
[
"def",
"get_reference_lines",
"(",
"docbody",
",",
"ref_sect_start_line",
",",
"ref_sect_end_line",
",",
"ref_sect_title",
",",
"ref_line_marker_ptn",
",",
"title_marker_same_line",
")",
":",
"start_idx",
"=",
"ref_sect_start_line",
"if",
"title_marker_same_line",
":",
"# Title on same line as 1st ref- take title out!",
"title_start",
"=",
"docbody",
"[",
"start_idx",
"]",
".",
"find",
"(",
"ref_sect_title",
")",
"if",
"title_start",
"!=",
"-",
"1",
":",
"# Set the first line with no title",
"docbody",
"[",
"start_idx",
"]",
"=",
"docbody",
"[",
"start_idx",
"]",
"[",
"title_start",
"+",
"len",
"(",
"ref_sect_title",
")",
":",
"]",
"elif",
"ref_sect_title",
"is",
"not",
"None",
":",
"# Set the start of the reference section to be after the title line",
"start_idx",
"+=",
"1",
"if",
"ref_sect_end_line",
"is",
"not",
"None",
":",
"ref_lines",
"=",
"docbody",
"[",
"start_idx",
":",
"ref_sect_end_line",
"+",
"1",
"]",
"else",
":",
"ref_lines",
"=",
"docbody",
"[",
"start_idx",
":",
"]",
"if",
"ref_sect_title",
":",
"ref_lines",
"=",
"strip_footer",
"(",
"ref_lines",
",",
"ref_sect_title",
")",
"# Now rebuild reference lines:",
"# (Go through each raw reference line, and format them into a set",
"# of properly ordered lines based on markers)",
"return",
"rebuild_reference_lines",
"(",
"ref_lines",
",",
"ref_line_marker_ptn",
")"
] |
After the reference section of a document has been identified, and the
first and last lines of the reference section have been recorded, this
function is called to take the reference lines out of the document body.
The document's reference lines are returned in a list of strings whereby
each string is a reference line. Before this can be done however, the
reference section is passed to another function that rebuilds any broken
reference lines.
@param docbody: (list) of strings - the entire document body.
@param ref_sect_start_line: (integer) - the index in docbody of the first
reference line.
@param ref_sect_end_line: (integer) - the index in docbody of the last
reference line.
@param ref_sect_title: (string) - the title of the reference section
(e.g. "References").
@param ref_line_marker_ptn: (string) - the pattern used to match the
marker for each reference line (e.g., could be used to match lines
with markers of the form [1], [2], etc.)
@param title_marker_same_line: (integer) - a flag to indicate whether
or not the reference section title was on the same line as the first
reference line's marker.
@return: (list) of strings. Each string is a reference line, extracted
from the document.
|
[
"After",
"the",
"reference",
"section",
"of",
"a",
"document",
"has",
"been",
"identified",
"and",
"the",
"first",
"and",
"last",
"lines",
"of",
"the",
"reference",
"section",
"have",
"been",
"recorded",
"this",
"function",
"is",
"called",
"to",
"take",
"the",
"reference",
"lines",
"out",
"of",
"the",
"document",
"body",
".",
"The",
"document",
"s",
"reference",
"lines",
"are",
"returned",
"in",
"a",
"list",
"of",
"strings",
"whereby",
"each",
"string",
"is",
"a",
"reference",
"line",
".",
"Before",
"this",
"can",
"be",
"done",
"however",
"the",
"reference",
"section",
"is",
"passed",
"to",
"another",
"function",
"that",
"rebuilds",
"any",
"broken",
"reference",
"lines",
"."
] |
python
|
train
| 49.519231 |
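An illustrative call on a fabricated four-line document body; the marker regex and index values are assumptions chosen to match the parameters documented above:

docbody = [
    'Some body text.',
    'References',
    '[1] A. Author, "A paper", 2001.',
    '[2] B. Author, "Another paper", 2002.',
]
ref_lines = get_reference_lines(
    docbody,
    ref_sect_start_line=1,               # index of the 'References' title line
    ref_sect_end_line=3,                 # index of the last reference line
    ref_sect_title='References',
    ref_line_marker_ptn=r'\s*\[\d+\]',   # assumed marker pattern
    title_marker_same_line=0,
)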
StackStorm/pybind
|
pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/__init__.py#L583-L604
|
def _set_ra_dns_server(self, v, load=False):
"""
Setter method for ra_dns_server, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_dns_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ra_dns_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ra_dns_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dns_server_prefix",ra_dns_server.ra_dns_server, yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dns-server-prefix', extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}), is_container='list', yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ra_dns_server must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dns_server_prefix",ra_dns_server.ra_dns_server, yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dns-server-prefix', extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}), is_container='list', yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True)""",
})
self.__ra_dns_server = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_ra_dns_server",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"dns_server_prefix\"",
",",
"ra_dns_server",
".",
"ra_dns_server",
",",
"yang_name",
"=",
"\"ra-dns-server\"",
",",
"rest_name",
"=",
"\"ra-dns-server\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'dns-server-prefix'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Set DNS server option'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-suppress-mode'",
":",
"None",
",",
"u'cli-suppress-key-abbreviation'",
":",
"None",
",",
"u'callpoint'",
":",
"u'IpV6NdRaDnsServerVlanIntf'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"ra-dns-server\"",
",",
"rest_name",
"=",
"\"ra-dns-server\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'Set DNS server option'",
",",
"u'cli-suppress-list-no'",
":",
"None",
",",
"u'cli-suppress-mode'",
":",
"None",
",",
"u'cli-suppress-key-abbreviation'",
":",
"None",
",",
"u'callpoint'",
":",
"u'IpV6NdRaDnsServerVlanIntf'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-ipv6-nd-ra'",
",",
"defining_module",
"=",
"'brocade-ipv6-nd-ra'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"ra_dns_server must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"dns_server_prefix\",ra_dns_server.ra_dns_server, yang_name=\"ra-dns-server\", rest_name=\"ra-dns-server\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dns-server-prefix', extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}), is_container='list', yang_name=\"ra-dns-server\", rest_name=\"ra-dns-server\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__ra_dns_server",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for ra_dns_server, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_dns_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ra_dns_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ra_dns_server() directly.
|
[
"Setter",
"method",
"for",
"ra_dns_server",
"mapped",
"from",
"YANG",
"variable",
"/",
"routing_system",
"/",
"interface",
"/",
"ve",
"/",
"ipv6",
"/",
"ipv6_nd_ra",
"/",
"ipv6_intf_cmds",
"/",
"nd",
"/",
"ra_dns_server",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_ra_dns_server",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_ra_dns_server",
"()",
"directly",
"."
] |
python
|
train
| 124.818182 |
althonos/pronto
|
pronto/ontology.py
|
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/ontology.py#L539-L558
|
def obo(self):
"""str: the ontology serialized in obo format.
"""
meta = self._obo_meta()
meta = [meta] if meta else []
newline = "\n\n" if six.PY3 else "\n\n".encode('utf-8')
try: # if 'namespace' in self.meta:
return newline.join( meta + [
r.obo for r in self.typedefs
] + [
t.obo for t in self
if t.id.startswith(self.meta['namespace'][0])
])
except KeyError:
return newline.join( meta + [
r.obo for r in self.typedefs
] + [
t.obo for t in self
])
|
[
"def",
"obo",
"(",
"self",
")",
":",
"meta",
"=",
"self",
".",
"_obo_meta",
"(",
")",
"meta",
"=",
"[",
"meta",
"]",
"if",
"meta",
"else",
"[",
"]",
"newline",
"=",
"\"\\n\\n\"",
"if",
"six",
".",
"PY3",
"else",
"\"\\n\\n\"",
".",
"encode",
"(",
"'utf-8'",
")",
"try",
":",
"# if 'namespace' in self.meta:",
"return",
"newline",
".",
"join",
"(",
"meta",
"+",
"[",
"r",
".",
"obo",
"for",
"r",
"in",
"self",
".",
"typedefs",
"]",
"+",
"[",
"t",
".",
"obo",
"for",
"t",
"in",
"self",
"if",
"t",
".",
"id",
".",
"startswith",
"(",
"self",
".",
"meta",
"[",
"'namespace'",
"]",
"[",
"0",
"]",
")",
"]",
")",
"except",
"KeyError",
":",
"return",
"newline",
".",
"join",
"(",
"meta",
"+",
"[",
"r",
".",
"obo",
"for",
"r",
"in",
"self",
".",
"typedefs",
"]",
"+",
"[",
"t",
".",
"obo",
"for",
"t",
"in",
"self",
"]",
")"
] |
str: the ontology serialized in obo format.
|
[
"str",
":",
"the",
"ontology",
"serialized",
"in",
"obo",
"format",
"."
] |
python
|
train
| 32.2 |
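A hedged usage sketch for the `obo` property above; the file name is a placeholder:

import pronto

ont = pronto.Ontology('my_ontology.obo')   # path is illustrative
print(ont.obo)   # typedefs first, then terms, separated by blank lines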
axialmarket/fsq
|
fsq/const.py
|
https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/const.py#L31-L60
|
def set_const(const, val):
'''Convenience wrapper to reliably set the value of a constant from
outside of package scope'''
try:
cur = getattr(_c, const)
except AttributeError:
raise FSQEnvError(errno.ENOENT, u'no such constant:'\
u' {0}'.format(const))
except TypeError:
raise TypeError(errno.EINVAL, u'const name must be a string or'\
u' unicode object, not:'\
u' {0}'.format(const.__class__.__name__))
should_be = cur.__class__
try:
if not isinstance(val, should_be):
if should_be is unicode or cur is None:
val = coerce_unicode(val, _c.FSQ_CHARSET)
elif should_be is int and const.endswith('MODE'):
val = int(val, 8)
elif isinstance(cur, numbers.Integral):
val = int(val)
else:
should_be(val)
except (TypeError, ValueError, ):
raise FSQEnvError(errno.EINVAL, u'invalid type for constant {0},'\
u' should be {1}, not:'\
u' {2}'.format(const, should_be.__name__,
val.__class__.__name__))
setattr(_c, const, val)
return val
|
[
"def",
"set_const",
"(",
"const",
",",
"val",
")",
":",
"try",
":",
"cur",
"=",
"getattr",
"(",
"_c",
",",
"const",
")",
"except",
"AttributeError",
":",
"raise",
"FSQEnvError",
"(",
"errno",
".",
"ENOENT",
",",
"u'no such constant:'",
"u' {0}'",
".",
"format",
"(",
"const",
")",
")",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"errno",
".",
"EINVAL",
",",
"u'const name must be a string or'",
"u' unicode object, not:'",
"u' {0}'",
".",
"format",
"(",
"const",
".",
"__class__",
".",
"__name__",
")",
")",
"should_be",
"=",
"cur",
".",
"__class__",
"try",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"should_be",
")",
":",
"if",
"should_be",
"is",
"unicode",
"or",
"cur",
"is",
"None",
":",
"val",
"=",
"coerce_unicode",
"(",
"val",
",",
"_c",
".",
"FSQ_CHARSET",
")",
"elif",
"should_be",
"is",
"int",
"and",
"const",
".",
"endswith",
"(",
"'MODE'",
")",
":",
"val",
"=",
"int",
"(",
"val",
",",
"8",
")",
"elif",
"isinstance",
"(",
"cur",
",",
"numbers",
".",
"Integral",
")",
":",
"val",
"=",
"int",
"(",
"val",
")",
"else",
":",
"should_be",
"(",
"val",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
")",
":",
"raise",
"FSQEnvError",
"(",
"errno",
".",
"EINVAL",
",",
"u'invalid type for constant {0},'",
"u' should be {1}, not:'",
"u' {2}'",
".",
"format",
"(",
"const",
",",
"should_be",
".",
"__name__",
",",
"val",
".",
"__class__",
".",
"__name__",
")",
")",
"setattr",
"(",
"_c",
",",
"const",
",",
"val",
")",
"return",
"val"
] |
Convenience wrapper to reliably set the value of a constant from
outside of package scope
|
[
"Convenience",
"wrapper",
"to",
"reliably",
"set",
"the",
"value",
"of",
"a",
"constant",
"from",
"outside",
"of",
"package",
"scope"
] |
python
|
train
| 41.866667 |
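A hedged Python 2 sketch for `set_const` (the record itself is Python 2: note the `unicode` builtin); both the import path and the constant names are assumptions about what the fsq `_c` module defines:

from fsq.const import set_const   # import path assumed

set_const('FSQ_CHARSET', 'utf-8')    # coerced to unicode
set_const('FSQ_ITEM_MODE', '0640')   # *MODE names parse as octal -> 416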
hugapi/hug
|
hug/decorators.py
|
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/decorators.py#L86-L95
|
def context_factory(apply_globally=False, api=None):
"""A decorator that registers a single hug context factory"""
def decorator(context_factory_):
if apply_globally:
hug.defaults.context_factory = context_factory_
else:
apply_to_api = hug.API(api) if api else hug.api.from_object(context_factory_)
apply_to_api.context_factory = context_factory_
return context_factory_
return decorator
|
[
"def",
"context_factory",
"(",
"apply_globally",
"=",
"False",
",",
"api",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"context_factory_",
")",
":",
"if",
"apply_globally",
":",
"hug",
".",
"defaults",
".",
"context_factory",
"=",
"context_factory_",
"else",
":",
"apply_to_api",
"=",
"hug",
".",
"API",
"(",
"api",
")",
"if",
"api",
"else",
"hug",
".",
"api",
".",
"from_object",
"(",
"context_factory_",
")",
"apply_to_api",
".",
"context_factory",
"=",
"context_factory_",
"return",
"context_factory_",
"return",
"decorator"
] |
A decorator that registers a single hug context factory
|
[
"A",
"decorator",
"that",
"registers",
"a",
"single",
"hug",
"context",
"factory"
] |
python
|
train
| 45 |
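A hedged registration sketch; that hug re-exports this decorator at package level as `hug.context_factory` is an assumption:

import hug

@hug.context_factory(apply_globally=True)
def make_context(*args, **kwargs):
    # Whatever is returned here becomes the context handed to handlers.
    return {'request_id': 0}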
cocaine/cocaine-framework-python
|
cocaine/detail/secadaptor.py
|
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/secadaptor.py#L54-L65
|
def fetch_token(self):
"""Gains token from secure backend service.
:return: Token formatted for Cocaine protocol header.
"""
grant_type = 'client_credentials'
channel = yield self._tvm.ticket_full(
self._client_id, self._client_secret, grant_type, {})
ticket = yield channel.rx.get()
raise gen.Return(self._make_token(ticket))
|
[
"def",
"fetch_token",
"(",
"self",
")",
":",
"grant_type",
"=",
"'client_credentials'",
"channel",
"=",
"yield",
"self",
".",
"_tvm",
".",
"ticket_full",
"(",
"self",
".",
"_client_id",
",",
"self",
".",
"_client_secret",
",",
"grant_type",
",",
"{",
"}",
")",
"ticket",
"=",
"yield",
"channel",
".",
"rx",
".",
"get",
"(",
")",
"raise",
"gen",
".",
"Return",
"(",
"self",
".",
"_make_token",
"(",
"ticket",
")",
")"
] |
Gains token from secure backend service.
:return: Token formatted for Cocaine protocol header.
|
[
"Gains",
"token",
"from",
"secure",
"backend",
"service",
"."
] |
python
|
train
| 32.166667 |
project-rig/rig
|
rig/geometry.py
|
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L331-L371
|
def spinn5_local_eth_coord(x, y, w, h, root_x=0, root_y=0):
"""Get the coordinates of a chip's local ethernet connected chip.
Returns the coordinates of the ethernet connected chip on the same board as
the supplied chip.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
.. warning::
In general, applications should interrogate the machine to determine
which Ethernet connected chip is considered 'local' to a particular
SpiNNaker chip, e.g. using
:py:class:`rig.machine_control.MachineController.get_system_info`::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(si[(3, 2)].local_ethernet_chip)
(0, 0)
:py:func:`.spinn5_local_eth_coord` will always produce the coordinates
of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as
the supplied chip. In future versions of the low-level system software,
some other method of choosing local Ethernet connected chips may be
used.
Parameters
----------
x, y : int
Chip whose coordinates are of interest.
w, h : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12]
return ((x + int(dx)) % w), ((y + int(dy)) % h)
|
[
"def",
"spinn5_local_eth_coord",
"(",
"x",
",",
"y",
",",
"w",
",",
"h",
",",
"root_x",
"=",
"0",
",",
"root_y",
"=",
"0",
")",
":",
"dx",
",",
"dy",
"=",
"SPINN5_ETH_OFFSET",
"[",
"(",
"y",
"-",
"root_y",
")",
"%",
"12",
"]",
"[",
"(",
"x",
"-",
"root_x",
")",
"%",
"12",
"]",
"return",
"(",
"(",
"x",
"+",
"int",
"(",
"dx",
")",
")",
"%",
"w",
")",
",",
"(",
"(",
"y",
"+",
"int",
"(",
"dy",
")",
")",
"%",
"h",
")"
] |
Get the coordinates of a chip's local ethernet connected chip.
Returns the coordinates of the ethernet connected chip on the same board as
the supplied chip.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
.. warning::
In general, applications should interrogate the machine to determine
which Ethernet connected chip is considered 'local' to a particular
SpiNNaker chip, e.g. using
:py:class:`rig.machine_control.MachineController.get_system_info`::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(si[(3, 2)].local_ethernet_chip)
(0, 0)
:py:func:`.spinn5_local_eth_coord` will always produce the coordinates
of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as
the supplied chip. In future versions of the low-level system software,
some other method of choosing local Ethernet connected chips may be
used.
Parameters
----------
x, y : int
Chip whose coordinates are of interest.
w, h : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
|
[
"Get",
"the",
"coordinates",
"of",
"a",
"chip",
"s",
"local",
"ethernet",
"connected",
"chip",
"."
] |
python
|
train
| 39.195122 |
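A worked check mirroring the docstring's own example — a 12x12 machine booted at chip (0, 0), where chip (3, 2) sits on the root board:

# (3, 2) lies on the root board, so its local Ethernet chip is (0, 0),
# matching the MachineController example quoted in the docstring.
x, y = spinn5_local_eth_coord(3, 2, w=12, h=12)
assert (x, y) == (0, 0)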
rameshg87/pyremotevbox
|
pyremotevbox/ZSI/generate/commands.py
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/commands.py#L501-L535
|
def _writepydoc(doc, *args):
"""create pydoc html pages
doc -- destination directory for documents
*args -- modules run thru pydoc
"""
ok = True
if not os.path.isdir(doc):
os.makedirs(doc)
if os.path.curdir not in sys.path:
sys.path.append(os.path.curdir)
for f in args:
if f.startswith('./'): f = f[2:]
name = os.path.sep.join(f.strip('.py').split(os.path.sep))
try:
e = __import__(name)
except Exception,ex:
raise
# _writebrokedoc(doc, ex, name)
# continue
if name.endswith('_client'):
_writeclientdoc(doc, e)
continue
if name.endswith('_types'):
_writetypesdoc(doc, e)
continue
try:
_writedoc(doc, e)
except IndexError,ex:
_writebrokedoc(doc, ex, name)
continue
|
[
"def",
"_writepydoc",
"(",
"doc",
",",
"*",
"args",
")",
":",
"ok",
"=",
"True",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"doc",
")",
":",
"os",
".",
"makedirs",
"(",
"doc",
")",
"if",
"os",
".",
"path",
".",
"curdir",
"not",
"in",
"sys",
".",
"path",
":",
"sys",
".",
"path",
".",
"append",
"(",
"os",
".",
"path",
".",
"curdir",
")",
"for",
"f",
"in",
"args",
":",
"if",
"f",
".",
"startswith",
"(",
"'./'",
")",
":",
"f",
"=",
"f",
"[",
"2",
":",
"]",
"name",
"=",
"os",
".",
"path",
".",
"sep",
".",
"join",
"(",
"f",
".",
"strip",
"(",
"'.py'",
")",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
")",
"try",
":",
"e",
"=",
"__import__",
"(",
"name",
")",
"except",
"Exception",
",",
"ex",
":",
"raise",
"# _writebrokedoc(doc, ex, name)",
"# continue",
"if",
"name",
".",
"endswith",
"(",
"'_client'",
")",
":",
"_writeclientdoc",
"(",
"doc",
",",
"e",
")",
"continue",
"if",
"name",
".",
"endswith",
"(",
"'_types'",
")",
":",
"_writetypesdoc",
"(",
"doc",
",",
"e",
")",
"continue",
"try",
":",
"_writedoc",
"(",
"doc",
",",
"e",
")",
"except",
"IndexError",
",",
"ex",
":",
"_writebrokedoc",
"(",
"doc",
",",
"ex",
",",
"name",
")",
"continue"
] |
create pydoc html pages
doc -- destination directory for documents
*args -- modules run thru pydoc
|
[
"create",
"pydoc",
"html",
"pages",
"doc",
"--",
"destination",
"directory",
"for",
"documents",
"*",
"args",
"--",
"modules",
"run",
"thru",
"pydoc"
] |
python
|
train
| 25.457143 |
consbio/ncdjango
|
ncdjango/geoprocessing/tasks/raster.py
|
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/tasks/raster.py#L82-L96
|
def get_context(self, arr, expr, context):
"""
Returns a context dictionary for use in evaluating the expression.
:param arr: The input array.
:param expr: The input expression.
:param context: Evaluation context.
"""
expression_names = [x for x in self.get_expression_names(expr) if x not in set(context.keys()).union(['i'])]
if len(expression_names) != 1:
raise ValueError('The expression must have exactly one variable.')
return {expression_names[0]: arr}
|
[
"def",
"get_context",
"(",
"self",
",",
"arr",
",",
"expr",
",",
"context",
")",
":",
"expression_names",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"get_expression_names",
"(",
"expr",
")",
"if",
"x",
"not",
"in",
"set",
"(",
"context",
".",
"keys",
"(",
")",
")",
".",
"union",
"(",
"[",
"'i'",
"]",
")",
"]",
"if",
"len",
"(",
"expression_names",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'The expression must have exactly one variable.'",
")",
"return",
"{",
"expression_names",
"[",
"0",
"]",
":",
"arr",
"}"
] |
Returns a context dictionary for use in evaluating the expression.
:param arr: The input array.
:param expr: The input expression.
:param context: Evaluation context.
|
[
"Returns",
"a",
"context",
"dictionary",
"for",
"use",
"in",
"evaluating",
"the",
"expression",
"."
] |
python
|
train
| 35.466667 |
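A hedged sketch; `task` stands for an instance of the raster task class this method belongs to, and the expression grammar is assumed:

import numpy as np

arr = np.arange(4)
# Exactly one free variable ('x') besides the reserved names, so this passes:
ctx = task.get_context(arr, 'x + 1', context={})
print(ctx['x'] is arr)   # True: the lone variable is bound to the input array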
mojaie/chorus
|
chorus/descriptor.py
|
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/descriptor.py#L53-L96
|
def assign_aromatic(mol):
"""Assign aromatic ring
sp2 atom:
pi=1 -> +1
N, O, S, C- -> +2
>C=O, B, C+ -> 0
sp3 atom -> not aromatic
sum of the score satisfies 4n+2 -> aromatic
"""
mol.require("Valence")
mol.require("MinifiedRing")
for ring in mol.rings:
pi_cnt = 0
for r in ring:
if mol.atom(r).pi == 0:
if mol.atom(r).symbol == "C":
if mol.atom(r).charge == 1:
pass
elif mol.atom(r).charge == -1:
pi_cnt += 2
else:
break
elif mol.atom(r).charge == 0:
if mol.atom(r).symbol in ("N", "O", "S"):
pi_cnt += 2
elif mol.atom(r).symbol == "B":
pass
else:
break
else:
break
elif mol.atom(r).pi == 1:
if mol.atom(r).carbonyl_C:
pass
else:
pi_cnt += 1
else:
break
else:
if pi_cnt % 4 == 2:
for u, v in iterator.consecutive(ring + [ring[0]], 2):
mol.atom(u).aromatic = True
mol.bond(u, v).aromatic = True
mol.descriptors.add("Aromatic")
|
[
"def",
"assign_aromatic",
"(",
"mol",
")",
":",
"mol",
".",
"require",
"(",
"\"Valence\"",
")",
"mol",
".",
"require",
"(",
"\"MinifiedRing\"",
")",
"for",
"ring",
"in",
"mol",
".",
"rings",
":",
"pi_cnt",
"=",
"0",
"for",
"r",
"in",
"ring",
":",
"if",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"pi",
"==",
"0",
":",
"if",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"symbol",
"==",
"\"C\"",
":",
"if",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"charge",
"==",
"1",
":",
"pass",
"elif",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"charge",
"==",
"-",
"1",
":",
"pi_cnt",
"+=",
"2",
"else",
":",
"break",
"elif",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"charge",
"==",
"0",
":",
"if",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"symbol",
"in",
"(",
"\"N\"",
",",
"\"O\"",
",",
"\"S\"",
")",
":",
"pi_cnt",
"+=",
"2",
"elif",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"symbol",
"==",
"\"B\"",
":",
"pass",
"else",
":",
"break",
"else",
":",
"break",
"elif",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"pi",
"==",
"1",
":",
"if",
"mol",
".",
"atom",
"(",
"r",
")",
".",
"carbonyl_C",
":",
"pass",
"else",
":",
"pi_cnt",
"+=",
"1",
"else",
":",
"break",
"else",
":",
"if",
"pi_cnt",
"%",
"4",
"==",
"2",
":",
"for",
"u",
",",
"v",
"in",
"iterator",
".",
"consecutive",
"(",
"ring",
"+",
"[",
"ring",
"[",
"0",
"]",
"]",
",",
"2",
")",
":",
"mol",
".",
"atom",
"(",
"u",
")",
".",
"aromatic",
"=",
"True",
"mol",
".",
"bond",
"(",
"u",
",",
"v",
")",
".",
"aromatic",
"=",
"True",
"mol",
".",
"descriptors",
".",
"add",
"(",
"\"Aromatic\"",
")"
] |
Assign aromatic ring
sp2 atom:
pi=1 -> +1
N, O, S, C- -> +2
>C=O, B, C+ -> 0
sp3 atom -> not aromatic
sum of the score satisfies 4n+2 -> aromatic
|
[
"Assign",
"aromatic",
"ring",
"sp2",
"atom",
":",
"pi",
"=",
"1",
"-",
">",
"+",
"1",
"N",
"O",
"S",
"C",
"-",
"-",
">",
"+",
"2",
">",
"C",
"=",
"O",
"B",
"C",
"+",
"-",
">",
"0",
"sp3",
"atom",
"-",
">",
"not",
"aromatic",
"sum",
"of",
"the",
"score",
"satisfies",
"4n",
"+",
"2",
"-",
">",
"aromatic"
] |
python
|
train
| 31.318182 |
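A worked pi count under the scoring table above (comments only, since building a chorus molecule is out of scope for this record):

# benzene: six sp2 carbons, each pi == 1 and not a carbonyl carbon -> +1 each,
# so pi_cnt == 6 == 4*1 + 2 and every ring atom/bond is marked aromatic.
# pyrrole: four pi == 1 carbons (+4) and one pi == 0 neutral N (+2) -> 6,
# so it is aromatic as well; cyclobutadiene scores 4 and fails 4n + 2.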
carpedm20/ndrive
|
ndrive/models.py
|
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L185-L220
|
def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0):
"""Log in Naver and get cookie
Agrs:
user_id: Naver account's login id
password: Naver account's login password
Returns:
True: Login success
False: Login failed
Remarks:
self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES
"""
self.user_id = user_id
self.password = password
if self.user_id == None or self.password == None:
print "[*] Error __init__: user_id and password is needed"
return False
try:
cookie = naver_login(user_id, password)
except:
return False
self.session.cookies.set('NID_AUT', cookie["NID_AUT"])
self.session.cookies.set('NID_SES', cookie["NID_SES"])
s = self.getRegisterUserInfo(svctype, auth)
if s is True:
return True
else:
print "[*] Error getRegisterUserInfo: failed"
return False
|
[
"def",
"login",
"(",
"self",
",",
"user_id",
",",
"password",
",",
"svctype",
"=",
"\"Android NDrive App ver\"",
",",
"auth",
"=",
"0",
")",
":",
"self",
".",
"user_id",
"=",
"user_id",
"self",
".",
"password",
"=",
"password",
"if",
"self",
".",
"user_id",
"==",
"None",
"or",
"self",
".",
"password",
"==",
"None",
":",
"print",
"\"[*] Error __init__: user_id and password is needed\"",
"return",
"False",
"try",
":",
"cookie",
"=",
"naver_login",
"(",
"user_id",
",",
"password",
")",
"except",
":",
"return",
"False",
"self",
".",
"session",
".",
"cookies",
".",
"set",
"(",
"'NID_AUT'",
",",
"cookie",
"[",
"\"NID_AUT\"",
"]",
")",
"self",
".",
"session",
".",
"cookies",
".",
"set",
"(",
"'NID_SES'",
",",
"cookie",
"[",
"\"NID_SES\"",
"]",
")",
"s",
"=",
"self",
".",
"getRegisterUserInfo",
"(",
"svctype",
",",
"auth",
")",
"if",
"s",
"is",
"True",
":",
"return",
"True",
"else",
":",
"print",
"\"[*] Error getRegisterUserInfo: failed\"",
"return",
"False"
] |
Log in Naver and get cookie
Args:
user_id: Naver account's login id
password: Naver account's login password
Returns:
True: Login success
False: Login failed
Remarks:
self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES
|
[
"Log",
"in",
"Naver",
"and",
"get",
"cookie"
] |
python
|
train
| 29.277778 |
yyuu/botornado
|
boto/ec2/connection.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L352-L373
|
def get_image_attribute(self, image_id, attribute='launchPermission'):
"""
Gets an attribute from an image.
:type image_id: string
:param image_id: The Amazon image id for which you want info about
:type attribute: string
:param attribute: The attribute you need information about.
Valid choices are:
* launchPermission
* productCodes
* blockDeviceMapping
:rtype: :class:`boto.ec2.image.ImageAttribute`
:return: An ImageAttribute object representing the value of the
attribute requested
"""
params = {'ImageId' : image_id,
'Attribute' : attribute}
return self.get_object('DescribeImageAttribute', params,
ImageAttribute, verb='POST')
|
[
"def",
"get_image_attribute",
"(",
"self",
",",
"image_id",
",",
"attribute",
"=",
"'launchPermission'",
")",
":",
"params",
"=",
"{",
"'ImageId'",
":",
"image_id",
",",
"'Attribute'",
":",
"attribute",
"}",
"return",
"self",
".",
"get_object",
"(",
"'DescribeImageAttribute'",
",",
"params",
",",
"ImageAttribute",
",",
"verb",
"=",
"'POST'",
")"
] |
Gets an attribute from an image.
:type image_id: string
:param image_id: The Amazon image id for which you want info about
:type attribute: string
:param attribute: The attribute you need information about.
Valid choices are:
* launchPermission
* productCodes
* blockDeviceMapping
:rtype: :class:`boto.ec2.image.ImageAttribute`
:return: An ImageAttribute object representing the value of the
attribute requested
|
[
"Gets",
"an",
"attribute",
"from",
"an",
"image",
"."
] |
python
|
train
| 39.681818 |
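A hedged boto 2 sketch for the method above; region and AMI id are placeholders:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
attr = conn.get_image_attribute('ami-12345678',
                                attribute='launchPermission')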
senaite/senaite.core
|
bika/lims/browser/widgets/reflexrulewidget.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/reflexrulewidget.py#L219-L232
|
def _get_sorted_cond_keys(self, keys_list):
"""
This function returns only the elements starting with
'analysisservice-' in 'keys_list'. The returned list is sorted by the
index appended to the end of each element
"""
# The names can be found in reflexrulewidget.pt inside the
# conditionscontainer div.
cond_list = []
for key in keys_list:
if key.startswith('analysisservice-'):
cond_list.append(key)
cond_list.sort()
return cond_list
|
[
"def",
"_get_sorted_cond_keys",
"(",
"self",
",",
"keys_list",
")",
":",
"# The names can be found in reflexrulewidget.pt inside the",
"# conditionscontainer div.",
"cond_list",
"=",
"[",
"]",
"for",
"key",
"in",
"keys_list",
":",
"if",
"key",
".",
"startswith",
"(",
"'analysisservice-'",
")",
":",
"cond_list",
".",
"append",
"(",
"key",
")",
"cond_list",
".",
"sort",
"(",
")",
"return",
"cond_list"
] |
This function returns only the elements starting with
'analysisservice-' in 'keys_list'. The returned list is sorted by the
index appended to the end of each element
|
[
"This",
"function",
"returns",
"only",
"the",
"elements",
"starting",
"with",
"analysisservice",
"-",
"in",
"keys_list",
".",
"The",
"returned",
"list",
"is",
"sorted",
"by",
"the",
"index",
"appended",
"to",
"the",
"end",
"of",
"each",
"element"
] |
python
|
train
| 38.428571 |
rsheftel/raccoon
|
raccoon/series.py
|
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/series.py#L170-L195
|
def get_slice(self, start_index=None, stop_index=None, as_list=False):
"""
For sorted Series will return either a Series or list of all of the rows where the index is greater than
or equal to the start_index if provided and less than or equal to the stop_index if provided. If either the
start or stop index is None then will include from the first or last element, similar to standard python
slice of [:5] or [5:]. Both end points are considered inclusive.
:param start_index: lowest index value to include, or None to start from the first row
:param stop_index: highest index value to include, or None to end at the last row
:param as_list: if True then return a list of the indexes and values
:return: Series or tuple of (index list, values list)
"""
if not self._sort:
raise RuntimeError('Can only use get_slice on sorted Series')
start_location = bisect_left(self._index, start_index) if start_index is not None else None
stop_location = bisect_right(self._index, stop_index) if stop_index is not None else None
index = self._index[start_location:stop_location]
data = self._data[start_location:stop_location]
if as_list:
return index, data
else:
return Series(data=data, index=index, data_name=self._data_name, index_name=self._index_name,
sort=self._sort)
|
[
"def",
"get_slice",
"(",
"self",
",",
"start_index",
"=",
"None",
",",
"stop_index",
"=",
"None",
",",
"as_list",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_sort",
":",
"raise",
"RuntimeError",
"(",
"'Can only use get_slice on sorted Series'",
")",
"start_location",
"=",
"bisect_left",
"(",
"self",
".",
"_index",
",",
"start_index",
")",
"if",
"start_index",
"is",
"not",
"None",
"else",
"None",
"stop_location",
"=",
"bisect_right",
"(",
"self",
".",
"_index",
",",
"stop_index",
")",
"if",
"stop_index",
"is",
"not",
"None",
"else",
"None",
"index",
"=",
"self",
".",
"_index",
"[",
"start_location",
":",
"stop_location",
"]",
"data",
"=",
"self",
".",
"_data",
"[",
"start_location",
":",
"stop_location",
"]",
"if",
"as_list",
":",
"return",
"index",
",",
"data",
"else",
":",
"return",
"Series",
"(",
"data",
"=",
"data",
",",
"index",
"=",
"index",
",",
"data_name",
"=",
"self",
".",
"_data_name",
",",
"index_name",
"=",
"self",
".",
"_index_name",
",",
"sort",
"=",
"self",
".",
"_sort",
")"
] |
For sorted Series will return either a Series or list of all of the rows where the index is greater than
or equal to the start_index if provided and less than or equal to the stop_index if provided. If either the
start or stop index is None then will include from the first or last element, similar to standard python
slice of [:5] or [5:]. Both end points are considered inclusive.
:param start_index: lowest index value to include, or None to start from the first row
:param stop_index: highest index value to include, or None to end at the last row
:param as_list: if True then return a list of the indexes and values
:return: Series or tuple of (index list, values list)
|
[
"For",
"sorted",
"Series",
"will",
"return",
"either",
"a",
"Series",
"or",
"list",
"of",
"all",
"of",
"the",
"rows",
"where",
"the",
"index",
"is",
"greater",
"than",
"or",
"equal",
"to",
"the",
"start_index",
"if",
"provided",
"and",
"less",
"than",
"or",
"equal",
"to",
"the",
"stop_index",
"if",
"provided",
".",
"If",
"either",
"the",
"start",
"or",
"stop",
"index",
"is",
"None",
"then",
"will",
"include",
"from",
"the",
"first",
"or",
"last",
"element",
"similar",
"to",
"standard",
"python",
"slide",
"of",
"[",
":",
"5",
"]",
"or",
"[",
":",
"5",
"]",
".",
"Both",
"end",
"points",
"are",
"considered",
"inclusive",
"."
] |
python
|
train
| 55.346154 |
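An illustrative slice on a small sorted Series (values are made up):

from raccoon import Series

srs = Series(data=[10, 20, 30, 40], index=[1, 2, 3, 4], sort=True)

sub = srs.get_slice(start_index=2, stop_index=3)    # rows 2..3, inclusive
idx, vals = srs.get_slice(2, 3, as_list=True)       # ([2, 3], [20, 30])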
apache/spark
|
python/pyspark/sql/column.py
|
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L81-L87
|
def _unary_op(name, doc="unary operator"):
""" Create a method for given unary operator """
def _(self):
jc = getattr(self._jc, name)()
return Column(jc)
_.__doc__ = doc
return _
|
[
"def",
"_unary_op",
"(",
"name",
",",
"doc",
"=",
"\"unary operator\"",
")",
":",
"def",
"_",
"(",
"self",
")",
":",
"jc",
"=",
"getattr",
"(",
"self",
".",
"_jc",
",",
"name",
")",
"(",
")",
"return",
"Column",
"(",
"jc",
")",
"_",
".",
"__doc__",
"=",
"doc",
"return",
"_"
] |
Create a method for given unary operator
|
[
"Create",
"a",
"method",
"for",
"given",
"unary",
"operator"
] |
python
|
train
| 29.142857 |
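A sketch of how the factory above is wired into pyspark's Column class; the snippet is illustrative, not the full class:

class Column(object):
    def __init__(self, jc):
        self._jc = jc   # the underlying JVM column

    # each class attribute becomes a method delegating to the JVM object
    isNull = _unary_op('isNull', 'True if the current expression is null.')
    isNotNull = _unary_op('isNotNull',
                          'True if the current expression is NOT null.')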
lemieuxl/pyGenClean
|
pyGenClean/LaTeX/auto_report.py
|
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/LaTeX/auto_report.py#L27-L181
|
def create_report(outdirname, report_filename, **kwargs):
"""Creates a LaTeX report.
:param report_filename: the name of the file.
:param outdirname: the name of the output directory.
:type report_filename: str
:type outdirname: str
"""
# Checking the required variables
if "steps" in kwargs:
assert "descriptions" in kwargs
assert "long_descriptions" in kwargs
assert "steps_filename" not in kwargs
else:
assert "steps_filename" in kwargs
assert "descriptions" not in kwargs
assert "long_descriptions" not in kwargs
assert "summaries" in kwargs
assert "background" in kwargs
assert "project_name" in kwargs
assert "summary_fn" in kwargs
assert "report_title" in kwargs
assert "report_author" in kwargs
assert "initial_files" in kwargs
assert "final_nb_markers" in kwargs
assert "final_nb_samples" in kwargs
assert "final_files" in kwargs
assert "plink_version" in kwargs
assert "graphic_paths_fn" in kwargs
# Formatting the background section
background_section = _format_background(kwargs["background"])
# Writing the method steps to a separate file (for access later)
steps_filename = None
if "steps_filename" in kwargs:
steps_filename = kwargs["steps_filename"]
else:
steps_filename = os.path.join(outdirname, "steps_summary.tex")
with open(steps_filename, "w") as o_file:
zipped = zip(kwargs["steps"], kwargs["descriptions"],
kwargs["long_descriptions"])
for step, desc, long_desc in zipped:
if desc.endswith("."):
desc = desc[:-1]
step = step.replace("_", r"\_")
to_print = latex.item(desc)
to_print += " [{}].".format(latex.texttt(step))
if long_desc is not None:
to_print += " " + long_desc
print >>o_file, latex.wrap_lines(to_print) + "\n"
# Adding the content of the results section
result_summaries = []
for name in kwargs["summaries"]:
full_path = os.path.abspath(name)
if os.path.isfile(full_path):
rel_path = os.path.relpath(full_path, outdirname)
result_summaries.append(re.sub(r"\\", "/", rel_path))
# Reading the initial_files file
initial_files = None
with open(kwargs["initial_files"], "r") as i_file:
initial_files = i_file.read().splitlines()
# Reading the final_files file
final_files = None
with open(kwargs["final_files"], "r") as i_file:
final_files = [i.split("\t")[0] for i in i_file.read().splitlines()]
# Adding the bibliography content
biblio_entry = latex.bib_entry(
name="pyGenClean",
authors="Lemieux Perreault LP, Provost S, Legault MA, Barhdadi A, "
r"Dub\'e MP",
title="pyGenClean: efficient tool for genetic data clean up before "
"association testing",
journal="Bioinformatics",
year="2013",
volume="29",
number="13",
pages="1704--1705",
) + "\n" * 2 + latex.bib_entry(
name="plink",
authors="Purcell S, Neale B, Todd-Brown K, Thomas L, Ferreira MAR, "
"Bender D, Maller J, Sklar P, de Bakker PIW, Daly MJ, Sham PC",
title="PLINK: a tool set for whole-genome association and "
"population-based linkage analyses",
journal="American Journal of Human Genetics",
year="2007",
volume="81",
number="3",
pages="559--575",
) + "\n" * 2 + latex.bib_entry(
name="bafRegress",
authors=r"Goo J, Matthew F, Kurt NH, Jane MR, Kimberly FD, "
r"Gon{\c{c}}alo RA, Michael B, Hyun Min K",
title="Detecting and estimating contamination of human DNA samples in "
"sequencing and array-based genotype data",
journal="The American Journal of Human Genetics",
year="2012",
volume="91",
number="5",
pages="839--848",
)
# Getting the template
main_template = latex.jinja2_env.get_template("main_document.tex")
# Getting the data
today = datetime.today()
# Reading the graphics path
graphic_paths = []
if kwargs["graphic_paths_fn"] is not None:
with open(kwargs["graphic_paths_fn"], "r") as i_file:
graphic_paths = [
re.sub(r"\\", "/", path) + ("" if path.endswith("/") else "/")
for path in i_file.read().splitlines()
]
try:
with open(report_filename, "w") as i_file:
# Rendering the template
print >>i_file, main_template.render(
project_name=latex.sanitize_tex(kwargs["project_name"]),
month=today.strftime("%B"),
day=today.day,
year=today.year,
background_content=background_section,
result_summaries=result_summaries,
bibliography_content=biblio_entry,
pygenclean_version=pygenclean_version,
plink_version=kwargs["plink_version"],
steps_filename=os.path.basename(steps_filename),
final_results=_create_summary_table(
kwargs["summary_fn"],
latex.jinja2_env.get_template("summary_table.tex"),
nb_samples=kwargs["final_nb_samples"],
nb_markers=kwargs["final_nb_markers"],
),
report_title=latex.sanitize_tex(kwargs["report_title"]),
report_author=latex.sanitize_tex(kwargs["report_author"]),
initial_files=initial_files,
final_files=final_files,
final_nb_samples=kwargs["final_nb_samples"],
final_nb_markers=kwargs["final_nb_markers"],
graphic_paths=graphic_paths,
)
except IOError:
msg = "{}: could not create report".format(report_filename)
raise ProgramError(msg)
|
[
"def",
"create_report",
"(",
"outdirname",
",",
"report_filename",
",",
"*",
"*",
"kwargs",
")",
":",
"# Checking the required variables",
"if",
"\"steps\"",
"in",
"kwargs",
":",
"assert",
"\"descriptions\"",
"in",
"kwargs",
"assert",
"\"long_descriptions\"",
"in",
"kwargs",
"assert",
"\"steps_filename\"",
"not",
"in",
"kwargs",
"else",
":",
"assert",
"\"steps_filename\"",
"in",
"kwargs",
"assert",
"\"descriptions\"",
"not",
"in",
"kwargs",
"assert",
"\"long_descriptions\"",
"not",
"in",
"kwargs",
"assert",
"\"summaries\"",
"in",
"kwargs",
"assert",
"\"background\"",
"in",
"kwargs",
"assert",
"\"project_name\"",
"in",
"kwargs",
"assert",
"\"summary_fn\"",
"in",
"kwargs",
"assert",
"\"report_title\"",
"in",
"kwargs",
"assert",
"\"report_author\"",
"in",
"kwargs",
"assert",
"\"initial_files\"",
"in",
"kwargs",
"assert",
"\"final_nb_markers\"",
"in",
"kwargs",
"assert",
"\"final_nb_samples\"",
"in",
"kwargs",
"assert",
"\"final_files\"",
"in",
"kwargs",
"assert",
"\"plink_version\"",
"in",
"kwargs",
"assert",
"\"graphic_paths_fn\"",
"in",
"kwargs",
"# Formatting the background section",
"background_section",
"=",
"_format_background",
"(",
"kwargs",
"[",
"\"background\"",
"]",
")",
"# Writing the method steps to a separate file (for access later)",
"steps_filename",
"=",
"None",
"if",
"\"steps_filename\"",
"in",
"kwargs",
":",
"steps_filename",
"=",
"kwargs",
"[",
"\"steps_filename\"",
"]",
"else",
":",
"steps_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdirname",
",",
"\"steps_summary.tex\"",
")",
"with",
"open",
"(",
"steps_filename",
",",
"\"w\"",
")",
"as",
"o_file",
":",
"zipped",
"=",
"zip",
"(",
"kwargs",
"[",
"\"steps\"",
"]",
",",
"kwargs",
"[",
"\"descriptions\"",
"]",
",",
"kwargs",
"[",
"\"long_descriptions\"",
"]",
")",
"for",
"step",
",",
"desc",
",",
"long_desc",
"in",
"zipped",
":",
"if",
"desc",
".",
"endswith",
"(",
"\".\"",
")",
":",
"desc",
"=",
"desc",
"[",
":",
"-",
"1",
"]",
"step",
"=",
"step",
".",
"replace",
"(",
"\"_\"",
",",
"r\"\\_\"",
")",
"to_print",
"=",
"latex",
".",
"item",
"(",
"desc",
")",
"to_print",
"+=",
"\" [{}].\"",
".",
"format",
"(",
"latex",
".",
"texttt",
"(",
"step",
")",
")",
"if",
"long_desc",
"is",
"not",
"None",
":",
"to_print",
"+=",
"\" \"",
"+",
"long_desc",
"print",
">>",
"o_file",
",",
"latex",
".",
"wrap_lines",
"(",
"to_print",
")",
"+",
"\"\\n\"",
"# Adding the content of the results section",
"result_summaries",
"=",
"[",
"]",
"for",
"name",
"in",
"kwargs",
"[",
"\"summaries\"",
"]",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"rel_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"full_path",
",",
"outdirname",
")",
"result_summaries",
".",
"append",
"(",
"re",
".",
"sub",
"(",
"r\"\\\\\"",
",",
"\"/\"",
",",
"rel_path",
")",
")",
"# Reading the initial_files file",
"initial_files",
"=",
"None",
"with",
"open",
"(",
"kwargs",
"[",
"\"initial_files\"",
"]",
",",
"\"r\"",
")",
"as",
"i_file",
":",
"initial_files",
"=",
"i_file",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"# Reading the final_files file",
"final_files",
"=",
"None",
"with",
"open",
"(",
"kwargs",
"[",
"\"final_files\"",
"]",
",",
"\"r\"",
")",
"as",
"i_file",
":",
"final_files",
"=",
"[",
"i",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"i_file",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"]",
"# Adding the bibliography content",
"biblio_entry",
"=",
"latex",
".",
"bib_entry",
"(",
"name",
"=",
"\"pyGenClean\"",
",",
"authors",
"=",
"\"Lemieux Perreault LP, Provost S, Legault MA, Barhdadi A, \"",
"r\"Dub\\'e MP\"",
",",
"title",
"=",
"\"pyGenClean: efficient tool for genetic data clean up before \"",
"\"association testing\"",
",",
"journal",
"=",
"\"Bioinformatics\"",
",",
"year",
"=",
"\"2013\"",
",",
"volume",
"=",
"\"29\"",
",",
"number",
"=",
"\"13\"",
",",
"pages",
"=",
"\"1704--1705\"",
",",
")",
"+",
"\"\\n\"",
"*",
"2",
"+",
"latex",
".",
"bib_entry",
"(",
"name",
"=",
"\"plink\"",
",",
"authors",
"=",
"\"Purcell S, Neale B, Todd-Brown K, Thomas L, Ferreira MAR, \"",
"\"Bender D, Maller J, Sklar P, de Bakker PIW, Daly MJ, Sham PC\"",
",",
"title",
"=",
"\"PLINK: a tool set for whole-genome association and \"",
"\"population-based linkage analyses\"",
",",
"journal",
"=",
"\"American Journal of Human Genetics\"",
",",
"year",
"=",
"\"2007\"",
",",
"volume",
"=",
"\"81\"",
",",
"number",
"=",
"\"3\"",
",",
"pages",
"=",
"\"559--575\"",
",",
")",
"+",
"\"\\n\"",
"*",
"2",
"+",
"latex",
".",
"bib_entry",
"(",
"name",
"=",
"\"bafRegress\"",
",",
"authors",
"=",
"r\"Goo J, Matthew F, Kurt NH, Jane MR, Kimberly FD, \"",
"r\"Gon{\\c{c}}alo RA, Michael B, Hyun Min K\"",
",",
"title",
"=",
"\"Detecting and estimating contamination of human DNA samples in \"",
"\"sequencing and array-based genotype data\"",
",",
"journal",
"=",
"\"The American Journal of Human Genetics\"",
",",
"year",
"=",
"\"2012\"",
",",
"volume",
"=",
"\"91\"",
",",
"number",
"=",
"\"5\"",
",",
"pages",
"=",
"\"839--848\"",
",",
")",
"# Getting the template",
"main_template",
"=",
"latex",
".",
"jinja2_env",
".",
"get_template",
"(",
"\"main_document.tex\"",
")",
"# Getting the data",
"today",
"=",
"datetime",
".",
"today",
"(",
")",
"# Reading the graphics path",
"graphic_paths",
"=",
"[",
"]",
"if",
"kwargs",
"[",
"\"graphic_paths_fn\"",
"]",
"is",
"not",
"None",
":",
"with",
"open",
"(",
"kwargs",
"[",
"\"graphic_paths_fn\"",
"]",
",",
"\"r\"",
")",
"as",
"i_file",
":",
"graphic_paths",
"=",
"[",
"re",
".",
"sub",
"(",
"r\"\\\\\"",
",",
"\"/\"",
",",
"path",
")",
"+",
"(",
"\"\"",
"if",
"path",
".",
"endswith",
"(",
"\"/\"",
")",
"else",
"\"/\"",
")",
"for",
"path",
"in",
"i_file",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"]",
"try",
":",
"with",
"open",
"(",
"report_filename",
",",
"\"w\"",
")",
"as",
"i_file",
":",
"# Rendering the template",
"print",
">>",
"i_file",
",",
"main_template",
".",
"render",
"(",
"project_name",
"=",
"latex",
".",
"sanitize_tex",
"(",
"kwargs",
"[",
"\"project_name\"",
"]",
")",
",",
"month",
"=",
"today",
".",
"strftime",
"(",
"\"%B\"",
")",
",",
"day",
"=",
"today",
".",
"day",
",",
"year",
"=",
"today",
".",
"year",
",",
"background_content",
"=",
"background_section",
",",
"result_summaries",
"=",
"result_summaries",
",",
"bibliography_content",
"=",
"biblio_entry",
",",
"pygenclean_version",
"=",
"pygenclean_version",
",",
"plink_version",
"=",
"kwargs",
"[",
"\"plink_version\"",
"]",
",",
"steps_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"steps_filename",
")",
",",
"final_results",
"=",
"_create_summary_table",
"(",
"kwargs",
"[",
"\"summary_fn\"",
"]",
",",
"latex",
".",
"jinja2_env",
".",
"get_template",
"(",
"\"summary_table.tex\"",
")",
",",
"nb_samples",
"=",
"kwargs",
"[",
"\"final_nb_samples\"",
"]",
",",
"nb_markers",
"=",
"kwargs",
"[",
"\"final_nb_markers\"",
"]",
",",
")",
",",
"report_title",
"=",
"latex",
".",
"sanitize_tex",
"(",
"kwargs",
"[",
"\"report_title\"",
"]",
")",
",",
"report_author",
"=",
"latex",
".",
"sanitize_tex",
"(",
"kwargs",
"[",
"\"report_author\"",
"]",
")",
",",
"initial_files",
"=",
"initial_files",
",",
"final_files",
"=",
"final_files",
",",
"final_nb_samples",
"=",
"kwargs",
"[",
"\"final_nb_samples\"",
"]",
",",
"final_nb_markers",
"=",
"kwargs",
"[",
"\"final_nb_markers\"",
"]",
",",
"graphic_paths",
"=",
"graphic_paths",
",",
")",
"except",
"IOError",
":",
"msg",
"=",
"\"{}: could not create report\"",
".",
"format",
"(",
"report_filename",
")",
"raise",
"ProgramError",
"(",
"msg",
")"
] |
Creates a LaTeX report.
:param report_filename: the name of the file.
:param outdirname: the name of the output directory.
:type report_filename: str
:type outdirname: str
|
[
"Creates",
"a",
"LaTeX",
"report",
"."
] |
python
|
train
| 38.341935 |
blockstack/blockstack-core
|
blockstack/lib/nameset/db.py
|
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/db.py#L840-L854
|
def namedb_namespace_fields_check( namespace_rec ):
"""
Given a namespace record, make sure the following fields are present:
* namespace_id
* buckets
Makes the record suitable for insertion/update.
NOTE: MODIFIES namespace_rec
"""
assert namespace_rec.has_key('namespace_id'), "BUG: namespace record has no ID"
assert namespace_rec.has_key('buckets'), 'BUG: missing price buckets'
assert isinstance(namespace_rec['buckets'], str), 'BUG: namespace data is not in canonical form'
return namespace_rec
|
[
"def",
"namedb_namespace_fields_check",
"(",
"namespace_rec",
")",
":",
"assert",
"namespace_rec",
".",
"has_key",
"(",
"'namespace_id'",
")",
",",
"\"BUG: namespace record has no ID\"",
"assert",
"namespace_rec",
".",
"has_key",
"(",
"'buckets'",
")",
",",
"'BUG: missing price buckets'",
"assert",
"isinstance",
"(",
"namespace_rec",
"[",
"'buckets'",
"]",
",",
"str",
")",
",",
"'BUG: namespace data is not in canonical form'",
"return",
"namespace_rec"
] |
Given a namespace record, make sure the following fields are present:
* namespace_id
* buckets
Makes the record suitable for insertion/update.
NOTE: MODIFIES namespace_rec
|
[
"Given",
"a",
"namespace",
"record",
"make",
"sure",
"the",
"following",
"fields",
"are",
"present",
":",
"*",
"namespace_id",
"*",
"buckets"
] |
python
|
train
| 35.466667 |
clinicedc/edc-notification
|
edc_notification/notification/notification.py
|
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/notification/notification.py#L256-L274
|
def sms_recipients(self):
"""Returns a list of recipients subscribed to receive SMS's
for this "notifications" class.
See also: edc_auth.UserProfile.
"""
sms_recipients = []
UserProfile = django_apps.get_model("edc_auth.UserProfile")
for user_profile in UserProfile.objects.filter(
user__is_active=True, user__is_staff=True
):
try:
user_profile.sms_notifications.get(name=self.name)
except ObjectDoesNotExist:
pass
else:
if user_profile.mobile:
sms_recipients.append(user_profile.mobile)
return sms_recipients
|
[
"def",
"sms_recipients",
"(",
"self",
")",
":",
"sms_recipients",
"=",
"[",
"]",
"UserProfile",
"=",
"django_apps",
".",
"get_model",
"(",
"\"edc_auth.UserProfile\"",
")",
"for",
"user_profile",
"in",
"UserProfile",
".",
"objects",
".",
"filter",
"(",
"user__is_active",
"=",
"True",
",",
"user__is_staff",
"=",
"True",
")",
":",
"try",
":",
"user_profile",
".",
"sms_notifications",
".",
"get",
"(",
"name",
"=",
"self",
".",
"name",
")",
"except",
"ObjectDoesNotExist",
":",
"pass",
"else",
":",
"if",
"user_profile",
".",
"mobile",
":",
"sms_recipients",
".",
"append",
"(",
"user_profile",
".",
"mobile",
")",
"return",
"sms_recipients"
] |
Returns a list of recipients subscribed to receive SMS messages
for this "notifications" class.
See also: edc_auth.UserProfile.
|
[
"Returns",
"a",
"list",
"of",
"recipients",
"subscribed",
"to",
"receive",
"SMS",
"s",
"for",
"this",
"notifications",
"class",
"."
] |
python
|
train
| 35.789474 |
eavanvalkenburg/brunt-api
|
brunt/brunt.py
|
https://github.com/eavanvalkenburg/brunt-api/blob/c6bae43f56e0fd8f79b7af67d524611dd104dafa/brunt/brunt.py#L121-L127
|
def _is_logged_in(self):
""" Check whether or not the user is logged in. """
# if the user has not logged in within the last 24 hours, log in again
if not self._http._has_session() or datetime.utcnow() >= self._lastlogin + timedelta(hours=24):
return self._login()
else:
return {}
|
[
"def",
"_is_logged_in",
"(",
"self",
")",
":",
"# if the user has not logged in in 24 hours, relogin",
"if",
"not",
"self",
".",
"_http",
".",
"_has_session",
"(",
")",
"or",
"datetime",
".",
"utcnow",
"(",
")",
">=",
"self",
".",
"_lastlogin",
"+",
"timedelta",
"(",
"hours",
"=",
"24",
")",
":",
"return",
"self",
".",
"_login",
"(",
")",
"else",
":",
"return",
"{",
"}"
] |
Check whether or not the user is logged in.
|
[
"Check",
"whether",
"or",
"not",
"the",
"user",
"is",
"logged",
"in",
"."
] |
python
|
train
| 44.571429 |
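The session-freshness test above is a reusable pattern; a standalone sketch with stand-in names (not Brunt's actual API):

    from datetime import datetime, timedelta

    SESSION_TTL = timedelta(hours=24)

    def needs_login(has_session, last_login):
        # Re-login when there is no session or the last login has gone stale.
        return not has_session or datetime.utcnow() >= last_login + SESSION_TTL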
ipython/ipynb
|
ipynb/utils.py
|
https://github.com/ipython/ipynb/blob/2f1526a447104d7d7b97e2a8ab66bee8d2da90ad/ipynb/utils.py#L43-L70
|
def filter_ast(module_ast):
"""
Filters a given module ast, removing non-whitelisted nodes
It allows only the following top level items:
- imports
- function definitions
- class definitions
- top level assignments where all the targets on the LHS are all caps
"""
def node_predicate(node):
"""
Return true if given node is whitelisted
"""
for an in ALLOWED_NODES:
if isinstance(node, an):
return True
# Recurse through Assign node LHS targets when an id is not specified,
# otherwise check that the id is uppercase
if isinstance(node, ast.Assign):
return all([node_predicate(t) for t in node.targets if not hasattr(t, 'id')]) \
and all([t.id.isupper() for t in node.targets if hasattr(t, 'id')])
return False
module_ast.body = [n for n in module_ast.body if node_predicate(n)]
return module_ast
|
[
"def",
"filter_ast",
"(",
"module_ast",
")",
":",
"def",
"node_predicate",
"(",
"node",
")",
":",
"\"\"\"\n Return true if given node is whitelisted\n \"\"\"",
"for",
"an",
"in",
"ALLOWED_NODES",
":",
"if",
"isinstance",
"(",
"node",
",",
"an",
")",
":",
"return",
"True",
"# Recurse through Assign node LHS targets when an id is not specified,",
"# otherwise check that the id is uppercase",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Assign",
")",
":",
"return",
"all",
"(",
"[",
"node_predicate",
"(",
"t",
")",
"for",
"t",
"in",
"node",
".",
"targets",
"if",
"not",
"hasattr",
"(",
"t",
",",
"'id'",
")",
"]",
")",
"and",
"all",
"(",
"[",
"t",
".",
"id",
".",
"isupper",
"(",
")",
"for",
"t",
"in",
"node",
".",
"targets",
"if",
"hasattr",
"(",
"t",
",",
"'id'",
")",
"]",
")",
"return",
"False",
"module_ast",
".",
"body",
"=",
"[",
"n",
"for",
"n",
"in",
"module_ast",
".",
"body",
"if",
"node_predicate",
"(",
"n",
")",
"]",
"return",
"module_ast"
] |
Filters a given module ast, removing non-whitelisted nodes
It allows only the following top level items:
- imports
- function definitions
- class definitions
- top level assignments where all the targets on the LHS are all caps
|
[
"Filters",
"a",
"given",
"module",
"ast",
"removing",
"non",
"-",
"whitelisted",
"nodes"
] |
python
|
train
| 33.535714 |
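A short end-to-end sketch of applying the filter — ALLOWED_NODES is assumed to whitelist imports and definitions, mirroring the docstring:

    import ast
    from ipynb.utils import filter_ast  # module path per the entry above

    source = "import os\nX = 1\ny = 2\ndef f():\n    return X\n"
    tree = filter_ast(ast.parse(source))
    # 'y = 2' is dropped (lowercase target); the import, 'X = 1', and 'f' survive.
    print([type(node).__name__ for node in tree.body])  # ['Import', 'Assign', 'FunctionDef']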
python-rope/rope
|
rope/contrib/codeassist.py
|
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/contrib/codeassist.py#L43-L58
|
def starting_offset(source_code, offset):
"""Return the offset in which the completion should be inserted
Usually code assist proposals should be inserted like::
completion = proposal.name
result = (source_code[:starting_offset] +
completion + source_code[offset:])
Where starting_offset is the offset returned by this function.
"""
word_finder = worder.Worder(source_code, True)
expression, starting, starting_offset = \
word_finder.get_splitted_primary_before(offset)
return starting_offset
|
[
"def",
"starting_offset",
"(",
"source_code",
",",
"offset",
")",
":",
"word_finder",
"=",
"worder",
".",
"Worder",
"(",
"source_code",
",",
"True",
")",
"expression",
",",
"starting",
",",
"starting_offset",
"=",
"word_finder",
".",
"get_splitted_primary_before",
"(",
"offset",
")",
"return",
"starting_offset"
] |
Return the offset at which the completion should be inserted
Usually code assist proposals should be inserted like::
completion = proposal.name
result = (source_code[:starting_offset] +
completion + source_code[offset:])
Where starting_offset is the offset returned by this function.
|
[
"Return",
"the",
"offset",
"in",
"which",
"the",
"completion",
"should",
"be",
"inserted"
] |
python
|
train
| 34.4375 |
msuozzo/Aduro
|
aduro/manager.py
|
https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/manager.py#L36-L70
|
def detect_events(self, max_attempts=3):
"""Returns a list of `Event`s detected from differences in state
between the current snapshot and the Kindle Library.
`books` and `progress` attributes will be set with the latest API
results upon successful completion of the function.
Returns:
If failed to retrieve progress, None
Else, the list of `Event`s
"""
# Attempt to retrieve current state from KindleAPI
for _ in xrange(max_attempts):
try:
with KindleCloudReaderAPI\
.get_instance(self.uname, self.pword) as kcr:
self.books = kcr.get_library_metadata()
self.progress = kcr.get_library_progress()
except KindleAPIError:
continue
else:
break
else:
return None
# Calculate diffs from new progress
progress_map = {book.asin: self.progress[book.asin].locs[1]
for book in self.books}
new_events = self._snapshot.calc_update_events(progress_map)
update_event = UpdateEvent(datetime.now().replace(microsecond=0))
new_events.append(update_event)
self._event_buf.extend(new_events)
return new_events
|
[
"def",
"detect_events",
"(",
"self",
",",
"max_attempts",
"=",
"3",
")",
":",
"# Attempt to retrieve current state from KindleAPI",
"for",
"_",
"in",
"xrange",
"(",
"max_attempts",
")",
":",
"try",
":",
"with",
"KindleCloudReaderAPI",
".",
"get_instance",
"(",
"self",
".",
"uname",
",",
"self",
".",
"pword",
")",
"as",
"kcr",
":",
"self",
".",
"books",
"=",
"kcr",
".",
"get_library_metadata",
"(",
")",
"self",
".",
"progress",
"=",
"kcr",
".",
"get_library_progress",
"(",
")",
"except",
"KindleAPIError",
":",
"continue",
"else",
":",
"break",
"else",
":",
"return",
"None",
"# Calculate diffs from new progress",
"progress_map",
"=",
"{",
"book",
".",
"asin",
":",
"self",
".",
"progress",
"[",
"book",
".",
"asin",
"]",
".",
"locs",
"[",
"1",
"]",
"for",
"book",
"in",
"self",
".",
"books",
"}",
"new_events",
"=",
"self",
".",
"_snapshot",
".",
"calc_update_events",
"(",
"progress_map",
")",
"update_event",
"=",
"UpdateEvent",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
")",
"new_events",
".",
"append",
"(",
"update_event",
")",
"self",
".",
"_event_buf",
".",
"extend",
"(",
"new_events",
")",
"return",
"new_events"
] |
Returns a list of `Event`s detected from differences in state
between the current snapshot and the Kindle Library.
`books` and `progress` attributes will be set with the latest API
results upon successful completion of the function.
Returns:
If failed to retrieve progress, None
Else, the list of `Event`s
|
[
"Returns",
"a",
"list",
"of",
"Event",
"s",
"detected",
"from",
"differences",
"in",
"state",
"between",
"the",
"current",
"snapshot",
"and",
"the",
"Kindle",
"Library",
"."
] |
python
|
train
| 37.742857 |
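The retry loop above leans on Python's for/else: the else clause runs only when the loop finishes without a break, i.e. when every attempt failed. The idiom in isolation (a generic sketch, with Python 3's range in place of xrange):

    def fetch_with_retries(fetch, max_attempts=3):
        for _ in range(max_attempts):
            try:
                result = fetch()
            except IOError:
                continue
            else:
                break  # success: leave the loop, skipping its else clause
        else:
            # no attempt succeeded, mirroring detect_events returning None
            return None
        return result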
emirozer/fake2db
|
fake2db/sqlite_handler.py
|
https://github.com/emirozer/fake2db/blob/568cf42afb3ac10fc15c4faaa1cdb84fc1f4946c/fake2db/sqlite_handler.py#L140-L161
|
def data_filler_user_agent(self, number_of_rows, conn):
'''creates and fills the table with user agent data
'''
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE user_agent(id TEXT PRIMARY KEY,
ip TEXT, countrycode TEXT, useragent TEXT)
''')
conn.commit()
multi_lines = []
try:
for i in range(0, number_of_rows):
multi_lines.append((rnd_id_generator(self), self.faker.ipv4(), self.faker.country_code(),
self.faker.user_agent()))
cursor.executemany('insert into user_agent values(?,?,?,?)', multi_lines)
conn.commit()
logger.warning('user_agent Commits are successful after write job!', extra=d)
except Exception as e:
logger.error(e, extra=d)
|
[
"def",
"data_filler_user_agent",
"(",
"self",
",",
"number_of_rows",
",",
"conn",
")",
":",
"cursor",
"=",
"conn",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"'''\n CREATE TABLE user_agent(id TEXT PRIMARY KEY,\n ip TEXT, countrycode TEXT, useragent TEXT)\n '''",
")",
"conn",
".",
"commit",
"(",
")",
"multi_lines",
"=",
"[",
"]",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"number_of_rows",
")",
":",
"multi_lines",
".",
"append",
"(",
"(",
"rnd_id_generator",
"(",
"self",
")",
",",
"self",
".",
"faker",
".",
"ipv4",
"(",
")",
",",
"self",
".",
"faker",
".",
"country_code",
"(",
")",
",",
"self",
".",
"faker",
".",
"user_agent",
"(",
")",
")",
")",
"cursor",
".",
"executemany",
"(",
"'insert into user_agent values(?,?,?,?)'",
",",
"multi_lines",
")",
"conn",
".",
"commit",
"(",
")",
"logger",
".",
"warning",
"(",
"'user_agent Commits are successful after write job!'",
",",
"extra",
"=",
"d",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"e",
",",
"extra",
"=",
"d",
")"
] |
creates and fills the table with user agent data
|
[
"creates",
"and",
"fills",
"the",
"table",
"with",
"user",
"agent",
"data"
] |
python
|
train
| 38.772727 |
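The bulk insert above is the standard executemany batching pattern; reduced to a self-contained sqlite3 sketch with made-up rows:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    cursor = conn.cursor()
    cursor.execute('CREATE TABLE user_agent(id TEXT PRIMARY KEY, '
                   'ip TEXT, countrycode TEXT, useragent TEXT)')
    rows = [('id-%d' % i, '10.0.0.%d' % i, 'US', 'demo-agent') for i in range(3)]
    cursor.executemany('insert into user_agent values(?,?,?,?)', rows)
    conn.commit()  # one commit after the whole batch, as in the handler above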
tensorflow/cleverhans
|
cleverhans/utils_tf.py
|
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tf.py#L304-L324
|
def model_argmax(sess, x, predictions, samples, feed=None):
"""
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:return: the argmax output of predictions, i.e. the current predicted class
"""
feed_dict = {x: samples}
if feed is not None:
feed_dict.update(feed)
probabilities = sess.run(predictions, feed_dict)
if samples.shape[0] == 1:
return np.argmax(probabilities)
else:
return np.argmax(probabilities, axis=1)
|
[
"def",
"model_argmax",
"(",
"sess",
",",
"x",
",",
"predictions",
",",
"samples",
",",
"feed",
"=",
"None",
")",
":",
"feed_dict",
"=",
"{",
"x",
":",
"samples",
"}",
"if",
"feed",
"is",
"not",
"None",
":",
"feed_dict",
".",
"update",
"(",
"feed",
")",
"probabilities",
"=",
"sess",
".",
"run",
"(",
"predictions",
",",
"feed_dict",
")",
"if",
"samples",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"return",
"np",
".",
"argmax",
"(",
"probabilities",
")",
"else",
":",
"return",
"np",
".",
"argmax",
"(",
"probabilities",
",",
"axis",
"=",
"1",
")"
] |
Helper function that computes the current class prediction
:param sess: TF session
:param x: the input placeholder
:param predictions: the model's symbolic output
:param samples: numpy array with input samples (dims must match x)
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:return: the argmax output of predictions, i.e. the current predicted class
|
[
"Helper",
"function",
"that",
"computes",
"the",
"current",
"class",
"prediction",
":",
"param",
"sess",
":",
"TF",
"session",
":",
"param",
"x",
":",
"the",
"input",
"placeholder",
":",
"param",
"predictions",
":",
"the",
"model",
"s",
"symbolic",
"output",
":",
"param",
"samples",
":",
"numpy",
"array",
"with",
"input",
"samples",
"(",
"dims",
"must",
"match",
"x",
")",
":",
"param",
"feed",
":",
"An",
"optional",
"dictionary",
"that",
"is",
"appended",
"to",
"the",
"feeding",
"dictionary",
"before",
"the",
"session",
"runs",
".",
"Can",
"be",
"used",
"to",
"feed",
"the",
"learning",
"phase",
"of",
"a",
"Keras",
"model",
"for",
"instance",
".",
":",
"return",
":",
"the",
"argmax",
"output",
"of",
"predictions",
"i",
".",
"e",
".",
"the",
"current",
"predicted",
"class"
] |
python
|
train
| 38.666667 |
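The two return branches differ only in shape; a numpy-only illustration of that split (made-up probabilities):

    import numpy as np

    single = np.array([[0.1, 0.7, 0.2]])       # one sample
    print(np.argmax(single))                   # scalar class index: 1

    batch = np.array([[0.1, 0.7, 0.2],
                      [0.6, 0.3, 0.1]])        # two samples
    print(np.argmax(batch, axis=1))            # per-sample classes: [1 0]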
CSchoel/nolds
|
nolds/measures.py
|
https://github.com/CSchoel/nolds/blob/8a5ecc472d67ac08b571bd68967287668ca9058e/nolds/measures.py#L839-L874
|
def logmid_n(max_n, ratio=1/4.0, nsteps=15):
"""
Creates an array of integers that lie evenly spaced in the "middle" of the
logarithmic scale from 0 to log(max_n).
If max_n is very small and/or nsteps is very large, this may lead to
duplicate values which will be removed from the output.
This function is useful in hurst_rs, because it cuts away both very small
and very large n, both of which can cause problems, and still produces a
logarithmically spaced sequence.
Args:
max_n (int):
largest possible output value (should be the sequence length when used in
hurst_rs)
Kwargs:
ratio (float):
width of the "middle" of the logarithmic interval relative to log(max_n).
For example, for ratio=1/2.0 the logarithm of the resulting values will
lie between 0.25 * log(max_n) and 0.75 * log(max_n).
nsteps (float):
(maximum) number of values to take from the specified range
Returns:
array of int:
a logarithmically spaced sequence of at most nsteps values (may be less,
because only unique values are returned)
"""
l = np.log(max_n)
span = l * ratio
start = l * (1 - ratio) * 0.5
midrange = start + 1.0*np.arange(nsteps)/nsteps*span
nvals = np.round(np.exp(midrange)).astype("int32")
return np.unique(nvals)
|
[
"def",
"logmid_n",
"(",
"max_n",
",",
"ratio",
"=",
"1",
"/",
"4.0",
",",
"nsteps",
"=",
"15",
")",
":",
"l",
"=",
"np",
".",
"log",
"(",
"max_n",
")",
"span",
"=",
"l",
"*",
"ratio",
"start",
"=",
"l",
"*",
"(",
"1",
"-",
"ratio",
")",
"*",
"0.5",
"midrange",
"=",
"start",
"+",
"1.0",
"*",
"np",
".",
"arange",
"(",
"nsteps",
")",
"/",
"nsteps",
"*",
"span",
"nvals",
"=",
"np",
".",
"round",
"(",
"np",
".",
"exp",
"(",
"midrange",
")",
")",
".",
"astype",
"(",
"\"int32\"",
")",
"return",
"np",
".",
"unique",
"(",
"nvals",
")"
] |
Creates an array of integers that lie evenly spaced in the "middle" of the
logarithmic scale from 0 to log(max_n).
If max_n is very small and/or nsteps is very large, this may lead to
duplicate values which will be removed from the output.
This function is useful in hurst_rs, because it cuts away both very small
and very large n, both of which can cause problems, and still produces a
logarithmically spaced sequence.
Args:
max_n (int):
largest possible output value (should be the sequence length when used in
hurst_rs)
Kwargs:
ratio (float):
width of the "middle" of the logarithmic interval relative to log(max_n).
For example, for ratio=1/2.0 the logarithm of the resulting values will
lie between 0.25 * log(max_n) and 0.75 * log(max_n).
nsteps (float):
(maximum) number of values to take from the specified range
Returns:
array of int:
a logarithmically spaced sequence of at most nsteps values (may be less,
because only unique values are returned)
|
[
"Creates",
"an",
"array",
"of",
"integers",
"that",
"lie",
"evenly",
"spaced",
"in",
"the",
"middle",
"of",
"the",
"logarithmic",
"scale",
"from",
"0",
"to",
"log",
"(",
"max_n",
")",
"."
] |
python
|
train
| 35.361111 |
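A quick worked example: for max_n=1000 with the defaults, log(1000) ≈ 6.908, so the midrange runs from about 2.590 to 4.202 in log space and the outputs land between roughly 13 and 67:

    from nolds.measures import logmid_n  # module path per the entry above

    print(logmid_n(1000))
    # approximately: [13 15 17 19 21 24 27 30 33 38 42 47 53 60 67]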
kmmbvnr/django-any
|
django_any/models.py
|
https://github.com/kmmbvnr/django-any/blob/6f64ebd05476e2149e2e71deeefbb10f8edfc412/django_any/models.py#L86-L98
|
def any_positiveinteger_field(field, **kwargs):
"""
A positive integer
>>> result = any_field(models.PositiveIntegerField())
>>> type(result)
<type 'int'>
>>> result > 0
True
"""
min_value = kwargs.get('min_value', 1)
max_value = kwargs.get('max_value', 9999)
return xunit.any_int(min_value=min_value, max_value=max_value)
|
[
"def",
"any_positiveinteger_field",
"(",
"field",
",",
"*",
"*",
"kwargs",
")",
":",
"min_value",
"=",
"kwargs",
".",
"get",
"(",
"'min_value'",
",",
"1",
")",
"max_value",
"=",
"kwargs",
".",
"get",
"(",
"'max_value'",
",",
"9999",
")",
"return",
"xunit",
".",
"any_int",
"(",
"min_value",
"=",
"min_value",
",",
"max_value",
"=",
"max_value",
")"
] |
A positive integer
>>> result = any_field(models.PositiveIntegerField())
>>> type(result)
<type 'int'>
>>> result > 0
True
|
[
"An",
"positive",
"integer",
">>>",
"result",
"=",
"any_field",
"(",
"models",
".",
"PositiveIntegerField",
"()",
")",
">>>",
"type",
"(",
"result",
")",
"<type",
"int",
">",
">>>",
"result",
">",
"0",
"True"
] |
python
|
test
| 28.307692 |
cmbruns/pyopenvr
|
src/openvr/__init__.py
|
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5780-L5794
|
def takeStereoScreenshot(self, pchPreviewFilename, pchVRFilename):
"""
Tells the compositor to take an internal screenshot of
type VRScreenshotType_Stereo. It will take the current
submitted scene textures of the running application and
write them into the preview image and a side-by-side file
for the VR image.
This is similar to request screenshot, but doesn't ever
talk to the application, just takes the shot and submits.
"""
fn = self.function_table.takeStereoScreenshot
pOutScreenshotHandle = ScreenshotHandle_t()
result = fn(byref(pOutScreenshotHandle), pchPreviewFilename, pchVRFilename)
return result, pOutScreenshotHandle
|
[
"def",
"takeStereoScreenshot",
"(",
"self",
",",
"pchPreviewFilename",
",",
"pchVRFilename",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"takeStereoScreenshot",
"pOutScreenshotHandle",
"=",
"ScreenshotHandle_t",
"(",
")",
"result",
"=",
"fn",
"(",
"byref",
"(",
"pOutScreenshotHandle",
")",
",",
"pchPreviewFilename",
",",
"pchVRFilename",
")",
"return",
"result",
",",
"pOutScreenshotHandle"
] |
Tells the compositor to take an internal screenshot of
type VRScreenshotType_Stereo. It will take the current
submitted scene textures of the running application and
write them into the preview image and a side-by-side file
for the VR image.
This is similar to request screenshot, but doesn't ever
talk to the application, just takes the shot and submits.
|
[
"Tells",
"the",
"compositor",
"to",
"take",
"an",
"internal",
"screenshot",
"of",
"type",
"VRScreenshotType_Stereo",
".",
"It",
"will",
"take",
"the",
"current",
"submitted",
"scene",
"textures",
"of",
"the",
"running",
"application",
"and",
"write",
"them",
"into",
"the",
"preview",
"image",
"and",
"a",
"side",
"-",
"by",
"-",
"side",
"file",
"for",
"the",
"VR",
"image",
".",
"This",
"is",
"similar",
"to",
"request",
"screenshot",
"but",
"doesn",
"t",
"ever",
"talk",
"to",
"the",
"application",
"just",
"takes",
"the",
"shot",
"and",
"submits",
"."
] |
python
|
train
| 48.6 |
angr/angr
|
angr/state_plugins/symbolic_memory.py
|
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/symbolic_memory.py#L412-L427
|
def concretize_read_addr(self, addr, strategies=None):
"""
Concretizes an address meant for reading.
:param addr: An expression for the address.
:param strategies: A list of concretization strategies (to override the default).
:returns: A list of concrete addresses.
"""
if isinstance(addr, int):
return [ addr ]
elif not self.state.solver.symbolic(addr):
return [ self.state.solver.eval(addr) ]
strategies = self.read_strategies if strategies is None else strategies
return self._apply_concretization_strategies(addr, strategies, 'load')
|
[
"def",
"concretize_read_addr",
"(",
"self",
",",
"addr",
",",
"strategies",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"addr",
",",
"int",
")",
":",
"return",
"[",
"addr",
"]",
"elif",
"not",
"self",
".",
"state",
".",
"solver",
".",
"symbolic",
"(",
"addr",
")",
":",
"return",
"[",
"self",
".",
"state",
".",
"solver",
".",
"eval",
"(",
"addr",
")",
"]",
"strategies",
"=",
"self",
".",
"read_strategies",
"if",
"strategies",
"is",
"None",
"else",
"strategies",
"return",
"self",
".",
"_apply_concretization_strategies",
"(",
"addr",
",",
"strategies",
",",
"'load'",
")"
] |
Concretizes an address meant for reading.
:param addr: An expression for the address.
:param strategies: A list of concretization strategies (to override the default).
:returns: A list of concrete addresses.
|
[
"Concretizes",
"an",
"address",
"meant",
"for",
"reading",
"."
] |
python
|
train
| 42 |
tkf/python-epc
|
epc/core.py
|
https://github.com/tkf/python-epc/blob/f3673ae5c35f20a0f71546ab34c28e3dde3595c1/epc/core.py#L75-L86
|
def get_method(self, name):
"""
Get registered method called `name`.
"""
try:
return self.funcs[name]
except KeyError:
try:
return self.instance._get_method(name)
except AttributeError:
return SimpleXMLRPCServer.resolve_dotted_attribute(
self.instance, name, self.allow_dotted_names)
|
[
"def",
"get_method",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"return",
"self",
".",
"funcs",
"[",
"name",
"]",
"except",
"KeyError",
":",
"try",
":",
"return",
"self",
".",
"instance",
".",
"_get_method",
"(",
"name",
")",
"except",
"AttributeError",
":",
"return",
"SimpleXMLRPCServer",
".",
"resolve_dotted_attribute",
"(",
"self",
".",
"instance",
",",
"name",
",",
"self",
".",
"allow_dotted_names",
")"
] |
Get registered method called `name`.
|
[
"Get",
"registered",
"method",
"callend",
"name",
"."
] |
python
|
train
| 33.416667 |
blockadeio/analyst_toolbench
|
blockade/common/utils.py
|
https://github.com/blockadeio/analyst_toolbench/blob/159b6f8cf8a91c5ff050f1579636ea90ab269863/blockade/common/utils.py#L101-L114
|
def get_logger(name):
"""Get a logging instance we can use."""
import logging
import sys
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
shandler = logging.StreamHandler(sys.stdout)
fmt = ""
fmt += '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s'
fmtr = logging.Formatter(fmt)
shandler.setFormatter(fmtr)
logger.addHandler(shandler)
return logger
|
[
"def",
"get_logger",
"(",
"name",
")",
":",
"import",
"logging",
"import",
"sys",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"shandler",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"fmt",
"=",
"\"\"",
"fmt",
"+=",
"'\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'",
"fmt",
"+=",
"'%(lineno)d %(asctime)s\\033[0m| %(message)s'",
"fmtr",
"=",
"logging",
".",
"Formatter",
"(",
"fmt",
")",
"shandler",
".",
"setFormatter",
"(",
"fmtr",
")",
"logger",
".",
"addHandler",
"(",
"shandler",
")",
"return",
"logger"
] |
Get a logging instance we can use.
|
[
"Get",
"a",
"logging",
"instance",
"we",
"can",
"use",
"."
] |
python
|
train
| 32.785714 |
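Typical usage is one call per module, after which the result behaves like any stdlib logger:

    from blockade.common.utils import get_logger  # module path per the entry above

    logger = get_logger('blockade.demo')
    logger.info('colorized, caller-annotated line on stdout')

One caveat of this design: a second call with the same name attaches a second StreamHandler, so each record would then be printed twice.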
DataBiosphere/toil
|
src/toil/cwl/cwltoil.py
|
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/cwl/cwltoil.py#L345-L365
|
def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False):
"""Update a file object so that the location is a reference to the toil file
store, writing it to the file store if necessary.
"""
if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"):
return
if uf["location"] in fileindex:
uf["location"] = fileindex[uf["location"]]
return
if not uf["location"] and uf["path"]:
uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"])
if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]):
if skip_broken:
return
else:
raise cwltool.errors.WorkflowException(
"File is missing: %s" % uf["location"])
uf["location"] = write_file(
uploadfunc, fileindex, existing, uf["location"])
|
[
"def",
"uploadFile",
"(",
"uploadfunc",
",",
"fileindex",
",",
"existing",
",",
"uf",
",",
"skip_broken",
"=",
"False",
")",
":",
"if",
"uf",
"[",
"\"location\"",
"]",
".",
"startswith",
"(",
"\"toilfs:\"",
")",
"or",
"uf",
"[",
"\"location\"",
"]",
".",
"startswith",
"(",
"\"_:\"",
")",
":",
"return",
"if",
"uf",
"[",
"\"location\"",
"]",
"in",
"fileindex",
":",
"uf",
"[",
"\"location\"",
"]",
"=",
"fileindex",
"[",
"uf",
"[",
"\"location\"",
"]",
"]",
"return",
"if",
"not",
"uf",
"[",
"\"location\"",
"]",
"and",
"uf",
"[",
"\"path\"",
"]",
":",
"uf",
"[",
"\"location\"",
"]",
"=",
"schema_salad",
".",
"ref_resolver",
".",
"file_uri",
"(",
"uf",
"[",
"\"path\"",
"]",
")",
"if",
"uf",
"[",
"\"location\"",
"]",
".",
"startswith",
"(",
"\"file://\"",
")",
"and",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"uf",
"[",
"\"location\"",
"]",
"[",
"7",
":",
"]",
")",
":",
"if",
"skip_broken",
":",
"return",
"else",
":",
"raise",
"cwltool",
".",
"errors",
".",
"WorkflowException",
"(",
"\"File is missing: %s\"",
"%",
"uf",
"[",
"\"location\"",
"]",
")",
"uf",
"[",
"\"location\"",
"]",
"=",
"write_file",
"(",
"uploadfunc",
",",
"fileindex",
",",
"existing",
",",
"uf",
"[",
"\"location\"",
"]",
")"
] |
Update a file object so that the location is a reference to the toil file
store, writing it to the file store if necessary.
|
[
"Update",
"a",
"file",
"object",
"so",
"that",
"the",
"location",
"is",
"a",
"reference",
"to",
"the",
"toil",
"file",
"store",
"writing",
"it",
"to",
"the",
"file",
"store",
"if",
"necessary",
"."
] |
python
|
train
| 40.47619 |
yyuu/botornado
|
boto/pyami/installers/__init__.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/pyami/installers/__init__.py#L30-L34
|
def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None):
"""
Add an entry to the system crontab.
"""
raise NotImplementedError
|
[
"def",
"add_cron",
"(",
"self",
",",
"name",
",",
"minute",
",",
"hour",
",",
"mday",
",",
"month",
",",
"wday",
",",
"who",
",",
"command",
",",
"env",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] |
Add an entry to the system crontab.
|
[
"Add",
"an",
"entry",
"to",
"the",
"system",
"crontab",
"."
] |
python
|
train
| 36 |