| repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition |
|---|---|---|---|---|---|---|---|---|
| string (7-54 chars) | string (4-192 chars) | string (87-284 chars) | string (78-104k chars) | list | string (1-46.9k chars) | list | string (1 class) | string (3 classes) |
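The rows below follow this schema one record at a time. As a minimal sketch of how such records might be consumed programmatically, the snippet below iterates over the columns named above; the JSON Lines storage format and the file name `code_docstring_corpus.jsonl` are assumptions made for illustration, not part of this dump.

import json

# Hypothetical file name; assumes one JSON object per line with the columns listed above.
with open("code_docstring_corpus.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # 'language' holds a single class here (python); 'partition' is one of 3 splits.
        print(record["repo"], record["path"], record["partition"])
        print(record["docstring_tokens"][:8])    # first few docstring tokens
        print(record["code"].splitlines()[0])    # the function signature line
        break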
teepark/greenhouse
|
greenhouse/ext/psycopg2.py
|
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/ext/psycopg2.py#L12-L32
|
def wait_callback(connection):
"""callback function suitable for ``psycopg2.set_wait_callback``
pass this function to ``psycopg2.extensions.set_wait_callack`` to force any
blocking operations from psycopg2 to only block the current coroutine,
rather than the entire thread or process
to undo the change and return to normal blocking operation, use
`psycopg2.extensions.set_wait_callback(None)``
"""
while 1:
state = connection.poll()
if state == extensions.POLL_OK:
break
elif state == extensions.POLL_READ:
descriptor.wait_fds([(connection.fileno(), 1)])
elif state == extensions.POLL_WRITE:
descriptor.wait_fds([(connection.fileno(), 2)])
else:
raise psycopg2.OperationalError("Bad poll result: %r" % state)
|
[
"def",
"wait_callback",
"(",
"connection",
")",
":",
"while",
"1",
":",
"state",
"=",
"connection",
".",
"poll",
"(",
")",
"if",
"state",
"==",
"extensions",
".",
"POLL_OK",
":",
"break",
"elif",
"state",
"==",
"extensions",
".",
"POLL_READ",
":",
"descriptor",
".",
"wait_fds",
"(",
"[",
"(",
"connection",
".",
"fileno",
"(",
")",
",",
"1",
")",
"]",
")",
"elif",
"state",
"==",
"extensions",
".",
"POLL_WRITE",
":",
"descriptor",
".",
"wait_fds",
"(",
"[",
"(",
"connection",
".",
"fileno",
"(",
")",
",",
"2",
")",
"]",
")",
"else",
":",
"raise",
"psycopg2",
".",
"OperationalError",
"(",
"\"Bad poll result: %r\"",
"%",
"state",
")"
] |
callback function suitable for ``psycopg2.set_wait_callback``
pass this function to ``psycopg2.extensions.set_wait_callack`` to force any
blocking operations from psycopg2 to only block the current coroutine,
rather than the entire thread or process
to undo the change and return to normal blocking operation, use
`psycopg2.extensions.set_wait_callback(None)``
|
[
"callback",
"function",
"suitable",
"for",
"psycopg2",
".",
"set_wait_callback"
] |
python
|
train
|
ulule/django-badgify
|
badgify/recipe.py
|
https://github.com/ulule/django-badgify/blob/1bf233ffeb6293ee659454de7b3794682128b6ca/badgify/recipe.py#L101-L119
|
def can_perform_awarding(self):
"""
Checks if we can perform awarding process (is ``user_ids`` property
defined? Does Badge object exists? and so on). If we can perform db
operations safely, returns ``True``. Otherwise, ``False``.
"""
if not self.user_ids:
logger.debug(
'✘ Badge %s: no users to check (empty user_ids property)',
self.slug)
return False
if not self.badge:
logger.debug(
'✘ Badge %s: does not exist in the database (run badgify_sync badges)',
self.slug)
return False
return True
|
[
"def",
"can_perform_awarding",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"user_ids",
":",
"logger",
".",
"debug",
"(",
"'✘ Badge %s: no users to check (empty user_ids property)',",
"",
"self",
".",
"slug",
")",
"return",
"False",
"if",
"not",
"self",
".",
"badge",
":",
"logger",
".",
"debug",
"(",
"'✘ Badge %s: does not exist in the database (run badgify_sync badges)',",
"",
"self",
".",
"slug",
")",
"return",
"False",
"return",
"True"
] |
Checks if we can perform awarding process (is ``user_ids`` property
defined? Does Badge object exists? and so on). If we can perform db
operations safely, returns ``True``. Otherwise, ``False``.
|
[
"Checks",
"if",
"we",
"can",
"perform",
"awarding",
"process",
"(",
"is",
"user_ids",
"property",
"defined?",
"Does",
"Badge",
"object",
"exists?",
"and",
"so",
"on",
")",
".",
"If",
"we",
"can",
"perform",
"db",
"operations",
"safely",
"returns",
"True",
".",
"Otherwise",
"False",
"."
] |
python
|
train
|
ciena/afkak
|
afkak/kafkacodec.py
|
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/kafkacodec.py#L348-L369
|
def decode_offset_response(cls, data):
"""
Decode bytes to an :class:`OffsetResponse`
:param bytes data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _i in range(num_topics):
(topic, cur) = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, error, num_offsets), cur) = \
relative_unpack('>ihi', data, cur)
offsets = []
for _i in range(num_offsets):
((offset,), cur) = relative_unpack('>q', data, cur)
offsets.append(offset)
yield OffsetResponse(topic, partition, error, tuple(offsets))
|
[
"def",
"decode_offset_response",
"(",
"cls",
",",
"data",
")",
":",
"(",
"(",
"correlation_id",
",",
"num_topics",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>ii'",
",",
"data",
",",
"0",
")",
"for",
"_i",
"in",
"range",
"(",
"num_topics",
")",
":",
"(",
"topic",
",",
"cur",
")",
"=",
"read_short_ascii",
"(",
"data",
",",
"cur",
")",
"(",
"(",
"num_partitions",
",",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>i'",
",",
"data",
",",
"cur",
")",
"for",
"_i",
"in",
"range",
"(",
"num_partitions",
")",
":",
"(",
"(",
"partition",
",",
"error",
",",
"num_offsets",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>ihi'",
",",
"data",
",",
"cur",
")",
"offsets",
"=",
"[",
"]",
"for",
"_i",
"in",
"range",
"(",
"num_offsets",
")",
":",
"(",
"(",
"offset",
",",
")",
",",
"cur",
")",
"=",
"relative_unpack",
"(",
"'>q'",
",",
"data",
",",
"cur",
")",
"offsets",
".",
"append",
"(",
"offset",
")",
"yield",
"OffsetResponse",
"(",
"topic",
",",
"partition",
",",
"error",
",",
"tuple",
"(",
"offsets",
")",
")"
] |
Decode bytes to an :class:`OffsetResponse`
:param bytes data: bytes to decode
|
[
"Decode",
"bytes",
"to",
"an",
":",
"class",
":",
"OffsetResponse"
] |
python
|
train
|
biocommons/hgvs
|
hgvs/normalizer.py
|
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/normalizer.py#L179-L258
|
def _get_boundary(self, var):
"""Get the position of exon-intron boundary for current variant
"""
if var.type == "r" or var.type == "n":
if self.cross_boundaries:
return 0, float("inf")
else:
# Get genomic sequence access number for this transcript
map_info = self.hdp.get_tx_mapping_options(var.ac)
if not map_info:
raise HGVSDataNotAvailableError(
"No mapping info available for {ac}".format(ac=var.ac))
map_info = [
item for item in map_info if item["alt_aln_method"] == self.alt_aln_method
]
alt_ac = map_info[0]["alt_ac"]
# Get tx info
tx_info = self.hdp.get_tx_info(var.ac, alt_ac, self.alt_aln_method)
cds_start = tx_info["cds_start_i"]
cds_end = tx_info["cds_end_i"]
# Get exon info
exon_info = self.hdp.get_tx_exons(var.ac, alt_ac, self.alt_aln_method)
exon_starts = [exon["tx_start_i"] for exon in exon_info]
exon_ends = [exon["tx_end_i"] for exon in exon_info]
exon_starts.sort()
exon_ends.sort()
exon_starts.append(exon_ends[-1])
exon_ends.append(float("inf"))
# Find the end pos of the exon where the var locates
left = 0
right = float("inf")
# TODO: #242: implement methods to find tx regions
for i, _ in enumerate(exon_starts):
if (var.posedit.pos.start.base - 1 >= exon_starts[i]
and var.posedit.pos.start.base - 1 < exon_ends[i]):
break
for j, _ in enumerate(exon_starts):
if (var.posedit.pos.end.base - 1 >= exon_starts[j]
and var.posedit.pos.end.base - 1 < exon_ends[j]):
break
if i != j:
raise HGVSUnsupportedOperationError(
"Unsupported normalization of variants spanning the exon-intron boundary ({var})"
.format(var=var))
left = exon_starts[i]
right = exon_ends[i]
if cds_start is None:
pass
elif var.posedit.pos.end.base - 1 < cds_start:
right = min(right, cds_start)
elif var.posedit.pos.start.base - 1 >= cds_start:
left = max(left, cds_start)
else:
raise HGVSUnsupportedOperationError(
"Unsupported normalization of variants spanning the UTR-exon boundary ({var})"
.format(var=var))
if cds_end is None:
pass
elif var.posedit.pos.start.base - 1 >= cds_end:
left = max(left, cds_end)
elif var.posedit.pos.end.base - 1 < cds_end:
right = min(right, cds_end)
else:
raise HGVSUnsupportedOperationError(
"Unsupported normalization of variants spanning the exon-UTR boundary ({var})"
.format(var=var))
return left, right
else:
# For variant type of g and m etc.
return 0, float("inf")
|
[
"def",
"_get_boundary",
"(",
"self",
",",
"var",
")",
":",
"if",
"var",
".",
"type",
"==",
"\"r\"",
"or",
"var",
".",
"type",
"==",
"\"n\"",
":",
"if",
"self",
".",
"cross_boundaries",
":",
"return",
"0",
",",
"float",
"(",
"\"inf\"",
")",
"else",
":",
"# Get genomic sequence access number for this transcript",
"map_info",
"=",
"self",
".",
"hdp",
".",
"get_tx_mapping_options",
"(",
"var",
".",
"ac",
")",
"if",
"not",
"map_info",
":",
"raise",
"HGVSDataNotAvailableError",
"(",
"\"No mapping info available for {ac}\"",
".",
"format",
"(",
"ac",
"=",
"var",
".",
"ac",
")",
")",
"map_info",
"=",
"[",
"item",
"for",
"item",
"in",
"map_info",
"if",
"item",
"[",
"\"alt_aln_method\"",
"]",
"==",
"self",
".",
"alt_aln_method",
"]",
"alt_ac",
"=",
"map_info",
"[",
"0",
"]",
"[",
"\"alt_ac\"",
"]",
"# Get tx info",
"tx_info",
"=",
"self",
".",
"hdp",
".",
"get_tx_info",
"(",
"var",
".",
"ac",
",",
"alt_ac",
",",
"self",
".",
"alt_aln_method",
")",
"cds_start",
"=",
"tx_info",
"[",
"\"cds_start_i\"",
"]",
"cds_end",
"=",
"tx_info",
"[",
"\"cds_end_i\"",
"]",
"# Get exon info",
"exon_info",
"=",
"self",
".",
"hdp",
".",
"get_tx_exons",
"(",
"var",
".",
"ac",
",",
"alt_ac",
",",
"self",
".",
"alt_aln_method",
")",
"exon_starts",
"=",
"[",
"exon",
"[",
"\"tx_start_i\"",
"]",
"for",
"exon",
"in",
"exon_info",
"]",
"exon_ends",
"=",
"[",
"exon",
"[",
"\"tx_end_i\"",
"]",
"for",
"exon",
"in",
"exon_info",
"]",
"exon_starts",
".",
"sort",
"(",
")",
"exon_ends",
".",
"sort",
"(",
")",
"exon_starts",
".",
"append",
"(",
"exon_ends",
"[",
"-",
"1",
"]",
")",
"exon_ends",
".",
"append",
"(",
"float",
"(",
"\"inf\"",
")",
")",
"# Find the end pos of the exon where the var locates",
"left",
"=",
"0",
"right",
"=",
"float",
"(",
"\"inf\"",
")",
"# TODO: #242: implement methods to find tx regions",
"for",
"i",
",",
"_",
"in",
"enumerate",
"(",
"exon_starts",
")",
":",
"if",
"(",
"var",
".",
"posedit",
".",
"pos",
".",
"start",
".",
"base",
"-",
"1",
">=",
"exon_starts",
"[",
"i",
"]",
"and",
"var",
".",
"posedit",
".",
"pos",
".",
"start",
".",
"base",
"-",
"1",
"<",
"exon_ends",
"[",
"i",
"]",
")",
":",
"break",
"for",
"j",
",",
"_",
"in",
"enumerate",
"(",
"exon_starts",
")",
":",
"if",
"(",
"var",
".",
"posedit",
".",
"pos",
".",
"end",
".",
"base",
"-",
"1",
">=",
"exon_starts",
"[",
"j",
"]",
"and",
"var",
".",
"posedit",
".",
"pos",
".",
"end",
".",
"base",
"-",
"1",
"<",
"exon_ends",
"[",
"j",
"]",
")",
":",
"break",
"if",
"i",
"!=",
"j",
":",
"raise",
"HGVSUnsupportedOperationError",
"(",
"\"Unsupported normalization of variants spanning the exon-intron boundary ({var})\"",
".",
"format",
"(",
"var",
"=",
"var",
")",
")",
"left",
"=",
"exon_starts",
"[",
"i",
"]",
"right",
"=",
"exon_ends",
"[",
"i",
"]",
"if",
"cds_start",
"is",
"None",
":",
"pass",
"elif",
"var",
".",
"posedit",
".",
"pos",
".",
"end",
".",
"base",
"-",
"1",
"<",
"cds_start",
":",
"right",
"=",
"min",
"(",
"right",
",",
"cds_start",
")",
"elif",
"var",
".",
"posedit",
".",
"pos",
".",
"start",
".",
"base",
"-",
"1",
">=",
"cds_start",
":",
"left",
"=",
"max",
"(",
"left",
",",
"cds_start",
")",
"else",
":",
"raise",
"HGVSUnsupportedOperationError",
"(",
"\"Unsupported normalization of variants spanning the UTR-exon boundary ({var})\"",
".",
"format",
"(",
"var",
"=",
"var",
")",
")",
"if",
"cds_end",
"is",
"None",
":",
"pass",
"elif",
"var",
".",
"posedit",
".",
"pos",
".",
"start",
".",
"base",
"-",
"1",
">=",
"cds_end",
":",
"left",
"=",
"max",
"(",
"left",
",",
"cds_end",
")",
"elif",
"var",
".",
"posedit",
".",
"pos",
".",
"end",
".",
"base",
"-",
"1",
"<",
"cds_end",
":",
"right",
"=",
"min",
"(",
"right",
",",
"cds_end",
")",
"else",
":",
"raise",
"HGVSUnsupportedOperationError",
"(",
"\"Unsupported normalization of variants spanning the exon-UTR boundary ({var})\"",
".",
"format",
"(",
"var",
"=",
"var",
")",
")",
"return",
"left",
",",
"right",
"else",
":",
"# For variant type of g and m etc.",
"return",
"0",
",",
"float",
"(",
"\"inf\"",
")"
] |
Get the position of exon-intron boundary for current variant
|
[
"Get",
"the",
"position",
"of",
"exon",
"-",
"intron",
"boundary",
"for",
"current",
"variant"
] |
python
|
train
|
mitsei/dlkit
|
dlkit/handcar/osid/objects.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/osid/objects.py#L632-L650
|
def set_journal_comment(self, comment=None):
"""Sets a comment.
arg: comment (string): the new comment
raise: InvalidArgument - comment is invalid
raise: NoAccess - metadata.is_readonly() is true
raise: NullArgument - comment is null
compliance: mandatory - This method must be implemented.
"""
if comment is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['comment'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(comment, metadata, array=False):
self._my_map['journalComment']['text'] = comment
else:
raise InvalidArgument()
|
[
"def",
"set_journal_comment",
"(",
"self",
",",
"comment",
"=",
"None",
")",
":",
"if",
"comment",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"metadata",
"=",
"Metadata",
"(",
"*",
"*",
"settings",
".",
"METADATA",
"[",
"'comment'",
"]",
")",
"if",
"metadata",
".",
"is_read_only",
"(",
")",
":",
"raise",
"NoAccess",
"(",
")",
"if",
"self",
".",
"_is_valid_input",
"(",
"comment",
",",
"metadata",
",",
"array",
"=",
"False",
")",
":",
"self",
".",
"_my_map",
"[",
"'journalComment'",
"]",
"[",
"'text'",
"]",
"=",
"comment",
"else",
":",
"raise",
"InvalidArgument",
"(",
")"
] |
Sets a comment.
arg: comment (string): the new comment
raise: InvalidArgument - comment is invalid
raise: NoAccess - metadata.is_readonly() is true
raise: NullArgument - comment is null
compliance: mandatory - This method must be implemented.
|
[
"Sets",
"a",
"comment",
"."
] |
python
|
train
|
elifesciences/elife-article
|
elifearticle/parse.py
|
https://github.com/elifesciences/elife-article/blob/99710c213cd81fe6fd1e5c150d6e20efe2d1e33b/elifearticle/parse.py#L356-L373
|
def build_pub_dates(article, pub_dates):
"convert pub_dates into ArticleDate objects and add them to article"
for pub_date in pub_dates:
# always want a date type, take it from pub-type if must
if pub_date.get('date-type'):
date_instance = ea.ArticleDate(pub_date.get('date-type'),
pub_date.get('date'))
elif pub_date.get('pub-type'):
date_instance = ea.ArticleDate(pub_date.get('pub-type'),
pub_date.get('date'))
# Set more values
utils.set_attr_if_value(date_instance, 'pub_type', pub_date.get('pub-type'))
utils.set_attr_if_value(date_instance, 'publication_format',
pub_date.get('publication-format'))
utils.set_attr_if_value(date_instance, 'day', pub_date.get('day'))
utils.set_attr_if_value(date_instance, 'month', pub_date.get('month'))
utils.set_attr_if_value(date_instance, 'year', pub_date.get('year'))
article.add_date(date_instance)
|
[
"def",
"build_pub_dates",
"(",
"article",
",",
"pub_dates",
")",
":",
"for",
"pub_date",
"in",
"pub_dates",
":",
"# always want a date type, take it from pub-type if must",
"if",
"pub_date",
".",
"get",
"(",
"'date-type'",
")",
":",
"date_instance",
"=",
"ea",
".",
"ArticleDate",
"(",
"pub_date",
".",
"get",
"(",
"'date-type'",
")",
",",
"pub_date",
".",
"get",
"(",
"'date'",
")",
")",
"elif",
"pub_date",
".",
"get",
"(",
"'pub-type'",
")",
":",
"date_instance",
"=",
"ea",
".",
"ArticleDate",
"(",
"pub_date",
".",
"get",
"(",
"'pub-type'",
")",
",",
"pub_date",
".",
"get",
"(",
"'date'",
")",
")",
"# Set more values",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'pub_type'",
",",
"pub_date",
".",
"get",
"(",
"'pub-type'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'publication_format'",
",",
"pub_date",
".",
"get",
"(",
"'publication-format'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'day'",
",",
"pub_date",
".",
"get",
"(",
"'day'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'month'",
",",
"pub_date",
".",
"get",
"(",
"'month'",
")",
")",
"utils",
".",
"set_attr_if_value",
"(",
"date_instance",
",",
"'year'",
",",
"pub_date",
".",
"get",
"(",
"'year'",
")",
")",
"article",
".",
"add_date",
"(",
"date_instance",
")"
] |
convert pub_dates into ArticleDate objects and add them to article
|
[
"convert",
"pub_dates",
"into",
"ArticleDate",
"objects",
"and",
"add",
"them",
"to",
"article"
] |
python
|
train
|
sdispater/pendulum
|
pendulum/parser.py
|
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/parser.py#L23-L112
|
def _parse(text, **options):
"""
Parses a string with the given options.
:param text: The string to parse.
:type text: str
:rtype: mixed
"""
# Handling special cases
if text == "now":
return pendulum.now()
parsed = base_parse(text, **options)
if isinstance(parsed, datetime.datetime):
return pendulum.datetime(
parsed.year,
parsed.month,
parsed.day,
parsed.hour,
parsed.minute,
parsed.second,
parsed.microsecond,
tz=parsed.tzinfo or options.get("tz", UTC),
)
if isinstance(parsed, datetime.date):
return pendulum.date(parsed.year, parsed.month, parsed.day)
if isinstance(parsed, datetime.time):
return pendulum.time(
parsed.hour, parsed.minute, parsed.second, parsed.microsecond
)
if isinstance(parsed, _Interval):
if parsed.duration is not None:
duration = parsed.duration
if parsed.start is not None:
dt = pendulum.instance(parsed.start, tz=options.get("tz", UTC))
return pendulum.period(
dt,
dt.add(
years=duration.years,
months=duration.months,
weeks=duration.weeks,
days=duration.remaining_days,
hours=duration.hours,
minutes=duration.minutes,
seconds=duration.remaining_seconds,
microseconds=duration.microseconds,
),
)
dt = pendulum.instance(parsed.end, tz=options.get("tz", UTC))
return pendulum.period(
dt.subtract(
years=duration.years,
months=duration.months,
weeks=duration.weeks,
days=duration.remaining_days,
hours=duration.hours,
minutes=duration.minutes,
seconds=duration.remaining_seconds,
microseconds=duration.microseconds,
),
dt,
)
return pendulum.period(
pendulum.instance(parsed.start, tz=options.get("tz", UTC)),
pendulum.instance(parsed.end, tz=options.get("tz", UTC)),
)
if CDuration and isinstance(parsed, CDuration):
return pendulum.duration(
years=parsed.years,
months=parsed.months,
weeks=parsed.weeks,
days=parsed.days,
hours=parsed.hours,
minutes=parsed.minutes,
seconds=parsed.seconds,
microseconds=parsed.microseconds,
)
return parsed
|
[
"def",
"_parse",
"(",
"text",
",",
"*",
"*",
"options",
")",
":",
"# Handling special cases",
"if",
"text",
"==",
"\"now\"",
":",
"return",
"pendulum",
".",
"now",
"(",
")",
"parsed",
"=",
"base_parse",
"(",
"text",
",",
"*",
"*",
"options",
")",
"if",
"isinstance",
"(",
"parsed",
",",
"datetime",
".",
"datetime",
")",
":",
"return",
"pendulum",
".",
"datetime",
"(",
"parsed",
".",
"year",
",",
"parsed",
".",
"month",
",",
"parsed",
".",
"day",
",",
"parsed",
".",
"hour",
",",
"parsed",
".",
"minute",
",",
"parsed",
".",
"second",
",",
"parsed",
".",
"microsecond",
",",
"tz",
"=",
"parsed",
".",
"tzinfo",
"or",
"options",
".",
"get",
"(",
"\"tz\"",
",",
"UTC",
")",
",",
")",
"if",
"isinstance",
"(",
"parsed",
",",
"datetime",
".",
"date",
")",
":",
"return",
"pendulum",
".",
"date",
"(",
"parsed",
".",
"year",
",",
"parsed",
".",
"month",
",",
"parsed",
".",
"day",
")",
"if",
"isinstance",
"(",
"parsed",
",",
"datetime",
".",
"time",
")",
":",
"return",
"pendulum",
".",
"time",
"(",
"parsed",
".",
"hour",
",",
"parsed",
".",
"minute",
",",
"parsed",
".",
"second",
",",
"parsed",
".",
"microsecond",
")",
"if",
"isinstance",
"(",
"parsed",
",",
"_Interval",
")",
":",
"if",
"parsed",
".",
"duration",
"is",
"not",
"None",
":",
"duration",
"=",
"parsed",
".",
"duration",
"if",
"parsed",
".",
"start",
"is",
"not",
"None",
":",
"dt",
"=",
"pendulum",
".",
"instance",
"(",
"parsed",
".",
"start",
",",
"tz",
"=",
"options",
".",
"get",
"(",
"\"tz\"",
",",
"UTC",
")",
")",
"return",
"pendulum",
".",
"period",
"(",
"dt",
",",
"dt",
".",
"add",
"(",
"years",
"=",
"duration",
".",
"years",
",",
"months",
"=",
"duration",
".",
"months",
",",
"weeks",
"=",
"duration",
".",
"weeks",
",",
"days",
"=",
"duration",
".",
"remaining_days",
",",
"hours",
"=",
"duration",
".",
"hours",
",",
"minutes",
"=",
"duration",
".",
"minutes",
",",
"seconds",
"=",
"duration",
".",
"remaining_seconds",
",",
"microseconds",
"=",
"duration",
".",
"microseconds",
",",
")",
",",
")",
"dt",
"=",
"pendulum",
".",
"instance",
"(",
"parsed",
".",
"end",
",",
"tz",
"=",
"options",
".",
"get",
"(",
"\"tz\"",
",",
"UTC",
")",
")",
"return",
"pendulum",
".",
"period",
"(",
"dt",
".",
"subtract",
"(",
"years",
"=",
"duration",
".",
"years",
",",
"months",
"=",
"duration",
".",
"months",
",",
"weeks",
"=",
"duration",
".",
"weeks",
",",
"days",
"=",
"duration",
".",
"remaining_days",
",",
"hours",
"=",
"duration",
".",
"hours",
",",
"minutes",
"=",
"duration",
".",
"minutes",
",",
"seconds",
"=",
"duration",
".",
"remaining_seconds",
",",
"microseconds",
"=",
"duration",
".",
"microseconds",
",",
")",
",",
"dt",
",",
")",
"return",
"pendulum",
".",
"period",
"(",
"pendulum",
".",
"instance",
"(",
"parsed",
".",
"start",
",",
"tz",
"=",
"options",
".",
"get",
"(",
"\"tz\"",
",",
"UTC",
")",
")",
",",
"pendulum",
".",
"instance",
"(",
"parsed",
".",
"end",
",",
"tz",
"=",
"options",
".",
"get",
"(",
"\"tz\"",
",",
"UTC",
")",
")",
",",
")",
"if",
"CDuration",
"and",
"isinstance",
"(",
"parsed",
",",
"CDuration",
")",
":",
"return",
"pendulum",
".",
"duration",
"(",
"years",
"=",
"parsed",
".",
"years",
",",
"months",
"=",
"parsed",
".",
"months",
",",
"weeks",
"=",
"parsed",
".",
"weeks",
",",
"days",
"=",
"parsed",
".",
"days",
",",
"hours",
"=",
"parsed",
".",
"hours",
",",
"minutes",
"=",
"parsed",
".",
"minutes",
",",
"seconds",
"=",
"parsed",
".",
"seconds",
",",
"microseconds",
"=",
"parsed",
".",
"microseconds",
",",
")",
"return",
"parsed"
] |
Parses a string with the given options.
:param text: The string to parse.
:type text: str
:rtype: mixed
|
[
"Parses",
"a",
"string",
"with",
"the",
"given",
"options",
"."
] |
python
|
train
|
iotile/coretools
|
transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/awsiot/iotile_transport_awsiot/connection_manager.py#L508-L546
|
def _finish_disconnection_action(self, action):
"""Finish a disconnection attempt
There are two possible outcomes:
- if we were successful at disconnecting, we transition to disconnected
- if we failed at disconnecting, we transition back to idle
Args:
action (ConnectionAction): the action object describing what we are
disconnecting from and what the result of the operation was
"""
success = action.data['success']
conn_key = action.data['id']
if self._get_connection_state(conn_key) != self.Disconnecting:
self._logger.error("Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key=%s", str(conn_key))
return
# Cannot be None since we checked above to make sure it exists
data = self._get_connection(conn_key)
callback = data['callback']
conn_id = data['conn_id']
int_id = data['int_id']
if success is False:
reason = action.data['reason']
if reason is None:
reason = "No reason was given"
data['state'] = self.Idle
data['microstate'] = None
data['callback'] = None
callback(conn_id, self.id, False, reason)
else:
del self._connections[conn_id]
del self._int_connections[int_id]
callback(conn_id, self.id, True, None)
|
[
"def",
"_finish_disconnection_action",
"(",
"self",
",",
"action",
")",
":",
"success",
"=",
"action",
".",
"data",
"[",
"'success'",
"]",
"conn_key",
"=",
"action",
".",
"data",
"[",
"'id'",
"]",
"if",
"self",
".",
"_get_connection_state",
"(",
"conn_key",
")",
"!=",
"self",
".",
"Disconnecting",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"\"Invalid finish_disconnection action on a connection whose state is not Disconnecting, conn_key=%s\"",
",",
"str",
"(",
"conn_key",
")",
")",
"return",
"# Cannot be None since we checked above to make sure it exists",
"data",
"=",
"self",
".",
"_get_connection",
"(",
"conn_key",
")",
"callback",
"=",
"data",
"[",
"'callback'",
"]",
"conn_id",
"=",
"data",
"[",
"'conn_id'",
"]",
"int_id",
"=",
"data",
"[",
"'int_id'",
"]",
"if",
"success",
"is",
"False",
":",
"reason",
"=",
"action",
".",
"data",
"[",
"'reason'",
"]",
"if",
"reason",
"is",
"None",
":",
"reason",
"=",
"\"No reason was given\"",
"data",
"[",
"'state'",
"]",
"=",
"self",
".",
"Idle",
"data",
"[",
"'microstate'",
"]",
"=",
"None",
"data",
"[",
"'callback'",
"]",
"=",
"None",
"callback",
"(",
"conn_id",
",",
"self",
".",
"id",
",",
"False",
",",
"reason",
")",
"else",
":",
"del",
"self",
".",
"_connections",
"[",
"conn_id",
"]",
"del",
"self",
".",
"_int_connections",
"[",
"int_id",
"]",
"callback",
"(",
"conn_id",
",",
"self",
".",
"id",
",",
"True",
",",
"None",
")"
] |
Finish a disconnection attempt
There are two possible outcomes:
- if we were successful at disconnecting, we transition to disconnected
- if we failed at disconnecting, we transition back to idle
Args:
action (ConnectionAction): the action object describing what we are
disconnecting from and what the result of the operation was
|
[
"Finish",
"a",
"disconnection",
"attempt"
] |
python
|
train
|
hydpy-dev/hydpy
|
hydpy/core/devicetools.py
|
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/devicetools.py#L2154-L2229
|
def plot_inputseries(
self, names: Optional[Iterable[str]] = None,
average: bool = False, **kwargs: Any) \
-> None:
"""Plot (the selected) |InputSequence| |IOSequence.series| values.
We demonstrate the functionalities of method |Element.plot_inputseries|
based on the `Lahn` example project:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, _, _ = prepare_full_example_2(lastdate='1997-01-01')
Without any arguments, |Element.plot_inputseries| prints the
time series of all input sequences handled by its |Model| object
directly to the screen (in the given example, |hland_inputs.P|,
|hland_inputs.T|, |hland_inputs.TN|, and |hland_inputs.EPN| of
application model |hland_v1|):
>>> land = hp.elements.land_dill
>>> land.plot_inputseries()
You can use the `pyplot` API of `matplotlib` to modify the figure
or to save it to disk (or print it to the screen, in case the
interactive mode of `matplotlib` is disabled):
>>> from matplotlib import pyplot
>>> from hydpy.docs import figs
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_inputseries.png')
>>> pyplot.close()
.. image:: Element_plot_inputseries.png
Methods |Element.plot_fluxseries| and |Element.plot_stateseries|
work in the same manner. Before applying them, one has at first
to calculate the time series of the |FluxSequence| and
|StateSequence| objects:
>>> hp.doit()
All three methods allow to select certain sequences by passing their
names (here, flux sequences |hland_fluxes.Q0| and |hland_fluxes.Q1|
of |hland_v1|). Additionally, you can pass the keyword arguments
supported by `matplotlib` for modifying the line style:
>>> land.plot_fluxseries(['q0', 'q1'], linewidth=2)
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_fluxseries.png')
>>> pyplot.close()
.. image:: Element_plot_fluxseries.png
For 1-dimensional |IOSequence| objects, all three methods plot the
individual time series in the same colour (here, from the state
sequences |hland_states.SP| and |hland_states.WC| of |hland_v1|):
>>> land.plot_stateseries(['sp', 'wc'])
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_stateseries1.png')
>>> pyplot.close()
.. image:: Element_plot_stateseries1.png
Alternatively, you can print the averaged time series through
passing |True| to the method `average` argument (demonstrated
for the state sequence |hland_states.SM|):
>>> land.plot_stateseries(['sm'], color='grey')
>>> land.plot_stateseries(
... ['sm'], average=True, color='black', linewidth=3)
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_stateseries2.png')
>>> pyplot.close()
.. image:: Element_plot_stateseries2.png
"""
self.__plot(self.model.sequences.inputs, names, average, kwargs)
|
[
"def",
"plot_inputseries",
"(",
"self",
",",
"names",
":",
"Optional",
"[",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"average",
":",
"bool",
"=",
"False",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"None",
":",
"self",
".",
"__plot",
"(",
"self",
".",
"model",
".",
"sequences",
".",
"inputs",
",",
"names",
",",
"average",
",",
"kwargs",
")"
] |
Plot (the selected) |InputSequence| |IOSequence.series| values.
We demonstrate the functionalities of method |Element.plot_inputseries|
based on the `Lahn` example project:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, _, _ = prepare_full_example_2(lastdate='1997-01-01')
Without any arguments, |Element.plot_inputseries| prints the
time series of all input sequences handled by its |Model| object
directly to the screen (in the given example, |hland_inputs.P|,
|hland_inputs.T|, |hland_inputs.TN|, and |hland_inputs.EPN| of
application model |hland_v1|):
>>> land = hp.elements.land_dill
>>> land.plot_inputseries()
You can use the `pyplot` API of `matplotlib` to modify the figure
or to save it to disk (or print it to the screen, in case the
interactive mode of `matplotlib` is disabled):
>>> from matplotlib import pyplot
>>> from hydpy.docs import figs
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_inputseries.png')
>>> pyplot.close()
.. image:: Element_plot_inputseries.png
Methods |Element.plot_fluxseries| and |Element.plot_stateseries|
work in the same manner. Before applying them, one has at first
to calculate the time series of the |FluxSequence| and
|StateSequence| objects:
>>> hp.doit()
All three methods allow to select certain sequences by passing their
names (here, flux sequences |hland_fluxes.Q0| and |hland_fluxes.Q1|
of |hland_v1|). Additionally, you can pass the keyword arguments
supported by `matplotlib` for modifying the line style:
>>> land.plot_fluxseries(['q0', 'q1'], linewidth=2)
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_fluxseries.png')
>>> pyplot.close()
.. image:: Element_plot_fluxseries.png
For 1-dimensional |IOSequence| objects, all three methods plot the
individual time series in the same colour (here, from the state
sequences |hland_states.SP| and |hland_states.WC| of |hland_v1|):
>>> land.plot_stateseries(['sp', 'wc'])
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_stateseries1.png')
>>> pyplot.close()
.. image:: Element_plot_stateseries1.png
Alternatively, you can print the averaged time series through
passing |True| to the method `average` argument (demonstrated
for the state sequence |hland_states.SM|):
>>> land.plot_stateseries(['sm'], color='grey')
>>> land.plot_stateseries(
... ['sm'], average=True, color='black', linewidth=3)
>>> pyplot.savefig(figs.__path__[0] + '/Element_plot_stateseries2.png')
>>> pyplot.close()
.. image:: Element_plot_stateseries2.png
|
[
"Plot",
"(",
"the",
"selected",
")",
"|InputSequence|",
"|IOSequence",
".",
"series|",
"values",
"."
] |
python
|
train
|
openstack/networking-cisco
|
networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py
|
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L907-L919
|
def fill_fw_dict_from_db(self, fw_data):
"""
This routine is called to create a local fw_dict with data from DB.
"""
rule_dict = fw_data.get('rules').get('rules')
fw_dict = {'fw_id': fw_data.get('fw_id'),
'fw_name': fw_data.get('name'),
'firewall_policy_id': fw_data.get('firewall_policy_id'),
'fw_type': fw_data.get('fw_type'),
'router_id': fw_data.get('router_id'), 'rules': {}}
for rule in rule_dict:
fw_dict['rules'][rule] = rule_dict.get(rule)
return fw_dict
|
[
"def",
"fill_fw_dict_from_db",
"(",
"self",
",",
"fw_data",
")",
":",
"rule_dict",
"=",
"fw_data",
".",
"get",
"(",
"'rules'",
")",
".",
"get",
"(",
"'rules'",
")",
"fw_dict",
"=",
"{",
"'fw_id'",
":",
"fw_data",
".",
"get",
"(",
"'fw_id'",
")",
",",
"'fw_name'",
":",
"fw_data",
".",
"get",
"(",
"'name'",
")",
",",
"'firewall_policy_id'",
":",
"fw_data",
".",
"get",
"(",
"'firewall_policy_id'",
")",
",",
"'fw_type'",
":",
"fw_data",
".",
"get",
"(",
"'fw_type'",
")",
",",
"'router_id'",
":",
"fw_data",
".",
"get",
"(",
"'router_id'",
")",
",",
"'rules'",
":",
"{",
"}",
"}",
"for",
"rule",
"in",
"rule_dict",
":",
"fw_dict",
"[",
"'rules'",
"]",
"[",
"rule",
"]",
"=",
"rule_dict",
".",
"get",
"(",
"rule",
")",
"return",
"fw_dict"
] |
This routine is called to create a local fw_dict with data from DB.
|
[
"This",
"routine",
"is",
"called",
"to",
"create",
"a",
"local",
"fw_dict",
"with",
"data",
"from",
"DB",
"."
] |
python
|
train
|
gem/oq-engine
|
openquake/hazardlib/gsim/bradley_2013b.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/bradley_2013b.py#L63-L96
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
if isinstance(imt, PGA):
imt_per = 0.0
else:
imt_per = imt.period
# Fix site parameters for consistent dS2S application.
sites.vs30 = np.array([250])
sites.z1pt0 = np.array([330])
# intensity on a reference soil is used for both mean
# and stddev calculations.
ln_y_ref = self._get_ln_y_ref(rup, dists, C)
# exp1 and exp2 are parts of eq. 7
exp1 = np.exp(C['phi3'] * (sites.vs30.clip(-np.inf, 1130) - 360))
exp2 = np.exp(C['phi3'] * (1130 - 360))
# v1 is the period dependent site term. The Vs30 above which, the
# amplification is constant
v1 = self._get_v1(imt)
# Get log-mean from regular unadjusted model
b13a_mean = self._get_mean(sites, C, ln_y_ref, exp1, exp2, v1)
# Adjust mean and standard deviation
mean = b13a_mean + self._get_dL2L(imt_per) + self._get_dS2S(imt_per)
mean += convert_to_LHC(imt)
stddevs = self._get_adjusted_stddevs(sites, rup, C, stddev_types,
ln_y_ref, exp1, exp2, imt_per)
return mean, stddevs
|
[
"def",
"get_mean_and_stddevs",
"(",
"self",
",",
"sites",
",",
"rup",
",",
"dists",
",",
"imt",
",",
"stddev_types",
")",
":",
"# extracting dictionary of coefficients specific to required",
"# intensity measure type.",
"C",
"=",
"self",
".",
"COEFFS",
"[",
"imt",
"]",
"if",
"isinstance",
"(",
"imt",
",",
"PGA",
")",
":",
"imt_per",
"=",
"0.0",
"else",
":",
"imt_per",
"=",
"imt",
".",
"period",
"# Fix site parameters for consistent dS2S application.",
"sites",
".",
"vs30",
"=",
"np",
".",
"array",
"(",
"[",
"250",
"]",
")",
"sites",
".",
"z1pt0",
"=",
"np",
".",
"array",
"(",
"[",
"330",
"]",
")",
"# intensity on a reference soil is used for both mean",
"# and stddev calculations.",
"ln_y_ref",
"=",
"self",
".",
"_get_ln_y_ref",
"(",
"rup",
",",
"dists",
",",
"C",
")",
"# exp1 and exp2 are parts of eq. 7",
"exp1",
"=",
"np",
".",
"exp",
"(",
"C",
"[",
"'phi3'",
"]",
"*",
"(",
"sites",
".",
"vs30",
".",
"clip",
"(",
"-",
"np",
".",
"inf",
",",
"1130",
")",
"-",
"360",
")",
")",
"exp2",
"=",
"np",
".",
"exp",
"(",
"C",
"[",
"'phi3'",
"]",
"*",
"(",
"1130",
"-",
"360",
")",
")",
"# v1 is the period dependent site term. The Vs30 above which, the",
"# amplification is constant",
"v1",
"=",
"self",
".",
"_get_v1",
"(",
"imt",
")",
"# Get log-mean from regular unadjusted model",
"b13a_mean",
"=",
"self",
".",
"_get_mean",
"(",
"sites",
",",
"C",
",",
"ln_y_ref",
",",
"exp1",
",",
"exp2",
",",
"v1",
")",
"# Adjust mean and standard deviation",
"mean",
"=",
"b13a_mean",
"+",
"self",
".",
"_get_dL2L",
"(",
"imt_per",
")",
"+",
"self",
".",
"_get_dS2S",
"(",
"imt_per",
")",
"mean",
"+=",
"convert_to_LHC",
"(",
"imt",
")",
"stddevs",
"=",
"self",
".",
"_get_adjusted_stddevs",
"(",
"sites",
",",
"rup",
",",
"C",
",",
"stddev_types",
",",
"ln_y_ref",
",",
"exp1",
",",
"exp2",
",",
"imt_per",
")",
"return",
"mean",
",",
"stddevs"
] |
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
|
[
"See",
":",
"meth",
":",
"superclass",
"method",
"<",
".",
"base",
".",
"GroundShakingIntensityModel",
".",
"get_mean_and_stddevs",
">",
"for",
"spec",
"of",
"input",
"and",
"result",
"values",
"."
] |
python
|
train
|
NerdWalletOSS/savage
|
src/savage/api/data.py
|
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/api/data.py#L104-L144
|
def _format_response(rows, fields, unique_col_names):
"""This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
"""
output = []
old_id = None
for row in rows:
id_ = {k: row[k] for k in unique_col_names}
formatted = {k: row[k] for k in row if k != 'data'}
if id_ != old_id: # new unique versioned row
data = row['data']
formatted['data'] = {k: data.get(k) for k in fields}
output.append(formatted)
else:
data = row['data']
pruned_data = {k: data.get(k) for k in fields}
if (
pruned_data != output[-1]['data'] or
row['deleted'] != output[-1]['deleted']
):
formatted['data'] = pruned_data
output.append(formatted)
old_id = id_
return output
|
[
"def",
"_format_response",
"(",
"rows",
",",
"fields",
",",
"unique_col_names",
")",
":",
"output",
"=",
"[",
"]",
"old_id",
"=",
"None",
"for",
"row",
"in",
"rows",
":",
"id_",
"=",
"{",
"k",
":",
"row",
"[",
"k",
"]",
"for",
"k",
"in",
"unique_col_names",
"}",
"formatted",
"=",
"{",
"k",
":",
"row",
"[",
"k",
"]",
"for",
"k",
"in",
"row",
"if",
"k",
"!=",
"'data'",
"}",
"if",
"id_",
"!=",
"old_id",
":",
"# new unique versioned row",
"data",
"=",
"row",
"[",
"'data'",
"]",
"formatted",
"[",
"'data'",
"]",
"=",
"{",
"k",
":",
"data",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"fields",
"}",
"output",
".",
"append",
"(",
"formatted",
")",
"else",
":",
"data",
"=",
"row",
"[",
"'data'",
"]",
"pruned_data",
"=",
"{",
"k",
":",
"data",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"fields",
"}",
"if",
"(",
"pruned_data",
"!=",
"output",
"[",
"-",
"1",
"]",
"[",
"'data'",
"]",
"or",
"row",
"[",
"'deleted'",
"]",
"!=",
"output",
"[",
"-",
"1",
"]",
"[",
"'deleted'",
"]",
")",
":",
"formatted",
"[",
"'data'",
"]",
"=",
"pruned_data",
"output",
".",
"append",
"(",
"formatted",
")",
"old_id",
"=",
"id_",
"return",
"output"
] |
This function will look at the data column of rows and extract the specified fields. It
will also dedup changes where the specified fields have not changed. The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order.
This function will return a list of dictionaries where each dictionary has the following
schema:
{
'updated_at': timestamp of the change,
'version': version number for the change,
'data': a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table.
}
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions.
:param rows: a list of dictionaries representing rows from the ArchiveTable.
:param fields: a list of strings of fields to be extracted from the archived row.
|
[
"This",
"function",
"will",
"look",
"at",
"the",
"data",
"column",
"of",
"rows",
"and",
"extract",
"the",
"specified",
"fields",
".",
"It",
"will",
"also",
"dedup",
"changes",
"where",
"the",
"specified",
"fields",
"have",
"not",
"changed",
".",
"The",
"list",
"of",
"rows",
"should",
"be",
"ordered",
"by",
"the",
"compound",
"primary",
"key",
"which",
"versioning",
"pivots",
"around",
"and",
"be",
"in",
"ascending",
"version",
"order",
"."
] |
python
|
train
|
sebp/scikit-survival
|
sksurv/io/arffwrite.py
|
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/io/arffwrite.py#L130-L146
|
def _write_data(data, fp):
"""Write the data section"""
fp.write("@data\n")
def to_str(x):
if pandas.isnull(x):
return '?'
else:
return str(x)
data = data.applymap(to_str)
n_rows = data.shape[0]
for i in range(n_rows):
str_values = list(data.iloc[i, :].apply(_check_str_array))
line = ",".join(str_values)
fp.write(line)
fp.write("\n")
|
[
"def",
"_write_data",
"(",
"data",
",",
"fp",
")",
":",
"fp",
".",
"write",
"(",
"\"@data\\n\"",
")",
"def",
"to_str",
"(",
"x",
")",
":",
"if",
"pandas",
".",
"isnull",
"(",
"x",
")",
":",
"return",
"'?'",
"else",
":",
"return",
"str",
"(",
"x",
")",
"data",
"=",
"data",
".",
"applymap",
"(",
"to_str",
")",
"n_rows",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"n_rows",
")",
":",
"str_values",
"=",
"list",
"(",
"data",
".",
"iloc",
"[",
"i",
",",
":",
"]",
".",
"apply",
"(",
"_check_str_array",
")",
")",
"line",
"=",
"\",\"",
".",
"join",
"(",
"str_values",
")",
"fp",
".",
"write",
"(",
"line",
")",
"fp",
".",
"write",
"(",
"\"\\n\"",
")"
] |
Write the data section
|
[
"Write",
"the",
"data",
"section"
] |
python
|
train
|
PythonCharmers/python-future
|
src/future/backports/email/__init__.py
|
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/__init__.py#L64-L70
|
def message_from_file(fp, *args, **kws):
"""Read a file and parse its contents into a Message object model.
Optional _class and strict are passed to the Parser constructor.
"""
from future.backports.email.parser import Parser
return Parser(*args, **kws).parse(fp)
|
[
"def",
"message_from_file",
"(",
"fp",
",",
"*",
"args",
",",
"*",
"*",
"kws",
")",
":",
"from",
"future",
".",
"backports",
".",
"email",
".",
"parser",
"import",
"Parser",
"return",
"Parser",
"(",
"*",
"args",
",",
"*",
"*",
"kws",
")",
".",
"parse",
"(",
"fp",
")"
] |
Read a file and parse its contents into a Message object model.
Optional _class and strict are passed to the Parser constructor.
|
[
"Read",
"a",
"file",
"and",
"parse",
"its",
"contents",
"into",
"a",
"Message",
"object",
"model",
"."
] |
python
|
train
|
inveniosoftware-contrib/invenio-classifier
|
invenio_classifier/reader.py
|
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/reader.py#L99-L165
|
def get_regular_expressions(taxonomy_name, rebuild=False, no_cache=False):
"""Return a list of patterns compiled from the RDF/SKOS ontology.
Uses cache if it exists and if the taxonomy hasn't changed.
"""
# Translate the ontology name into a local path. Check if the name
# relates to an existing ontology.
onto_name, onto_path, onto_url = _get_ontology(taxonomy_name)
if not onto_path:
raise TaxonomyError("Unable to locate the taxonomy: '%s'."
% taxonomy_name)
cache_path = _get_cache_path(onto_name)
current_app.logger.debug(
'Taxonomy discovered, now we load it '
'(from cache: %s, onto_path: %s, cache_path: %s)'
% (not no_cache, onto_path, cache_path)
)
if os.access(cache_path, os.R_OK):
if os.access(onto_path, os.R_OK):
if rebuild or no_cache:
current_app.logger.debug(
"Cache generation was manually forced.")
return _build_cache(onto_path, skip_cache=no_cache)
else:
# ontology file not found. Use the cache instead.
current_app.logger.warning(
"The ontology couldn't be located. However "
"a cached version of it is available. Using it as a "
"reference."
)
return _get_cache(cache_path, source_file=onto_path)
if (os.path.getmtime(cache_path) >
os.path.getmtime(onto_path)):
# Cache is more recent than the ontology: use cache.
current_app.logger.debug(
"Normal situation, cache is older than ontology,"
" so we load it from cache"
)
return _get_cache(cache_path, source_file=onto_path)
else:
# Ontology is more recent than the cache: rebuild cache.
current_app.logger.warning(
"Cache '%s' is older than '%s'. "
"We will rebuild the cache" %
(cache_path, onto_path)
)
return _build_cache(onto_path, skip_cache=no_cache)
elif os.access(onto_path, os.R_OK):
if not no_cache and\
os.path.exists(cache_path) and\
not os.access(cache_path, os.W_OK):
raise TaxonomyError('We cannot read/write into: %s. '
'Aborting!' % cache_path)
elif not no_cache and os.path.exists(cache_path):
current_app.logger.warning(
'Cache %s exists, but is not readable!' % cache_path)
current_app.logger.info(
"Cache not available. Building it now: %s" % onto_path)
return _build_cache(onto_path, skip_cache=no_cache)
else:
raise TaxonomyError("We miss both source and cache"
" of the taxonomy: %s" % taxonomy_name)
|
[
"def",
"get_regular_expressions",
"(",
"taxonomy_name",
",",
"rebuild",
"=",
"False",
",",
"no_cache",
"=",
"False",
")",
":",
"# Translate the ontology name into a local path. Check if the name",
"# relates to an existing ontology.",
"onto_name",
",",
"onto_path",
",",
"onto_url",
"=",
"_get_ontology",
"(",
"taxonomy_name",
")",
"if",
"not",
"onto_path",
":",
"raise",
"TaxonomyError",
"(",
"\"Unable to locate the taxonomy: '%s'.\"",
"%",
"taxonomy_name",
")",
"cache_path",
"=",
"_get_cache_path",
"(",
"onto_name",
")",
"current_app",
".",
"logger",
".",
"debug",
"(",
"'Taxonomy discovered, now we load it '",
"'(from cache: %s, onto_path: %s, cache_path: %s)'",
"%",
"(",
"not",
"no_cache",
",",
"onto_path",
",",
"cache_path",
")",
")",
"if",
"os",
".",
"access",
"(",
"cache_path",
",",
"os",
".",
"R_OK",
")",
":",
"if",
"os",
".",
"access",
"(",
"onto_path",
",",
"os",
".",
"R_OK",
")",
":",
"if",
"rebuild",
"or",
"no_cache",
":",
"current_app",
".",
"logger",
".",
"debug",
"(",
"\"Cache generation was manually forced.\"",
")",
"return",
"_build_cache",
"(",
"onto_path",
",",
"skip_cache",
"=",
"no_cache",
")",
"else",
":",
"# ontology file not found. Use the cache instead.",
"current_app",
".",
"logger",
".",
"warning",
"(",
"\"The ontology couldn't be located. However \"",
"\"a cached version of it is available. Using it as a \"",
"\"reference.\"",
")",
"return",
"_get_cache",
"(",
"cache_path",
",",
"source_file",
"=",
"onto_path",
")",
"if",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"cache_path",
")",
">",
"os",
".",
"path",
".",
"getmtime",
"(",
"onto_path",
")",
")",
":",
"# Cache is more recent than the ontology: use cache.",
"current_app",
".",
"logger",
".",
"debug",
"(",
"\"Normal situation, cache is older than ontology,\"",
"\" so we load it from cache\"",
")",
"return",
"_get_cache",
"(",
"cache_path",
",",
"source_file",
"=",
"onto_path",
")",
"else",
":",
"# Ontology is more recent than the cache: rebuild cache.",
"current_app",
".",
"logger",
".",
"warning",
"(",
"\"Cache '%s' is older than '%s'. \"",
"\"We will rebuild the cache\"",
"%",
"(",
"cache_path",
",",
"onto_path",
")",
")",
"return",
"_build_cache",
"(",
"onto_path",
",",
"skip_cache",
"=",
"no_cache",
")",
"elif",
"os",
".",
"access",
"(",
"onto_path",
",",
"os",
".",
"R_OK",
")",
":",
"if",
"not",
"no_cache",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"cache_path",
")",
"and",
"not",
"os",
".",
"access",
"(",
"cache_path",
",",
"os",
".",
"W_OK",
")",
":",
"raise",
"TaxonomyError",
"(",
"'We cannot read/write into: %s. '",
"'Aborting!'",
"%",
"cache_path",
")",
"elif",
"not",
"no_cache",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"cache_path",
")",
":",
"current_app",
".",
"logger",
".",
"warning",
"(",
"'Cache %s exists, but is not readable!'",
"%",
"cache_path",
")",
"current_app",
".",
"logger",
".",
"info",
"(",
"\"Cache not available. Building it now: %s\"",
"%",
"onto_path",
")",
"return",
"_build_cache",
"(",
"onto_path",
",",
"skip_cache",
"=",
"no_cache",
")",
"else",
":",
"raise",
"TaxonomyError",
"(",
"\"We miss both source and cache\"",
"\" of the taxonomy: %s\"",
"%",
"taxonomy_name",
")"
] |
Return a list of patterns compiled from the RDF/SKOS ontology.
Uses cache if it exists and if the taxonomy hasn't changed.
|
[
"Return",
"a",
"list",
"of",
"patterns",
"compiled",
"from",
"the",
"RDF",
"/",
"SKOS",
"ontology",
"."
] |
python
|
train
|
saltstack/salt
|
salt/modules/wordpress.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/wordpress.py#L54-L82
|
def show_plugin(name, path, user):
'''
Show a plugin in a wordpress install and check if it is installed
name
Wordpress plugin name
path
path to wordpress install location
user
user to run the command as
CLI Example:
.. code-block:: bash
salt '*' wordpress.show_plugin HyperDB /var/www/html apache
'''
ret = {'name': name}
resp = __salt__['cmd.shell']((
'wp --path={0} plugin status {1}'
).format(path, name), runas=user).split('\n')
for line in resp:
if 'Status' in line:
ret['status'] = line.split(' ')[-1].lower()
elif 'Version' in line:
ret['version'] = line.split(' ')[-1].lower()
return ret
|
[
"def",
"show_plugin",
"(",
"name",
",",
"path",
",",
"user",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
"}",
"resp",
"=",
"__salt__",
"[",
"'cmd.shell'",
"]",
"(",
"(",
"'wp --path={0} plugin status {1}'",
")",
".",
"format",
"(",
"path",
",",
"name",
")",
",",
"runas",
"=",
"user",
")",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"resp",
":",
"if",
"'Status'",
"in",
"line",
":",
"ret",
"[",
"'status'",
"]",
"=",
"line",
".",
"split",
"(",
"' '",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"elif",
"'Version'",
"in",
"line",
":",
"ret",
"[",
"'version'",
"]",
"=",
"line",
".",
"split",
"(",
"' '",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"return",
"ret"
] |
Show a plugin in a wordpress install and check if it is installed
name
Wordpress plugin name
path
path to wordpress install location
user
user to run the command as
CLI Example:
.. code-block:: bash
salt '*' wordpress.show_plugin HyperDB /var/www/html apache
|
[
"Show",
"a",
"plugin",
"in",
"a",
"wordpress",
"install",
"and",
"check",
"if",
"it",
"is",
"installed"
] |
python
|
train
|
cloudera/cm_api
|
python/src/cm_api/endpoints/services.py
|
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L640-L648
|
def get_all_role_config_groups(self):
"""
Get a list of role configuration groups in the service.
@return: A list of ApiRoleConfigGroup objects.
@since: API v3
"""
return role_config_groups.get_all_role_config_groups(
self._get_resource_root(), self.name, self._get_cluster_name())
|
[
"def",
"get_all_role_config_groups",
"(",
"self",
")",
":",
"return",
"role_config_groups",
".",
"get_all_role_config_groups",
"(",
"self",
".",
"_get_resource_root",
"(",
")",
",",
"self",
".",
"name",
",",
"self",
".",
"_get_cluster_name",
"(",
")",
")"
] |
Get a list of role configuration groups in the service.
@return: A list of ApiRoleConfigGroup objects.
@since: API v3
|
[
"Get",
"a",
"list",
"of",
"role",
"configuration",
"groups",
"in",
"the",
"service",
"."
] |
python
|
train
|
adamrehn/ue4cli
|
ue4cli/UnrealManagerBase.py
|
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/UnrealManagerBase.py#L258-L268
|
def getThirdPartyLibFiles(self, libs):
"""
Retrieves the list of library files for building against the Unreal-bundled versions of the specified third-party libraries
"""
platformDefaults = True
if libs[0] == '--nodefaults':
platformDefaults = False
libs = libs[1:]
details = self.getThirdpartyLibs(libs, includePlatformDefaults=platformDefaults)
return details.getLibraryFiles(self.getEngineRoot(), delimiter='\n')
|
[
"def",
"getThirdPartyLibFiles",
"(",
"self",
",",
"libs",
")",
":",
"platformDefaults",
"=",
"True",
"if",
"libs",
"[",
"0",
"]",
"==",
"'--nodefaults'",
":",
"platformDefaults",
"=",
"False",
"libs",
"=",
"libs",
"[",
"1",
":",
"]",
"details",
"=",
"self",
".",
"getThirdpartyLibs",
"(",
"libs",
",",
"includePlatformDefaults",
"=",
"platformDefaults",
")",
"return",
"details",
".",
"getLibraryFiles",
"(",
"self",
".",
"getEngineRoot",
"(",
")",
",",
"delimiter",
"=",
"'\\n'",
")"
] |
Retrieves the list of library files for building against the Unreal-bundled versions of the specified third-party libraries
|
[
"Retrieves",
"the",
"list",
"of",
"library",
"files",
"for",
"building",
"against",
"the",
"Unreal",
"-",
"bundled",
"versions",
"of",
"the",
"specified",
"third",
"-",
"party",
"libraries"
] |
python
|
train
|
apache/incubator-mxnet
|
python/mxnet/initializer.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/initializer.py#L171-L217
|
def _legacy_init(self, name, arr):
"""Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
"""
warnings.warn(
"\033[91mCalling initializer with init(str, NDArray) has been deprecated." \
"please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m",
DeprecationWarning, stacklevel=3)
if not isinstance(name, string_types):
raise TypeError('name must be string')
if not isinstance(arr, NDArray):
raise TypeError('arr must be NDArray')
if name.startswith('upsampling'):
self._init_bilinear(name, arr)
elif name.startswith('stn_loc') and name.endswith('weight'):
self._init_zero(name, arr)
elif name.startswith('stn_loc') and name.endswith('bias'):
self._init_loc_bias(name, arr)
elif name.endswith('bias'):
self._init_bias(name, arr)
elif name.endswith('gamma'):
self._init_gamma(name, arr)
elif name.endswith('beta'):
self._init_beta(name, arr)
elif name.endswith('weight'):
self._init_weight(name, arr)
elif name.endswith("moving_mean"):
self._init_zero(name, arr)
elif name.endswith("moving_var"):
self._init_one(name, arr)
elif name.endswith("moving_inv_var"):
self._init_zero(name, arr)
elif name.endswith("moving_avg"):
self._init_zero(name, arr)
elif name.endswith('min'):
self._init_zero(name, arr)
elif name.endswith('max'):
self._init_one(name, arr)
else:
self._init_default(name, arr)
|
[
"def",
"_legacy_init",
"(",
"self",
",",
"name",
",",
"arr",
")",
":",
"warnings",
".",
"warn",
"(",
"\"\\033[91mCalling initializer with init(str, NDArray) has been deprecated.\"",
"\"please use init(mx.init.InitDesc(...), NDArray) instead.\\033[0m\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"3",
")",
"if",
"not",
"isinstance",
"(",
"name",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'name must be string'",
")",
"if",
"not",
"isinstance",
"(",
"arr",
",",
"NDArray",
")",
":",
"raise",
"TypeError",
"(",
"'arr must be NDArray'",
")",
"if",
"name",
".",
"startswith",
"(",
"'upsampling'",
")",
":",
"self",
".",
"_init_bilinear",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"startswith",
"(",
"'stn_loc'",
")",
"and",
"name",
".",
"endswith",
"(",
"'weight'",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"startswith",
"(",
"'stn_loc'",
")",
"and",
"name",
".",
"endswith",
"(",
"'bias'",
")",
":",
"self",
".",
"_init_loc_bias",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'bias'",
")",
":",
"self",
".",
"_init_bias",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'gamma'",
")",
":",
"self",
".",
"_init_gamma",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'beta'",
")",
":",
"self",
".",
"_init_beta",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'weight'",
")",
":",
"self",
".",
"_init_weight",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_mean\"",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_var\"",
")",
":",
"self",
".",
"_init_one",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_inv_var\"",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"\"moving_avg\"",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'min'",
")",
":",
"self",
".",
"_init_zero",
"(",
"name",
",",
"arr",
")",
"elif",
"name",
".",
"endswith",
"(",
"'max'",
")",
":",
"self",
".",
"_init_one",
"(",
"name",
",",
"arr",
")",
"else",
":",
"self",
".",
"_init_default",
"(",
"name",
",",
"arr",
")"
] |
Legacy initialization method.
Parameters
----------
name : str
Name of corresponding NDArray.
arr : NDArray
NDArray to be initialized.
|
[
"Legacy",
"initialization",
"method",
"."
] |
python
|
train
|
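The `_legacy_init` entry above routes each parameter to an initializer purely from its name's prefix or suffix. As a minimal, standalone sketch of that routing (plain Python, no mxnet import required; the rule labels below are informal descriptions, not mxnet API names):

def legacy_init_rule(name):
    """Informal restatement of the name-based dispatch used by _legacy_init above."""
    if name.startswith('upsampling'):
        return 'bilinear'
    if name.startswith('stn_loc') and name.endswith('weight'):
        return 'zero'
    if name.startswith('stn_loc') and name.endswith('bias'):
        return 'loc_bias'
    suffix_rules = [('bias', 'bias'), ('gamma', 'gamma'), ('beta', 'beta'),
                    ('weight', 'weight'), ('moving_mean', 'zero'),
                    ('moving_var', 'one'), ('moving_inv_var', 'zero'),
                    ('moving_avg', 'zero'), ('min', 'zero'), ('max', 'one')]
    for suffix, rule in suffix_rules:
        if name.endswith(suffix):
            return rule
    return 'default'

for pname in ('fc1_weight', 'fc1_bias', 'bn0_moving_mean', 'upsampling0_weight'):
    print(pname, '->', legacy_init_rule(pname))   # weight, bias, zero, bilinear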
inspirehep/inspire-crawler
|
inspire_crawler/cli.py
|
https://github.com/inspirehep/inspire-crawler/blob/36d5cc0cd87cc597ba80e680b7de7254b120173a/inspire_crawler/cli.py#L147-L153
|
def get_job_results(id):
"""Get the crawl results from the job."""
crawler_job = models.CrawlerJob.query.filter_by(id=id).one()
_show_file(
file_path=crawler_job.results,
header_name='Results',
)
|
[
"def",
"get_job_results",
"(",
"id",
")",
":",
"crawler_job",
"=",
"models",
".",
"CrawlerJob",
".",
"query",
".",
"filter_by",
"(",
"id",
"=",
"id",
")",
".",
"one",
"(",
")",
"_show_file",
"(",
"file_path",
"=",
"crawler_job",
".",
"results",
",",
"header_name",
"=",
"'Results'",
",",
")"
] |
Get the crawl results from the job.
|
[
"Get",
"the",
"crawl",
"results",
"from",
"the",
"job",
"."
] |
python
|
train
|
ga4gh/ga4gh-server
|
ga4gh/server/datamodel/datasets.py
|
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/datasets.py#L233-L240
|
def getFeatureSetByName(self, name):
"""
Returns the FeatureSet with the specified name, or raises
an exception otherwise.
"""
if name not in self._featureSetNameMap:
raise exceptions.FeatureSetNameNotFoundException(name)
return self._featureSetNameMap[name]
|
[
"def",
"getFeatureSetByName",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_featureSetNameMap",
":",
"raise",
"exceptions",
".",
"FeatureSetNameNotFoundException",
"(",
"name",
")",
"return",
"self",
".",
"_featureSetNameMap",
"[",
"name",
"]"
] |
Returns the FeatureSet with the specified name, or raises
an exception otherwise.
|
[
"Returns",
"the",
"FeatureSet",
"with",
"the",
"specified",
"name",
"or",
"raises",
"an",
"exception",
"otherwise",
"."
] |
python
|
train
|
zsimic/runez
|
src/runez/base.py
|
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/base.py#L39-L55
|
def decode(value, strip=False):
"""Python 2/3 friendly decoding of output.
Args:
value (str | unicode | bytes | None): The value to decode.
strip (bool): If True, `strip()` the returned string. (Default value = False)
Returns:
str: Decoded value, if applicable.
"""
if value is None:
return None
if isinstance(value, bytes) and not isinstance(value, unicode):
value = value.decode("utf-8")
if strip:
return unicode(value).strip()
return unicode(value)
|
[
"def",
"decode",
"(",
"value",
",",
"strip",
"=",
"False",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"bytes",
")",
"and",
"not",
"isinstance",
"(",
"value",
",",
"unicode",
")",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"\"utf-8\"",
")",
"if",
"strip",
":",
"return",
"unicode",
"(",
"value",
")",
".",
"strip",
"(",
")",
"return",
"unicode",
"(",
"value",
")"
] |
Python 2/3 friendly decoding of output.
Args:
value (str | unicode | bytes | None): The value to decode.
strip (bool): If True, `strip()` the returned string. (Default value = False)
Returns:
str: Decoded value, if applicable.
|
[
"Python",
"2",
"/",
"3",
"friendly",
"decoding",
"of",
"output",
"."
] |
python
|
train
|
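For the `decode` helper above, a minimal usage sketch; it assumes `decode` can be imported from `runez.base` as the entry's file path (src/runez/base.py) suggests, which is not verified here.

from runez.base import decode  # import path assumed from the entry above

print(decode(b"caf\xc3\xa9\n", strip=True))  # bytes are UTF-8 decoded, then stripped -> 'café'
print(decode("already text"))                # text passes through unchanged
print(decode(None))                          # None stays None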
ericmjl/pyflatten
|
pyflatten/__init__.py
|
https://github.com/ericmjl/pyflatten/blob/2a8f4a9a3164e4799a4086abe4c69cc89afc3b67/pyflatten/__init__.py#L14-L74
|
def flatten(value):
"""value can be any nesting of tuples, arrays, dicts.
returns 1D numpy array and an unflatten function."""
if isinstance(value, np.ndarray):
def unflatten(vector):
return np.reshape(vector, value.shape)
return np.ravel(value), unflatten
elif isinstance(value, float):
return np.array([value]), lambda x: x[0]
elif isinstance(value, tuple):
if not value:
return np.array([]), lambda x: ()
flattened_first, unflatten_first = flatten(value[0])
flattened_rest, unflatten_rest = flatten(value[1:])
def unflatten(vector):
N = len(flattened_first)
return (unflatten_first(vector[:N]),) + unflatten_rest(vector[N:])
return np.concatenate((flattened_first, flattened_rest)), unflatten
elif isinstance(value, list):
if not value:
return np.array([]), lambda x: []
flattened_first, unflatten_first = flatten(value[0])
flattened_rest, unflatten_rest = flatten(value[1:])
def unflatten(vector):
N = len(flattened_first)
return [unflatten_first(vector[:N])] + unflatten_rest(vector[N:])
return np.concatenate((flattened_first, flattened_rest)), unflatten
elif isinstance(value, dict):
flattened = []
unflatteners = []
lengths = []
keys = []
for k, v in sorted(value.items(), key=itemgetter(0)):
cur_flattened, cur_unflatten = flatten(v)
flattened.append(cur_flattened)
unflatteners.append(cur_unflatten)
lengths.append(len(cur_flattened))
keys.append(k)
def unflatten(vector):
split_ixs = np.cumsum(lengths)
pieces = np.split(vector, split_ixs)
return {key: unflattener(piece)
for piece, unflattener, key in zip(pieces,
unflatteners,
keys)}
return np.concatenate(flattened), unflatten
else:
raise Exception("Don't know how to flatten type {}".format(type(value))
)
|
[
"def",
"flatten",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"np",
".",
"ndarray",
")",
":",
"def",
"unflatten",
"(",
"vector",
")",
":",
"return",
"np",
".",
"reshape",
"(",
"vector",
",",
"value",
".",
"shape",
")",
"return",
"np",
".",
"ravel",
"(",
"value",
")",
",",
"unflatten",
"elif",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"value",
"]",
")",
",",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"if",
"not",
"value",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
",",
"lambda",
"x",
":",
"(",
")",
"flattened_first",
",",
"unflatten_first",
"=",
"flatten",
"(",
"value",
"[",
"0",
"]",
")",
"flattened_rest",
",",
"unflatten_rest",
"=",
"flatten",
"(",
"value",
"[",
"1",
":",
"]",
")",
"def",
"unflatten",
"(",
"vector",
")",
":",
"N",
"=",
"len",
"(",
"flattened_first",
")",
"return",
"(",
"unflatten_first",
"(",
"vector",
"[",
":",
"N",
"]",
")",
",",
")",
"+",
"unflatten_rest",
"(",
"vector",
"[",
"N",
":",
"]",
")",
"return",
"np",
".",
"concatenate",
"(",
"(",
"flattened_first",
",",
"flattened_rest",
")",
")",
",",
"unflatten",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"if",
"not",
"value",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
",",
"lambda",
"x",
":",
"[",
"]",
"flattened_first",
",",
"unflatten_first",
"=",
"flatten",
"(",
"value",
"[",
"0",
"]",
")",
"flattened_rest",
",",
"unflatten_rest",
"=",
"flatten",
"(",
"value",
"[",
"1",
":",
"]",
")",
"def",
"unflatten",
"(",
"vector",
")",
":",
"N",
"=",
"len",
"(",
"flattened_first",
")",
"return",
"[",
"unflatten_first",
"(",
"vector",
"[",
":",
"N",
"]",
")",
"]",
"+",
"unflatten_rest",
"(",
"vector",
"[",
"N",
":",
"]",
")",
"return",
"np",
".",
"concatenate",
"(",
"(",
"flattened_first",
",",
"flattened_rest",
")",
")",
",",
"unflatten",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"flattened",
"=",
"[",
"]",
"unflatteners",
"=",
"[",
"]",
"lengths",
"=",
"[",
"]",
"keys",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
",",
"key",
"=",
"itemgetter",
"(",
"0",
")",
")",
":",
"cur_flattened",
",",
"cur_unflatten",
"=",
"flatten",
"(",
"v",
")",
"flattened",
".",
"append",
"(",
"cur_flattened",
")",
"unflatteners",
".",
"append",
"(",
"cur_unflatten",
")",
"lengths",
".",
"append",
"(",
"len",
"(",
"cur_flattened",
")",
")",
"keys",
".",
"append",
"(",
"k",
")",
"def",
"unflatten",
"(",
"vector",
")",
":",
"split_ixs",
"=",
"np",
".",
"cumsum",
"(",
"lengths",
")",
"pieces",
"=",
"np",
".",
"split",
"(",
"vector",
",",
"split_ixs",
")",
"return",
"{",
"key",
":",
"unflattener",
"(",
"piece",
")",
"for",
"piece",
",",
"unflattener",
",",
"key",
"in",
"zip",
"(",
"pieces",
",",
"unflatteners",
",",
"keys",
")",
"}",
"return",
"np",
".",
"concatenate",
"(",
"flattened",
")",
",",
"unflatten",
"else",
":",
"raise",
"Exception",
"(",
"\"Don't know how to flatten type {}\"",
".",
"format",
"(",
"type",
"(",
"value",
")",
")",
")"
] |
value can be any nesting of tuples, arrays, dicts.
returns 1D numpy array and an unflatten function.
|
[
"value",
"can",
"be",
"any",
"nesting",
"of",
"tuples",
"arrays",
"dicts",
".",
"returns",
"1D",
"numpy",
"array",
"and",
"an",
"unflatten",
"function",
"."
] |
python
|
train
|
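To make the flatten/unflatten contract above concrete, here is a small round-trip sketch. It assumes `flatten` is importable directly from the `pyflatten` package (the entry's path is pyflatten/__init__.py); the parameter names are purely illustrative.

import numpy as np
from pyflatten import flatten

params = {
    'b': 0.5,                            # a bare float
    'layers': [np.ones(2), (2.0, 3.0)],  # nested list/tuple leaves
    'w': np.arange(6.0).reshape(2, 3),   # a 2x3 array
}
vector, unflatten = flatten(params)
print(vector.shape)         # (11,): 1 + 4 + 6 leaves, ordered by sorted dict keys

restored = unflatten(vector * 2.0)
print(restored['w'].shape)  # (2, 3) -- original nesting and shapes are recovered
print(restored['b'])        # 1.0    -- every leaf was doubled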
honzajavorek/redis-collections
|
redis_collections/sortedsets.py
|
https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/sortedsets.py#L237-L248
|
def get_score(self, member, default=None, pipe=None):
"""
Return the score of *member*, or *default* if it is not in the
collection.
"""
pipe = self.redis if pipe is None else pipe
score = pipe.zscore(self.key, self._pickle(member))
if (score is None) and (default is not None):
score = float(default)
return score
|
[
"def",
"get_score",
"(",
"self",
",",
"member",
",",
"default",
"=",
"None",
",",
"pipe",
"=",
"None",
")",
":",
"pipe",
"=",
"self",
".",
"redis",
"if",
"pipe",
"is",
"None",
"else",
"pipe",
"score",
"=",
"pipe",
".",
"zscore",
"(",
"self",
".",
"key",
",",
"self",
".",
"_pickle",
"(",
"member",
")",
")",
"if",
"(",
"score",
"is",
"None",
")",
"and",
"(",
"default",
"is",
"not",
"None",
")",
":",
"score",
"=",
"float",
"(",
"default",
")",
"return",
"score"
] |
Return the score of *member*, or *default* if it is not in the
collection.
|
[
"Return",
"the",
"score",
"of",
"*",
"member",
"*",
"or",
"*",
"default",
"*",
"if",
"it",
"is",
"not",
"in",
"the",
"collection",
"."
] |
python
|
train
|
pkkid/python-plexapi
|
plexapi/myplex.py
|
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/myplex.py#L336-L354
|
def _getSectionIds(self, server, sections):
""" Converts a list of section objects or names to sectionIds needed for library sharing. """
if not sections: return []
# Get a list of all section ids for looking up each section.
allSectionIds = {}
machineIdentifier = server.machineIdentifier if isinstance(server, PlexServer) else server
url = self.PLEXSERVERS.replace('{machineId}', machineIdentifier)
data = self.query(url, self._session.get)
for elem in data[0]:
allSectionIds[elem.attrib.get('id', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('title', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('key', '').lower()] = elem.attrib.get('id')
log.debug(allSectionIds)
# Convert passed in section items to section ids from above lookup
sectionIds = []
for section in sections:
sectionKey = section.key if isinstance(section, LibrarySection) else section
sectionIds.append(allSectionIds[sectionKey.lower()])
return sectionIds
|
[
"def",
"_getSectionIds",
"(",
"self",
",",
"server",
",",
"sections",
")",
":",
"if",
"not",
"sections",
":",
"return",
"[",
"]",
"# Get a list of all section ids for looking up each section.",
"allSectionIds",
"=",
"{",
"}",
"machineIdentifier",
"=",
"server",
".",
"machineIdentifier",
"if",
"isinstance",
"(",
"server",
",",
"PlexServer",
")",
"else",
"server",
"url",
"=",
"self",
".",
"PLEXSERVERS",
".",
"replace",
"(",
"'{machineId}'",
",",
"machineIdentifier",
")",
"data",
"=",
"self",
".",
"query",
"(",
"url",
",",
"self",
".",
"_session",
".",
"get",
")",
"for",
"elem",
"in",
"data",
"[",
"0",
"]",
":",
"allSectionIds",
"[",
"elem",
".",
"attrib",
".",
"get",
"(",
"'id'",
",",
"''",
")",
".",
"lower",
"(",
")",
"]",
"=",
"elem",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"allSectionIds",
"[",
"elem",
".",
"attrib",
".",
"get",
"(",
"'title'",
",",
"''",
")",
".",
"lower",
"(",
")",
"]",
"=",
"elem",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"allSectionIds",
"[",
"elem",
".",
"attrib",
".",
"get",
"(",
"'key'",
",",
"''",
")",
".",
"lower",
"(",
")",
"]",
"=",
"elem",
".",
"attrib",
".",
"get",
"(",
"'id'",
")",
"log",
".",
"debug",
"(",
"allSectionIds",
")",
"# Convert passed in section items to section ids from above lookup",
"sectionIds",
"=",
"[",
"]",
"for",
"section",
"in",
"sections",
":",
"sectionKey",
"=",
"section",
".",
"key",
"if",
"isinstance",
"(",
"section",
",",
"LibrarySection",
")",
"else",
"section",
"sectionIds",
".",
"append",
"(",
"allSectionIds",
"[",
"sectionKey",
".",
"lower",
"(",
")",
"]",
")",
"return",
"sectionIds"
] |
Converts a list of section objects or names to sectionIds needed for library sharing.
|
[
"Converts",
"a",
"list",
"of",
"section",
"objects",
"or",
"names",
"to",
"sectionIds",
"needed",
"for",
"library",
"sharing",
"."
] |
python
|
train
|
robmarkcole/HASS-data-detective
|
detective/config.py
|
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/config.py#L16-L29
|
def find_hass_config():
"""Try to find HASS config."""
if "HASSIO_TOKEN" in os.environ:
return "/config"
config_dir = default_hass_config_dir()
if os.path.isdir(config_dir):
return config_dir
raise ValueError(
"Unable to automatically find the location of Home Assistant "
"config. Please pass it in."
)
|
[
"def",
"find_hass_config",
"(",
")",
":",
"if",
"\"HASSIO_TOKEN\"",
"in",
"os",
".",
"environ",
":",
"return",
"\"/config\"",
"config_dir",
"=",
"default_hass_config_dir",
"(",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"config_dir",
")",
":",
"return",
"config_dir",
"raise",
"ValueError",
"(",
"\"Unable to automatically find the location of Home Assistant \"",
"\"config. Please pass it in.\"",
")"
] |
Try to find HASS config.
|
[
"Try",
"to",
"find",
"HASS",
"config",
"."
] |
python
|
train
|
rgmining/ria
|
ria/bipartite.py
|
https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L432-L460
|
def retrieve_review(self, reviewer, product):
"""Retrieve review that the given reviewer put the given product.
Args:
reviewer: An instance of Reviewer.
product: An instance of Product.
Returns:
A review object.
Raises:
TypeError: when given reviewer and product aren't instance of
specified reviewer and product class when this graph is constructed.
KeyError: When the reviewer does not review the product.
"""
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
elif not isinstance(product, self._product_cls):
raise TypeError(
"Type of given product isn't acceptable:", product,
", expected:", self._product_cls)
try:
return self.graph[reviewer][product]["review"]
except TypeError:
raise KeyError(
"{0} does not review {1}.".format(reviewer, product))
|
[
"def",
"retrieve_review",
"(",
"self",
",",
"reviewer",
",",
"product",
")",
":",
"if",
"not",
"isinstance",
"(",
"reviewer",
",",
"self",
".",
"_reviewer_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given reviewer isn't acceptable:\"",
",",
"reviewer",
",",
"\", expected:\"",
",",
"self",
".",
"_reviewer_cls",
")",
"elif",
"not",
"isinstance",
"(",
"product",
",",
"self",
".",
"_product_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given product isn't acceptable:\"",
",",
"product",
",",
"\", expected:\"",
",",
"self",
".",
"_product_cls",
")",
"try",
":",
"return",
"self",
".",
"graph",
"[",
"reviewer",
"]",
"[",
"product",
"]",
"[",
"\"review\"",
"]",
"except",
"TypeError",
":",
"raise",
"KeyError",
"(",
"\"{0} does not review {1}.\"",
".",
"format",
"(",
"reviewer",
",",
"product",
")",
")"
] |
Retrieve review that the given reviewer put the given product.
Args:
reviewer: An instance of Reviewer.
product: An instance of Product.
Returns:
A review object.
Raises:
TypeError: when given reviewer and product aren't instance of
specified reviewer and product class when this graph is constructed.
KeyError: When the reviewer does not review the product.
|
[
"Retrieve",
"review",
"that",
"the",
"given",
"reviewer",
"put",
"the",
"given",
"product",
"."
] |
python
|
train
|
log2timeline/plaso
|
plaso/output/shared_elastic.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/shared_elastic.py#L280-L287
|
def SetUsername(self, username):
"""Sets the username.
Args:
username (str): username to authenticate with.
"""
self._username = username
logger.debug('Elasticsearch username: {0!s}'.format(username))
|
[
"def",
"SetUsername",
"(",
"self",
",",
"username",
")",
":",
"self",
".",
"_username",
"=",
"username",
"logger",
".",
"debug",
"(",
"'Elasticsearch username: {0!s}'",
".",
"format",
"(",
"username",
")",
")"
] |
Sets the username.
Args:
username (str): username to authenticate with.
|
[
"Sets",
"the",
"username",
"."
] |
python
|
train
|
python-odin/odinweb
|
odinweb/decorators.py
|
https://github.com/python-odin/odinweb/blob/198424133584acc18cb41c8d18d91f803abc810f/odinweb/decorators.py#L503-L514
|
def create(callback=None, path=None, method=Method.POST, resource=None, tags=None, summary="Create a new resource",
middleware=None):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that creates a resource.
"""
def inner(c):
op = ResourceOperation(c, path or NoPath, method, resource, tags, summary, middleware)
op.responses.add(Response(HTTPStatus.CREATED, "{name} has been created"))
op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error))
return op
return inner(callback) if callback else inner
|
[
"def",
"create",
"(",
"callback",
"=",
"None",
",",
"path",
"=",
"None",
",",
"method",
"=",
"Method",
".",
"POST",
",",
"resource",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"summary",
"=",
"\"Create a new resource\"",
",",
"middleware",
"=",
"None",
")",
":",
"# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation",
"def",
"inner",
"(",
"c",
")",
":",
"op",
"=",
"ResourceOperation",
"(",
"c",
",",
"path",
"or",
"NoPath",
",",
"method",
",",
"resource",
",",
"tags",
",",
"summary",
",",
"middleware",
")",
"op",
".",
"responses",
".",
"add",
"(",
"Response",
"(",
"HTTPStatus",
".",
"CREATED",
",",
"\"{name} has been created\"",
")",
")",
"op",
".",
"responses",
".",
"add",
"(",
"Response",
"(",
"HTTPStatus",
".",
"BAD_REQUEST",
",",
"\"Validation failed.\"",
",",
"Error",
")",
")",
"return",
"op",
"return",
"inner",
"(",
"callback",
")",
"if",
"callback",
"else",
"inner"
] |
Decorator to configure an operation that creates a resource.
|
[
"Decorator",
"to",
"configure",
"an",
"operation",
"that",
"creates",
"a",
"resource",
"."
] |
python
|
train
|
etal/biocma
|
biocma/utils.py
|
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/utils.py#L159-L200
|
def get_inserts(block):
"""Identify the inserts in sequence in a block.
Inserts are relative to the consensus (theoretically), and identified by
lowercase letters in the sequence. The returned integer pairs represent the
insert start and end positions in the full-length sequence, using one-based
numbering.
The first sequence of the CMA block is included, though it may just be the
consensus sequence, which shouldn't have any inserts.
Output:
{id1: [(start, end), (start, end), ...], id2: ..., ...}
"""
def find_inserts(seq, head_len):
"""Locate the lowercase regions in a character sequence.
Yield the insert ranges as tuples using 1-based numbering, shifted by
head_len.
"""
in_insert = False
curr_start = None
deletions = 0
for idx, is_lower in enumerate(map(str.islower, seq)):
if is_lower:
if not in_insert:
# Start of a new insert region
curr_start = head_len + idx + 1 - deletions
in_insert = True
else:
if in_insert:
# End of the current insert region
yield (curr_start, head_len + idx - deletions)
in_insert = False
if seq[idx] == '-':
deletions += 1
return dict((record['id'],
list(find_inserts(record['seq'], record['head_len'])))
for record in block['sequences'])
|
[
"def",
"get_inserts",
"(",
"block",
")",
":",
"def",
"find_inserts",
"(",
"seq",
",",
"head_len",
")",
":",
"\"\"\"Locate the lowercase regions in a character sequence.\n\n Yield the insert ranges as tuples using 1-based numbering, shifted by\n head_len.\n \"\"\"",
"in_insert",
"=",
"False",
"curr_start",
"=",
"None",
"deletions",
"=",
"0",
"for",
"idx",
",",
"is_lower",
"in",
"enumerate",
"(",
"map",
"(",
"str",
".",
"islower",
",",
"seq",
")",
")",
":",
"if",
"is_lower",
":",
"if",
"not",
"in_insert",
":",
"# Start of a new insert region",
"curr_start",
"=",
"head_len",
"+",
"idx",
"+",
"1",
"-",
"deletions",
"in_insert",
"=",
"True",
"else",
":",
"if",
"in_insert",
":",
"# End of the current insert region",
"yield",
"(",
"curr_start",
",",
"head_len",
"+",
"idx",
"-",
"deletions",
")",
"in_insert",
"=",
"False",
"if",
"seq",
"[",
"idx",
"]",
"==",
"'-'",
":",
"deletions",
"+=",
"1",
"return",
"dict",
"(",
"(",
"record",
"[",
"'id'",
"]",
",",
"list",
"(",
"find_inserts",
"(",
"record",
"[",
"'seq'",
"]",
",",
"record",
"[",
"'head_len'",
"]",
")",
")",
")",
"for",
"record",
"in",
"block",
"[",
"'sequences'",
"]",
")"
] |
Identify the inserts in sequence in a block.
Inserts are relative to the consensus (theoretically), and identified by
lowercase letters in the sequence. The returned integer pairs represent the
insert start and end positions in the full-length sequence, using one-based
numbering.
The first sequence of the CMA block is included, though it may just be the
consensus sequence, which shouldn't have any inserts.
Output:
{id1: [(start, end), (start, end), ...], id2: ..., ...}
|
[
"Identify",
"the",
"inserts",
"in",
"sequence",
"in",
"a",
"block",
"."
] |
python
|
train
|
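A hedged sketch of calling `get_inserts` above on a hand-built block. The real CMA block schema is not shown in this entry, so the minimal dict below only mirrors the keys the function actually reads ('id', 'seq', 'head_len') and is an assumption, not the actual file format.

from biocma.utils import get_inserts  # import path assumed from biocma/utils.py above

# One record: lowercase letters mark an insert, '-' marks a deletion.
block = {'sequences': [{'id': 'seq1', 'seq': 'ABCdefGH-I', 'head_len': 10}]}

print(get_inserts(block))
# Expected, following the logic above: {'seq1': [(14, 16)]}
# 'def' starts at index 3 -> 10 + 3 + 1 = 14 and ends before index 6 -> 10 + 6 = 16.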
dnanexus/dx-toolkit
|
src/python/dxpy/bindings/dxjob.py
|
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxjob.py#L175-L188
|
def set_id(self, dxid):
'''
:param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs)
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
'''
if dxid is not None:
if not (isinstance(dxid, basestring) and dxid.startswith('localjob-')):
# localjob IDs (which do not follow the usual ID
# syntax) should be allowed; otherwise, follow the
# usual syntax checking
verify_string_dxid(dxid, self._class)
self._dxid = dxid
|
[
"def",
"set_id",
"(",
"self",
",",
"dxid",
")",
":",
"if",
"dxid",
"is",
"not",
"None",
":",
"if",
"not",
"(",
"isinstance",
"(",
"dxid",
",",
"basestring",
")",
"and",
"dxid",
".",
"startswith",
"(",
"'localjob-'",
")",
")",
":",
"# localjob IDs (which do not follow the usual ID",
"# syntax) should be allowed; otherwise, follow the",
"# usual syntax checking",
"verify_string_dxid",
"(",
"dxid",
",",
"self",
".",
"_class",
")",
"self",
".",
"_dxid",
"=",
"dxid"
] |
:param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs)
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
|
[
":",
"param",
"dxid",
":",
"New",
"job",
"ID",
"to",
"be",
"associated",
"with",
"the",
"handler",
"(",
"localjob",
"IDs",
"also",
"accepted",
"for",
"local",
"runs",
")",
":",
"type",
"dxid",
":",
"string"
] |
python
|
train
|
thiagopbueno/rddl2tf
|
rddl2tf/compiler.py
|
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L321-L338
|
def compile_action_preconditions_checking(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> tf.Tensor:
'''Combines the action preconditions into an applicability checking op.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
            A boolean tensor for checking if `action` is applicable in `state`.
'''
with self.graph.as_default():
with tf.name_scope('action_preconditions_checking'):
preconds = self.compile_action_preconditions(state, action)
all_preconds = tf.stack([p.tensor for p in preconds], axis=1)
checking = tf.reduce_all(all_preconds, axis=1)
return checking
|
[
"def",
"compile_action_preconditions_checking",
"(",
"self",
",",
"state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"action",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
")",
"->",
"tf",
".",
"Tensor",
":",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'action_preconditions_checking'",
")",
":",
"preconds",
"=",
"self",
".",
"compile_action_preconditions",
"(",
"state",
",",
"action",
")",
"all_preconds",
"=",
"tf",
".",
"stack",
"(",
"[",
"p",
".",
"tensor",
"for",
"p",
"in",
"preconds",
"]",
",",
"axis",
"=",
"1",
")",
"checking",
"=",
"tf",
".",
"reduce_all",
"(",
"all_preconds",
",",
"axis",
"=",
"1",
")",
"return",
"checking"
] |
Combines the action preconditions into an applicability checking op.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A boolean tensor for checking if `action` is applicable in `state`.
|
[
"Combines",
"the",
"action",
"preconditions",
"into",
"an",
"applicability",
"checking",
"op",
"."
] |
python
|
train
|
GNS3/gns3-server
|
gns3server/compute/vmware/__init__.py
|
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vmware/__init__.py#L190-L240
|
def check_vmware_version(self):
"""
Check VMware version
"""
if sys.platform.startswith("win"):
# look for vmrun.exe using the directory listed in the registry
ws_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
if ws_version is None:
player_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player")
if player_version:
log.debug("VMware Player version {} detected".format(player_version))
yield from self._check_vmware_player_requirements(player_version)
else:
log.warning("Could not find VMware version")
self._host_type = "ws"
else:
log.debug("VMware Workstation version {} detected".format(ws_version))
yield from self._check_vmware_workstation_requirements(ws_version)
else:
if sys.platform.startswith("darwin"):
if not os.path.isdir("/Applications/VMware Fusion.app"):
raise VMwareError("VMware Fusion is not installed in the standard location /Applications/VMware Fusion.app")
self._host_type = "fusion"
return # FIXME: no version checking on Mac OS X but we support all versions of fusion
vmware_path = VMware._get_linux_vmware_binary()
if vmware_path is None:
raise VMwareError("VMware is not installed (vmware or vmplayer executable could not be found in $PATH)")
try:
output = yield from subprocess_check_output(vmware_path, "-v")
match = re.search("VMware Workstation ([0-9]+)\.", output)
version = None
if match:
# VMware Workstation has been detected
version = match.group(1)
log.debug("VMware Workstation version {} detected".format(version))
yield from self._check_vmware_workstation_requirements(version)
match = re.search("VMware Player ([0-9]+)\.", output)
if match:
# VMware Player has been detected
version = match.group(1)
log.debug("VMware Player version {} detected".format(version))
yield from self._check_vmware_player_requirements(version)
if version is None:
log.warning("Could not find VMware version. Output of VMware: {}".format(output))
raise VMwareError("Could not find VMware version. Output of VMware: {}".format(output))
except (OSError, subprocess.SubprocessError) as e:
log.error("Error while looking for the VMware version: {}".format(e))
raise VMwareError("Error while looking for the VMware version: {}".format(e))
|
[
"def",
"check_vmware_version",
"(",
"self",
")",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"win\"",
")",
":",
"# look for vmrun.exe using the directory listed in the registry",
"ws_version",
"=",
"self",
".",
"_find_vmware_version_registry",
"(",
"r\"SOFTWARE\\Wow6432Node\\VMware, Inc.\\VMware Workstation\"",
")",
"if",
"ws_version",
"is",
"None",
":",
"player_version",
"=",
"self",
".",
"_find_vmware_version_registry",
"(",
"r\"SOFTWARE\\Wow6432Node\\VMware, Inc.\\VMware Player\"",
")",
"if",
"player_version",
":",
"log",
".",
"debug",
"(",
"\"VMware Player version {} detected\"",
".",
"format",
"(",
"player_version",
")",
")",
"yield",
"from",
"self",
".",
"_check_vmware_player_requirements",
"(",
"player_version",
")",
"else",
":",
"log",
".",
"warning",
"(",
"\"Could not find VMware version\"",
")",
"self",
".",
"_host_type",
"=",
"\"ws\"",
"else",
":",
"log",
".",
"debug",
"(",
"\"VMware Workstation version {} detected\"",
".",
"format",
"(",
"ws_version",
")",
")",
"yield",
"from",
"self",
".",
"_check_vmware_workstation_requirements",
"(",
"ws_version",
")",
"else",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"darwin\"",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"\"/Applications/VMware Fusion.app\"",
")",
":",
"raise",
"VMwareError",
"(",
"\"VMware Fusion is not installed in the standard location /Applications/VMware Fusion.app\"",
")",
"self",
".",
"_host_type",
"=",
"\"fusion\"",
"return",
"# FIXME: no version checking on Mac OS X but we support all versions of fusion",
"vmware_path",
"=",
"VMware",
".",
"_get_linux_vmware_binary",
"(",
")",
"if",
"vmware_path",
"is",
"None",
":",
"raise",
"VMwareError",
"(",
"\"VMware is not installed (vmware or vmplayer executable could not be found in $PATH)\"",
")",
"try",
":",
"output",
"=",
"yield",
"from",
"subprocess_check_output",
"(",
"vmware_path",
",",
"\"-v\"",
")",
"match",
"=",
"re",
".",
"search",
"(",
"\"VMware Workstation ([0-9]+)\\.\"",
",",
"output",
")",
"version",
"=",
"None",
"if",
"match",
":",
"# VMware Workstation has been detected",
"version",
"=",
"match",
".",
"group",
"(",
"1",
")",
"log",
".",
"debug",
"(",
"\"VMware Workstation version {} detected\"",
".",
"format",
"(",
"version",
")",
")",
"yield",
"from",
"self",
".",
"_check_vmware_workstation_requirements",
"(",
"version",
")",
"match",
"=",
"re",
".",
"search",
"(",
"\"VMware Player ([0-9]+)\\.\"",
",",
"output",
")",
"if",
"match",
":",
"# VMware Player has been detected",
"version",
"=",
"match",
".",
"group",
"(",
"1",
")",
"log",
".",
"debug",
"(",
"\"VMware Player version {} detected\"",
".",
"format",
"(",
"version",
")",
")",
"yield",
"from",
"self",
".",
"_check_vmware_player_requirements",
"(",
"version",
")",
"if",
"version",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"\"Could not find VMware version. Output of VMware: {}\"",
".",
"format",
"(",
"output",
")",
")",
"raise",
"VMwareError",
"(",
"\"Could not find VMware version. Output of VMware: {}\"",
".",
"format",
"(",
"output",
")",
")",
"except",
"(",
"OSError",
",",
"subprocess",
".",
"SubprocessError",
")",
"as",
"e",
":",
"log",
".",
"error",
"(",
"\"Error while looking for the VMware version: {}\"",
".",
"format",
"(",
"e",
")",
")",
"raise",
"VMwareError",
"(",
"\"Error while looking for the VMware version: {}\"",
".",
"format",
"(",
"e",
")",
")"
] |
Check VMware version
|
[
"Check",
"VMware",
"version"
] |
python
|
train
|
ellmetha/django-machina
|
machina/apps/forum/views.py
|
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum/views.py#L80-L89
|
def get_queryset(self):
""" Returns the list of items for this view. """
self.forum = self.get_forum()
qs = (
self.forum.topics
.exclude(type=Topic.TOPIC_ANNOUNCE)
.exclude(approved=False)
.select_related('poster', 'last_post', 'last_post__poster')
)
return qs
|
[
"def",
"get_queryset",
"(",
"self",
")",
":",
"self",
".",
"forum",
"=",
"self",
".",
"get_forum",
"(",
")",
"qs",
"=",
"(",
"self",
".",
"forum",
".",
"topics",
".",
"exclude",
"(",
"type",
"=",
"Topic",
".",
"TOPIC_ANNOUNCE",
")",
".",
"exclude",
"(",
"approved",
"=",
"False",
")",
".",
"select_related",
"(",
"'poster'",
",",
"'last_post'",
",",
"'last_post__poster'",
")",
")",
"return",
"qs"
] |
Returns the list of items for this view.
|
[
"Returns",
"the",
"list",
"of",
"items",
"for",
"this",
"view",
"."
] |
python
|
train
|
hugapi/hug
|
hug/output_format.py
|
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/output_format.py#L277-L295
|
def on_content_type(handlers, default=None, error='The requested content type does not match any of those allowed'):
"""Returns a content in a different format based on the clients provided content type,
should pass in a dict with the following format:
{'[content-type]': action,
...
}
"""
def output_type(data, request, response):
handler = handlers.get(request.content_type.split(';')[0], default)
if not handler:
raise falcon.HTTPNotAcceptable(error)
response.content_type = handler.content_type
return handler(data, request=request, response=response)
output_type.__doc__ = 'Supports any of the following formats: {0}'.format(', '.join(
function.__doc__ or function.__name__ for function in handlers.values()))
output_type.content_type = ', '.join(handlers.keys())
return output_type
|
[
"def",
"on_content_type",
"(",
"handlers",
",",
"default",
"=",
"None",
",",
"error",
"=",
"'The requested content type does not match any of those allowed'",
")",
":",
"def",
"output_type",
"(",
"data",
",",
"request",
",",
"response",
")",
":",
"handler",
"=",
"handlers",
".",
"get",
"(",
"request",
".",
"content_type",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
",",
"default",
")",
"if",
"not",
"handler",
":",
"raise",
"falcon",
".",
"HTTPNotAcceptable",
"(",
"error",
")",
"response",
".",
"content_type",
"=",
"handler",
".",
"content_type",
"return",
"handler",
"(",
"data",
",",
"request",
"=",
"request",
",",
"response",
"=",
"response",
")",
"output_type",
".",
"__doc__",
"=",
"'Supports any of the following formats: {0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"function",
".",
"__doc__",
"or",
"function",
".",
"__name__",
"for",
"function",
"in",
"handlers",
".",
"values",
"(",
")",
")",
")",
"output_type",
".",
"content_type",
"=",
"', '",
".",
"join",
"(",
"handlers",
".",
"keys",
"(",
")",
")",
"return",
"output_type"
] |
Returns a content in a different format based on the clients provided content type,
should pass in a dict with the following format:
{'[content-type]': action,
...
}
|
[
"Returns",
"a",
"content",
"in",
"a",
"different",
"format",
"based",
"on",
"the",
"clients",
"provided",
"content",
"type",
"should",
"pass",
"in",
"a",
"dict",
"with",
"the",
"following",
"format",
":"
] |
python
|
train
|
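A usage sketch for `on_content_type` above. Note that, as written, it dispatches on the request's Content-Type header rather than Accept. `hug.output_format.json` and `hug.output_format.text` are standard hug output formats, but the endpoint itself is hypothetical.

import hug

# Serve JSON or plain text depending on the Content-Type the client sends.
negotiated = hug.output_format.on_content_type(
    {'application/json': hug.output_format.json,
     'text/plain': hug.output_format.text},
    default=hug.output_format.json,
)

@hug.get('/hello', output=negotiated)
def hello():
    return {'hello': 'world'}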
antiboredom/videogrep
|
videogrep/videogrep.py
|
https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L309-L371
|
def compose_from_srts(srts, search, searchtype):
"""Takes a list of subtitle (srt) filenames, search term and search type
    and returns a list of timestamps for composing a supercut.
"""
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
        # If a corresponding video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
                        # Record this occurrence of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition
|
[
"def",
"compose_from_srts",
"(",
"srts",
",",
"search",
",",
"searchtype",
")",
":",
"composition",
"=",
"[",
"]",
"foundSearchTerm",
"=",
"False",
"# Iterate over each subtitles file.",
"for",
"srt",
"in",
"srts",
":",
"print",
"(",
"srt",
")",
"lines",
"=",
"clean_srt",
"(",
"srt",
")",
"videofile",
"=",
"\"\"",
"foundVideoFile",
"=",
"False",
"print",
"(",
"\"[+] Searching for video file corresponding to '\"",
"+",
"srt",
"+",
"\"'.\"",
")",
"for",
"ext",
"in",
"usable_extensions",
":",
"tempVideoFile",
"=",
"srt",
".",
"replace",
"(",
"'.srt'",
",",
"'.'",
"+",
"ext",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"tempVideoFile",
")",
":",
"videofile",
"=",
"tempVideoFile",
"foundVideoFile",
"=",
"True",
"print",
"(",
"\"[+] Found '\"",
"+",
"tempVideoFile",
"+",
"\"'.\"",
")",
"# If a correspndong video file was found for this subtitles file...",
"if",
"foundVideoFile",
":",
"# Check that the subtitles file contains subtitles.",
"if",
"lines",
":",
"# Iterate over each line in the current subtitles file.",
"for",
"timespan",
"in",
"lines",
".",
"keys",
"(",
")",
":",
"line",
"=",
"lines",
"[",
"timespan",
"]",
".",
"strip",
"(",
")",
"# If this line contains the search term",
"if",
"search_line",
"(",
"line",
",",
"search",
",",
"searchtype",
")",
":",
"foundSearchTerm",
"=",
"True",
"# Extract the timespan for this subtitle.",
"start",
",",
"end",
"=",
"convert_timespan",
"(",
"timespan",
")",
"# Record this occurance of the search term.",
"composition",
".",
"append",
"(",
"{",
"'file'",
":",
"videofile",
",",
"'time'",
":",
"timespan",
",",
"'start'",
":",
"start",
",",
"'end'",
":",
"end",
",",
"'line'",
":",
"line",
"}",
")",
"# If the search was unsuccessful.",
"if",
"foundSearchTerm",
"is",
"False",
":",
"print",
"(",
"\"[!] Search term '\"",
"+",
"search",
"+",
"\"'\"",
"+",
"\" was not found is subtitle file '\"",
"+",
"srt",
"+",
"\"'.\"",
")",
"# If no subtitles were found in the current file.",
"else",
":",
"print",
"(",
"\"[!] Subtitle file '\"",
"+",
"srt",
"+",
"\"' is empty.\"",
")",
"# If no video file was found...",
"else",
":",
"print",
"(",
"\"[!] No video file was found which corresponds to subtitle file '\"",
"+",
"srt",
"+",
"\"'.\"",
")",
"print",
"(",
"\"[!] The following video formats are currently supported:\"",
")",
"extList",
"=",
"\"\"",
"for",
"ext",
"in",
"usable_extensions",
":",
"extList",
"+=",
"ext",
"+",
"\", \"",
"print",
"(",
"extList",
")",
"return",
"composition"
] |
Takes a list of subtitle (srt) filenames, search term and search type
and returns a list of timestamps for composing a supercut.
|
[
"Takes",
"a",
"list",
"of",
"subtitle",
"(",
"srt",
")",
"filenames",
"search",
"term",
"and",
"search",
"type",
"and",
"returns",
"a",
"list",
"of",
"timestamps",
"for",
"composing",
"a",
"supercut",
"."
] |
python
|
train
|
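For reference, a sketch of what one element of the `composition` list returned above looks like, based only on the dict literal appended in the loop; the concrete values, and the exact types convert_timespan() yields for 'start'/'end', are assumptions for illustration.

# Hypothetical shape of a single entry produced by compose_from_srts():
entry = {
    'file': 'interview.mp4',                  # video found next to interview.srt
    'time': '00:00:12,000 --> 00:00:14,500',  # raw subtitle timespan key
    'start': 12.0,                            # assumed numeric form from convert_timespan()
    'end': 14.5,
    'line': 'the line of dialogue that matched',
}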
allenai/allennlp
|
scripts/check_requirements_and_setup.py
|
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/scripts/check_requirements_and_setup.py#L32-L60
|
def parse_requirements() -> Tuple[PackagesType, PackagesType, Set[str]]:
"""Parse all dependencies out of the requirements.txt file."""
essential_packages: PackagesType = {}
other_packages: PackagesType = {}
duplicates: Set[str] = set()
with open("requirements.txt", "r") as req_file:
section: str = ""
for line in req_file:
line = line.strip()
if line.startswith("####"):
# Line is a section name.
section = parse_section_name(line)
continue
if not line or line.startswith("#"):
# Line is empty or just regular comment.
continue
module, version = parse_package(line)
if module in essential_packages or module in other_packages:
duplicates.add(module)
if section.startswith("ESSENTIAL"):
essential_packages[module] = version
else:
other_packages[module] = version
return essential_packages, other_packages, duplicates
|
[
"def",
"parse_requirements",
"(",
")",
"->",
"Tuple",
"[",
"PackagesType",
",",
"PackagesType",
",",
"Set",
"[",
"str",
"]",
"]",
":",
"essential_packages",
":",
"PackagesType",
"=",
"{",
"}",
"other_packages",
":",
"PackagesType",
"=",
"{",
"}",
"duplicates",
":",
"Set",
"[",
"str",
"]",
"=",
"set",
"(",
")",
"with",
"open",
"(",
"\"requirements.txt\"",
",",
"\"r\"",
")",
"as",
"req_file",
":",
"section",
":",
"str",
"=",
"\"\"",
"for",
"line",
"in",
"req_file",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"\"####\"",
")",
":",
"# Line is a section name.",
"section",
"=",
"parse_section_name",
"(",
"line",
")",
"continue",
"if",
"not",
"line",
"or",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"# Line is empty or just regular comment.",
"continue",
"module",
",",
"version",
"=",
"parse_package",
"(",
"line",
")",
"if",
"module",
"in",
"essential_packages",
"or",
"module",
"in",
"other_packages",
":",
"duplicates",
".",
"add",
"(",
"module",
")",
"if",
"section",
".",
"startswith",
"(",
"\"ESSENTIAL\"",
")",
":",
"essential_packages",
"[",
"module",
"]",
"=",
"version",
"else",
":",
"other_packages",
"[",
"module",
"]",
"=",
"version",
"return",
"essential_packages",
",",
"other_packages",
",",
"duplicates"
] |
Parse all dependencies out of the requirements.txt file.
|
[
"Parse",
"all",
"dependencies",
"out",
"of",
"the",
"requirements",
".",
"txt",
"file",
"."
] |
python
|
train
|
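A sketch of the requirements.txt layout `parse_requirements` above expects: lines beginning with '####' name a section (via parse_section_name, not shown here), and sections whose parsed name starts with 'ESSENTIAL' are routed to the first dict. The section titles and package pins below are hypothetical.

# Hypothetical requirements.txt content illustrating the section markers the parser keys on.
sample_requirements = """\
#### ESSENTIAL LIBRARIES FOR MAIN FUNCTIONALITY
torch>=1.0.0
numpy

#### TESTING-RELATED PACKAGES
pytest
flaky
"""
# With this layout, torch and numpy would land in essential_packages,
# pytest and flaky in other_packages, and duplicates would stay empty.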
Zsailer/kubeconf
|
kubeconf/kubeconf.py
|
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L303-L309
|
def get_context(self, name):
"""Get context from kubeconfig."""
contexts = self.data['contexts']
for context in contexts:
if context['name'] == name:
return context
raise KubeConfError("context name not found.")
|
[
"def",
"get_context",
"(",
"self",
",",
"name",
")",
":",
"contexts",
"=",
"self",
".",
"data",
"[",
"'contexts'",
"]",
"for",
"context",
"in",
"contexts",
":",
"if",
"context",
"[",
"'name'",
"]",
"==",
"name",
":",
"return",
"context",
"raise",
"KubeConfError",
"(",
"\"context name not found.\"",
")"
] |
Get context from kubeconfig.
|
[
"Get",
"context",
"from",
"kubeconfig",
"."
] |
python
|
train
|
googleapis/google-cloud-python
|
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/dispatcher.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/dispatcher.py#L131-L138
|
def lease(self, items):
"""Add the given messages to lease management.
Args:
items(Sequence[LeaseRequest]): The items to lease.
"""
self._manager.leaser.add(items)
self._manager.maybe_pause_consumer()
|
[
"def",
"lease",
"(",
"self",
",",
"items",
")",
":",
"self",
".",
"_manager",
".",
"leaser",
".",
"add",
"(",
"items",
")",
"self",
".",
"_manager",
".",
"maybe_pause_consumer",
"(",
")"
] |
Add the given messages to lease management.
Args:
items(Sequence[LeaseRequest]): The items to lease.
|
[
"Add",
"the",
"given",
"messages",
"to",
"lease",
"management",
"."
] |
python
|
train
|
GNS3/gns3-server
|
gns3server/compute/vpcs/vpcs_vm.py
|
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vpcs/vpcs_vm.py#L442-L465
|
def stop_capture(self, port_number):
"""
Stops a packet capture.
:param port_number: port number
"""
if not self._ethernet_adapter.port_exists(port_number):
raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
port_number=port_number))
nio = self._ethernet_adapter.get_nio(0)
if not nio:
raise VPCSError("Port {} is not connected".format(port_number))
nio.stopPacketCapture()
if self.ubridge:
yield from self._ubridge_send('bridge stop_capture {name}'.format(name="VPCS-{}".format(self._id)))
log.info("VPCS '{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name,
id=self.id,
port_number=port_number))
|
[
"def",
"stop_capture",
"(",
"self",
",",
"port_number",
")",
":",
"if",
"not",
"self",
".",
"_ethernet_adapter",
".",
"port_exists",
"(",
"port_number",
")",
":",
"raise",
"VPCSError",
"(",
"\"Port {port_number} doesn't exist in adapter {adapter}\"",
".",
"format",
"(",
"adapter",
"=",
"self",
".",
"_ethernet_adapter",
",",
"port_number",
"=",
"port_number",
")",
")",
"nio",
"=",
"self",
".",
"_ethernet_adapter",
".",
"get_nio",
"(",
"0",
")",
"if",
"not",
"nio",
":",
"raise",
"VPCSError",
"(",
"\"Port {} is not connected\"",
".",
"format",
"(",
"port_number",
")",
")",
"nio",
".",
"stopPacketCapture",
"(",
")",
"if",
"self",
".",
"ubridge",
":",
"yield",
"from",
"self",
".",
"_ubridge_send",
"(",
"'bridge stop_capture {name}'",
".",
"format",
"(",
"name",
"=",
"\"VPCS-{}\"",
".",
"format",
"(",
"self",
".",
"_id",
")",
")",
")",
"log",
".",
"info",
"(",
"\"VPCS '{name}' [{id}]: stopping packet capture on port {port_number}\"",
".",
"format",
"(",
"name",
"=",
"self",
".",
"name",
",",
"id",
"=",
"self",
".",
"id",
",",
"port_number",
"=",
"port_number",
")",
")"
] |
Stops a packet capture.
:param port_number: port number
|
[
"Stops",
"a",
"packet",
"capture",
"."
] |
python
|
train
|
cloud-custodian/cloud-custodian
|
c7n/actions/network.py
|
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/actions/network.py#L152-L182
|
def resolve_group_names(self, r, target_group_ids, groups):
"""Resolve any security group names to the corresponding group ids
With the context of a given network attached resource.
"""
names = self.get_group_names(target_group_ids)
if not names:
return target_group_ids
target_group_ids = list(target_group_ids)
vpc_id = self.vpc_expr.search(r)
if not vpc_id:
raise PolicyExecutionError(self._format_error(
"policy:{policy} non vpc attached resource used "
"with modify-security-group: {resource_id}",
resource_id=r[self.manager.resource_type.id]))
found = False
for n in names:
for g in groups:
if g['GroupName'] == n and g['VpcId'] == vpc_id:
found = g['GroupId']
if not found:
raise PolicyExecutionError(self._format_error((
"policy:{policy} could not resolve sg:{name} for "
"resource:{resource_id} in vpc:{vpc}"),
name=n,
resource_id=r[self.manager.resource_type.id], vpc=vpc_id))
target_group_ids.remove(n)
target_group_ids.append(found)
return target_group_ids
|
[
"def",
"resolve_group_names",
"(",
"self",
",",
"r",
",",
"target_group_ids",
",",
"groups",
")",
":",
"names",
"=",
"self",
".",
"get_group_names",
"(",
"target_group_ids",
")",
"if",
"not",
"names",
":",
"return",
"target_group_ids",
"target_group_ids",
"=",
"list",
"(",
"target_group_ids",
")",
"vpc_id",
"=",
"self",
".",
"vpc_expr",
".",
"search",
"(",
"r",
")",
"if",
"not",
"vpc_id",
":",
"raise",
"PolicyExecutionError",
"(",
"self",
".",
"_format_error",
"(",
"\"policy:{policy} non vpc attached resource used \"",
"\"with modify-security-group: {resource_id}\"",
",",
"resource_id",
"=",
"r",
"[",
"self",
".",
"manager",
".",
"resource_type",
".",
"id",
"]",
")",
")",
"found",
"=",
"False",
"for",
"n",
"in",
"names",
":",
"for",
"g",
"in",
"groups",
":",
"if",
"g",
"[",
"'GroupName'",
"]",
"==",
"n",
"and",
"g",
"[",
"'VpcId'",
"]",
"==",
"vpc_id",
":",
"found",
"=",
"g",
"[",
"'GroupId'",
"]",
"if",
"not",
"found",
":",
"raise",
"PolicyExecutionError",
"(",
"self",
".",
"_format_error",
"(",
"(",
"\"policy:{policy} could not resolve sg:{name} for \"",
"\"resource:{resource_id} in vpc:{vpc}\"",
")",
",",
"name",
"=",
"n",
",",
"resource_id",
"=",
"r",
"[",
"self",
".",
"manager",
".",
"resource_type",
".",
"id",
"]",
",",
"vpc",
"=",
"vpc_id",
")",
")",
"target_group_ids",
".",
"remove",
"(",
"n",
")",
"target_group_ids",
".",
"append",
"(",
"found",
")",
"return",
"target_group_ids"
] |
Resolve any security group names to the corresponding group ids
With the context of a given network attached resource.
|
[
"Resolve",
"any",
"security",
"group",
"names",
"to",
"the",
"corresponding",
"group",
"ids"
] |
python
|
train
|
wohlgejm/accountable
|
accountable/cli.py
|
https://github.com/wohlgejm/accountable/blob/20586365ccd319061e5548ce14fb0b8f449580fa/accountable/cli.py#L209-L216
|
def update(accountable, options):
"""
Update an existing issue.
"""
issue = accountable.issue_update(options)
headers = issue.keys()
rows = [headers, [v for k, v in issue.items()]]
print_table(SingleTable(rows))
|
[
"def",
"update",
"(",
"accountable",
",",
"options",
")",
":",
"issue",
"=",
"accountable",
".",
"issue_update",
"(",
"options",
")",
"headers",
"=",
"issue",
".",
"keys",
"(",
")",
"rows",
"=",
"[",
"headers",
",",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"issue",
".",
"items",
"(",
")",
"]",
"]",
"print_table",
"(",
"SingleTable",
"(",
"rows",
")",
")"
] |
Update an existing issue.
|
[
"Update",
"an",
"existing",
"issue",
"."
] |
python
|
train
|
pytroll/pyspectral
|
pyspectral/atm_correction_ir.py
|
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/pyspectral/atm_correction_ir.py#L98-L132
|
def viewzen_corr(data, view_zen):
"""Apply atmospheric correction on the given *data* using the
specified satellite zenith angles (*view_zen*). Both input data
are given as 2-dimensional Numpy (masked) arrays, and they should
have equal shapes.
The *data* array will be changed in place and has to be copied before.
"""
def ratio(value, v_null, v_ref):
return (value - v_null) / (v_ref - v_null)
def tau0(t):
T_0 = 210.0
T_REF = 320.0
TAU_REF = 9.85
return (1 + TAU_REF)**ratio(t, T_0, T_REF) - 1
def tau(t):
T_0 = 170.0
T_REF = 295.0
TAU_REF = 1.0
M = 4
return TAU_REF * ratio(t, T_0, T_REF)**M
def delta(z):
Z_0 = 0.0
Z_REF = 70.0
DELTA_REF = 6.2
return (1 + DELTA_REF)**ratio(z, Z_0, Z_REF) - 1
y0, x0 = np.ma.where(view_zen == 0)
data[y0, x0] += tau0(data[y0, x0])
y, x = np.ma.where((view_zen > 0) & (view_zen < 90) & (~data.mask))
data[y, x] += tau(data[y, x]) * delta(view_zen[y, x])
return data
|
[
"def",
"viewzen_corr",
"(",
"data",
",",
"view_zen",
")",
":",
"def",
"ratio",
"(",
"value",
",",
"v_null",
",",
"v_ref",
")",
":",
"return",
"(",
"value",
"-",
"v_null",
")",
"/",
"(",
"v_ref",
"-",
"v_null",
")",
"def",
"tau0",
"(",
"t",
")",
":",
"T_0",
"=",
"210.0",
"T_REF",
"=",
"320.0",
"TAU_REF",
"=",
"9.85",
"return",
"(",
"1",
"+",
"TAU_REF",
")",
"**",
"ratio",
"(",
"t",
",",
"T_0",
",",
"T_REF",
")",
"-",
"1",
"def",
"tau",
"(",
"t",
")",
":",
"T_0",
"=",
"170.0",
"T_REF",
"=",
"295.0",
"TAU_REF",
"=",
"1.0",
"M",
"=",
"4",
"return",
"TAU_REF",
"*",
"ratio",
"(",
"t",
",",
"T_0",
",",
"T_REF",
")",
"**",
"M",
"def",
"delta",
"(",
"z",
")",
":",
"Z_0",
"=",
"0.0",
"Z_REF",
"=",
"70.0",
"DELTA_REF",
"=",
"6.2",
"return",
"(",
"1",
"+",
"DELTA_REF",
")",
"**",
"ratio",
"(",
"z",
",",
"Z_0",
",",
"Z_REF",
")",
"-",
"1",
"y0",
",",
"x0",
"=",
"np",
".",
"ma",
".",
"where",
"(",
"view_zen",
"==",
"0",
")",
"data",
"[",
"y0",
",",
"x0",
"]",
"+=",
"tau0",
"(",
"data",
"[",
"y0",
",",
"x0",
"]",
")",
"y",
",",
"x",
"=",
"np",
".",
"ma",
".",
"where",
"(",
"(",
"view_zen",
">",
"0",
")",
"&",
"(",
"view_zen",
"<",
"90",
")",
"&",
"(",
"~",
"data",
".",
"mask",
")",
")",
"data",
"[",
"y",
",",
"x",
"]",
"+=",
"tau",
"(",
"data",
"[",
"y",
",",
"x",
"]",
")",
"*",
"delta",
"(",
"view_zen",
"[",
"y",
",",
"x",
"]",
")",
"return",
"data"
] |
Apply atmospheric correction on the given *data* using the
specified satellite zenith angles (*view_zen*). Both input data
are given as 2-dimensional Numpy (masked) arrays, and they should
have equal shapes.
The *data* array will be changed in place and has to be copied before.
|
[
"Apply",
"atmospheric",
"correction",
"on",
"the",
"given",
"*",
"data",
"*",
"using",
"the",
"specified",
"satellite",
"zenith",
"angles",
"(",
"*",
"view_zen",
"*",
")",
".",
"Both",
"input",
"data",
"are",
"given",
"as",
"2",
"-",
"dimensional",
"Numpy",
"(",
"masked",
")",
"arrays",
"and",
"they",
"should",
"have",
"equal",
"shapes",
".",
"The",
"*",
"data",
"*",
"array",
"will",
"be",
"changed",
"in",
"place",
"and",
"has",
"to",
"be",
"copied",
"before",
"."
] |
python
|
train
|
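A small sketch of calling `viewzen_corr` above on toy masked arrays. The brightness temperatures and zenith angles are made-up values, and `.copy()` is used because, as the docstring notes, the input array is modified in place.

import numpy as np
from pyspectral.atm_correction_ir import viewzen_corr  # import path from the entry above

# Toy 2x2 brightness temperatures [K] with one masked pixel, plus matching view zenith angles [deg].
bt = np.ma.masked_array([[250.0, 260.0],
                         [270.0, 280.0]],
                        mask=[[False, False],
                              [False, True]])
view_zen = np.ma.masked_array([[0.0, 30.0],
                               [60.0, 45.0]])

corrected = viewzen_corr(bt.copy(), view_zen)  # copy first: the argument is changed in place
print(corrected)  # nadir pixel gets the tau0 term; off-nadir unmasked pixels get tau * delta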
wtolson/gnsq
|
gnsq/consumer.py
|
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/consumer.py#L251-L265
|
def close(self):
"""Immediately close all connections and stop workers."""
if not self.is_running:
return
self._state = CLOSED
self.logger.debug('killing %d worker(s)', len(self._killables))
self._killables.kill(block=False)
self.logger.debug('closing %d connection(s)', len(self._connections))
for conn in self._connections:
conn.close_stream()
self.on_close.send(self)
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_running",
":",
"return",
"self",
".",
"_state",
"=",
"CLOSED",
"self",
".",
"logger",
".",
"debug",
"(",
"'killing %d worker(s)'",
",",
"len",
"(",
"self",
".",
"_killables",
")",
")",
"self",
".",
"_killables",
".",
"kill",
"(",
"block",
"=",
"False",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'closing %d connection(s)'",
",",
"len",
"(",
"self",
".",
"_connections",
")",
")",
"for",
"conn",
"in",
"self",
".",
"_connections",
":",
"conn",
".",
"close_stream",
"(",
")",
"self",
".",
"on_close",
".",
"send",
"(",
"self",
")"
] |
Immediately close all connections and stop workers.
|
[
"Immediately",
"close",
"all",
"connections",
"and",
"stop",
"workers",
"."
] |
python
|
train
|
neovim/pynvim
|
scripts/logging_statement_modifier.py
|
https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/scripts/logging_statement_modifier.py#L172-L197
|
def get_logging_level(logging_stmt, commented_out=False):
"""Determines the level of logging in a given logging statement. The string
representing this level is returned. False is returned if the method is
not a logging statement and thus has no level. None is returned if a level
should have been found but wasn't."""
regexp = RE_LOGGING_START_IN_COMMENT if commented_out else RE_LOGGING_START
ret = regexp.match(logging_stmt)
_, method_name, _, first_arg = ret.groups()
if method_name not in LOGGING_METHODS_OF_INTEREST:
logging.debug('skipping uninteresting logging call: %s' % method_name)
return False
if method_name != 'log':
return method_name
# if the method name did not specify the level, we must have a first_arg to extract the level from
if not first_arg:
logging.warning("logging.log statement found but we couldn't extract the first argument")
return None
# extract the level of logging from the first argument to the log() call
level = first_arg_to_level_name(first_arg)
if level is None:
logging.warning("arg does not contain any known level '%s'\n" % first_arg)
return None
return level
|
[
"def",
"get_logging_level",
"(",
"logging_stmt",
",",
"commented_out",
"=",
"False",
")",
":",
"regexp",
"=",
"RE_LOGGING_START_IN_COMMENT",
"if",
"commented_out",
"else",
"RE_LOGGING_START",
"ret",
"=",
"regexp",
".",
"match",
"(",
"logging_stmt",
")",
"_",
",",
"method_name",
",",
"_",
",",
"first_arg",
"=",
"ret",
".",
"groups",
"(",
")",
"if",
"method_name",
"not",
"in",
"LOGGING_METHODS_OF_INTEREST",
":",
"logging",
".",
"debug",
"(",
"'skipping uninteresting logging call: %s'",
"%",
"method_name",
")",
"return",
"False",
"if",
"method_name",
"!=",
"'log'",
":",
"return",
"method_name",
"# if the method name did not specify the level, we must have a first_arg to extract the level from",
"if",
"not",
"first_arg",
":",
"logging",
".",
"warning",
"(",
"\"logging.log statement found but we couldn't extract the first argument\"",
")",
"return",
"None",
"# extract the level of logging from the first argument to the log() call",
"level",
"=",
"first_arg_to_level_name",
"(",
"first_arg",
")",
"if",
"level",
"is",
"None",
":",
"logging",
".",
"warning",
"(",
"\"arg does not contain any known level '%s'\\n\"",
"%",
"first_arg",
")",
"return",
"None",
"return",
"level"
] |
Determines the level of logging in a given logging statement. The string
representing this level is returned. False is returned if the method is
not a logging statement and thus has no level. None is returned if a level
should have been found but wasn't.
|
[
"Determines",
"the",
"level",
"of",
"logging",
"in",
"a",
"given",
"logging",
"statement",
".",
"The",
"string",
"representing",
"this",
"level",
"is",
"returned",
".",
"False",
"is",
"returned",
"if",
"the",
"method",
"is",
"not",
"a",
"logging",
"statement",
"and",
"thus",
"has",
"no",
"level",
".",
"None",
"is",
"returned",
"if",
"a",
"level",
"should",
"have",
"been",
"found",
"but",
"wasn",
"t",
"."
] |
python
|
train
|
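A hedged illustration of calling `get_logging_level` above. It assumes the script's module-level helpers (RE_LOGGING_START, LOGGING_METHODS_OF_INTEREST, first_arg_to_level_name) match ordinary `logging.<level>(...)` calls; those helpers are defined outside the quoted function, so this is not verified here.

# Assuming the helpers in scripts/logging_statement_modifier.py behave as their names suggest:
print(get_logging_level("logging.info('starting up')"))              # expected: 'info'
print(get_logging_level("logging.debug('details: %s', value)"))      # expected: 'debug'
# For logging.log(...) the level is recovered from the first argument instead:
print(get_logging_level("logging.log(logging.WARNING, 'careful')"))  # expected: a warning-level name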
inveniosoftware-contrib/invenio-classifier
|
invenio_classifier/normalizer.py
|
https://github.com/inveniosoftware-contrib/invenio-classifier/blob/3c758cf34dca6bf0548e7da5de34e5f72e3b255e/invenio_classifier/normalizer.py#L645-L653
|
def _replace_greek_characters(line):
"""Replace greek characters in a string."""
for greek_char, replacement in iteritems(_GREEK_REPLACEMENTS):
try:
line = line.replace(greek_char, replacement)
except UnicodeDecodeError:
current_app.logger.exception("Unicode decoding error.")
return ""
return line
|
[
"def",
"_replace_greek_characters",
"(",
"line",
")",
":",
"for",
"greek_char",
",",
"replacement",
"in",
"iteritems",
"(",
"_GREEK_REPLACEMENTS",
")",
":",
"try",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"greek_char",
",",
"replacement",
")",
"except",
"UnicodeDecodeError",
":",
"current_app",
".",
"logger",
".",
"exception",
"(",
"\"Unicode decoding error.\"",
")",
"return",
"\"\"",
"return",
"line"
] |
Replace greek characters in a string.
|
[
"Replace",
"greek",
"characters",
"in",
"a",
"string",
"."
] |
python
|
train
|
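A minimal standalone sketch of the same substitution loop; the `GREEK_REPLACEMENTS` table below is a hypothetical stand-in for the module-level `_GREEK_REPLACEMENTS` mapping the function iterates over.

# Hypothetical replacement table; the real mapping lives in invenio_classifier.normalizer.
GREEK_REPLACEMENTS = {
    u'\u03b1': u'alpha',
    u'\u03b2': u'beta',
}

def replace_greek_characters(line):
    # Same idea as the record above, without the Flask logging dependency.
    for greek_char, replacement in GREEK_REPLACEMENTS.items():
        line = line.replace(greek_char, replacement)
    return line

print(replace_greek_characters(u'decays to \u03b1 and \u03b2'))  # decays to alpha and beta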
lemieuxl/pyGenClean
|
pyGenClean/SexCheck/sex_check.py
|
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/SexCheck/sex_check.py#L531-L565
|
def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exits with code 1.
"""
# Check if we have the tped and the tfam files
for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
# Check the number of markers on chromosome 23
if args.nbChr23 < 0:
msg = ("{}: number of markers on chr 23 must be "
"positive".format(args.nbChr23))
raise ProgramError(msg)
# If we ask for LRR and BAF, we need a directory
if args.lrr_baf:
if not os.path.isdir(args.lrr_baf_raw_dir):
msg = "{}: no such directory".format(args.lrr_baf_raw_dir)
raise ProgramError(msg)
if args.lrr_baf_dpi < 10:
msg = "{}: DPI too low".format(args.dpi)
raise ProgramError(msg)
return True
|
[
"def",
"checkArgs",
"(",
"args",
")",
":",
"# Check if we have the tped and the tfam files",
"for",
"fileName",
"in",
"[",
"args",
".",
"bfile",
"+",
"i",
"for",
"i",
"in",
"[",
"\".bed\"",
",",
"\".bim\"",
",",
"\".fam\"",
"]",
"]",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fileName",
")",
":",
"msg",
"=",
"\"%(fileName)s: no such file\"",
"%",
"locals",
"(",
")",
"raise",
"ProgramError",
"(",
"msg",
")",
"# Ceck the number of markers on chromosome 23",
"if",
"args",
".",
"nbChr23",
"<",
"0",
":",
"msg",
"=",
"(",
"\"{}: number of markers on chr 23 must be \"",
"\"positive\"",
".",
"format",
"(",
"args",
".",
"nbChr23",
")",
")",
"raise",
"ProgramError",
"(",
"msg",
")",
"# If we ask for LRR and BAF, we need a directory",
"if",
"args",
".",
"lrr_baf",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"args",
".",
"lrr_baf_raw_dir",
")",
":",
"msg",
"=",
"\"{}: no such directory\"",
".",
"format",
"(",
"args",
".",
"lrr_baf_raw_dir",
")",
"raise",
"ProgramError",
"(",
"msg",
")",
"if",
"args",
".",
"lrr_baf_dpi",
"<",
"10",
":",
"msg",
"=",
"\"{}: DPI too low\"",
".",
"format",
"(",
"args",
".",
"dpi",
")",
"raise",
"ProgramError",
"(",
"msg",
")",
"return",
"True"
] |
Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exits with code 1.
|
[
"Checks",
"the",
"arguments",
"and",
"options",
"."
] |
python
|
train
|
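A hedged usage sketch for the function above: it expects an `argparse.Namespace`, and `ProgramError` comes from the same `sex_check` module. The attribute values below are placeholders, not real data.

import argparse

args = argparse.Namespace(
    bfile='data/plink_prefix',   # hypothetical PLINK prefix; .bed/.bim/.fam must exist
    nbChr23=50,
    lrr_baf=False,               # skip the LRR/BAF directory and DPI checks
    lrr_baf_raw_dir=None,
    lrr_baf_dpi=300,
)

try:
    checkArgs(args)
except ProgramError as exc:
    print('invalid options:', exc)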
dropseed/combine
|
combine/cli.py
|
https://github.com/dropseed/combine/blob/b0d622d09fcb121bc12e65f6044cb3a940b6b052/combine/cli.py#L65-L73
|
def highlight_info(ctx, style):
"""Outputs the CSS which can be customized for highlighted code"""
click.secho("The following styles are available to choose from:", fg="green")
click.echo(list(pygments.styles.get_all_styles()))
click.echo()
click.secho(
f'The following CSS for the "{style}" style can be customized:', fg="green"
)
click.echo(pygments.formatters.HtmlFormatter(style=style).get_style_defs())
|
[
"def",
"highlight_info",
"(",
"ctx",
",",
"style",
")",
":",
"click",
".",
"secho",
"(",
"\"The following styles are available to choose from:\"",
",",
"fg",
"=",
"\"green\"",
")",
"click",
".",
"echo",
"(",
"list",
"(",
"pygments",
".",
"styles",
".",
"get_all_styles",
"(",
")",
")",
")",
"click",
".",
"echo",
"(",
")",
"click",
".",
"secho",
"(",
"f'The following CSS for the \"{style}\" style can be customized:'",
",",
"fg",
"=",
"\"green\"",
")",
"click",
".",
"echo",
"(",
"pygments",
".",
"formatters",
".",
"HtmlFormatter",
"(",
"style",
"=",
"style",
")",
".",
"get_style_defs",
"(",
")",
")"
] |
Outputs the CSS which can be customized for highlighted code
|
[
"Outputs",
"the",
"CSS",
"which",
"can",
"be",
"customized",
"for",
"highlighted",
"code"
] |
python
|
test
|
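The pygments calls used above can be exercised directly; a small sketch assuming the built-in `default` style.

from pygments.styles import get_all_styles
from pygments.formatters import HtmlFormatter

print(list(get_all_styles()))                      # available style names
css = HtmlFormatter(style='default').get_style_defs()
print(css.splitlines()[0])                         # first CSS rule only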
ppb/pursuedpybear
|
ppb/engine.py
|
https://github.com/ppb/pursuedpybear/blob/db3bfaaf86d14b4d1bb9e0b24cc8dc63f29c2191/ppb/engine.py#L160-L165
|
def on_replace_scene(self, event: events.ReplaceScene, signal):
"""
Replace the running scene with a new one.
"""
self.stop_scene()
self.start_scene(event.new_scene, event.kwargs)
|
[
"def",
"on_replace_scene",
"(",
"self",
",",
"event",
":",
"events",
".",
"ReplaceScene",
",",
"signal",
")",
":",
"self",
".",
"stop_scene",
"(",
")",
"self",
".",
"start_scene",
"(",
"event",
".",
"new_scene",
",",
"event",
".",
"kwargs",
")"
] |
Replace the running scene with a new one.
|
[
"Replace",
"the",
"running",
"scene",
"with",
"a",
"new",
"one",
"."
] |
python
|
train
|
OCHA-DAP/hdx-python-api
|
src/hdx/data/hdxobject.py
|
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/hdxobject.py#L568-L583
|
def _add_strings_to_commastring(self, field, strings):
# type: (str, List[str]) -> bool
"""Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
"""
allstringsadded = True
for string in strings:
if not self._add_string_to_commastring(field, string):
allstringsadded = False
return allstringsadded
|
[
"def",
"_add_strings_to_commastring",
"(",
"self",
",",
"field",
",",
"strings",
")",
":",
"# type: (str, List[str]) -> bool",
"allstringsadded",
"=",
"True",
"for",
"string",
"in",
"strings",
":",
"if",
"not",
"self",
".",
"_add_string_to_commastring",
"(",
"field",
",",
"string",
")",
":",
"allstringsadded",
"=",
"False",
"return",
"allstringsadded"
] |
Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
|
[
"Add",
"a",
"list",
"of",
"strings",
"to",
"a",
"comma",
"separated",
"list",
"of",
"strings"
] |
python
|
train
|
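A standalone sketch of the same accumulate-and-report pattern over a comma-separated field; `add_one` below is a hypothetical stand-in for `_add_string_to_commastring`.

def add_one(record, field, value):
    # Return False if the value is already present, True once it has been added.
    items = [x.strip() for x in record.get(field, '').split(',') if x.strip()]
    if value in items:
        return False
    items.append(value)
    record[field] = ', '.join(items)
    return True

def add_many(record, field, values):
    all_added = True
    for value in values:
        if not add_one(record, field, value):
            all_added = False
    return all_added

record = {'tags': 'health, education'}
print(add_many(record, 'tags', ['health', 'water']))  # False: 'health' was already present
print(record['tags'])                                 # health, education, water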
ArduPilot/MAVProxy
|
MAVProxy/modules/mavproxy_map/mp_tile.py
|
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_map/mp_tile.py#L519-L547
|
def area_to_image(self, lat, lon, width, height, ground_width, zoom=None, ordered=True):
'''return an RGB image for an area of land, with ground_width
in meters, and width/height in pixels.
lat/lon is the top left corner. The zoom is automatically
chosen to avoid having to grow the tiles'''
img = np.zeros((height,width,3), np.uint8)
tlist = self.area_to_tile_list(lat, lon, width, height, ground_width, zoom)
# order the display by distance from the middle, so the download happens
# close to the middle of the image first
if ordered:
(midlat, midlon) = self.coord_from_area(width/2, height/2, lat, lon, width, ground_width)
tlist.sort(key=lambda d: d.distance(midlat, midlon), reverse=True)
for t in tlist:
scaled_tile = self.scaled_tile(t)
w = min(width - t.dstx, scaled_tile.shape[1] - t.srcx)
h = min(height - t.dsty, scaled_tile.shape[0] - t.srcy)
if w > 0 and h > 0:
scaled_tile_roi = scaled_tile[t.srcy:t.srcy+h, t.srcx:t.srcx+w]
img[t.dsty:t.dsty+h, t.dstx:t.dstx+w] = scaled_tile_roi.copy()
# return as an RGB image
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
|
[
"def",
"area_to_image",
"(",
"self",
",",
"lat",
",",
"lon",
",",
"width",
",",
"height",
",",
"ground_width",
",",
"zoom",
"=",
"None",
",",
"ordered",
"=",
"True",
")",
":",
"img",
"=",
"np",
".",
"zeros",
"(",
"(",
"height",
",",
"width",
",",
"3",
")",
",",
"np",
".",
"uint8",
")",
"tlist",
"=",
"self",
".",
"area_to_tile_list",
"(",
"lat",
",",
"lon",
",",
"width",
",",
"height",
",",
"ground_width",
",",
"zoom",
")",
"# order the display by distance from the middle, so the download happens",
"# close to the middle of the image first",
"if",
"ordered",
":",
"(",
"midlat",
",",
"midlon",
")",
"=",
"self",
".",
"coord_from_area",
"(",
"width",
"/",
"2",
",",
"height",
"/",
"2",
",",
"lat",
",",
"lon",
",",
"width",
",",
"ground_width",
")",
"tlist",
".",
"sort",
"(",
"key",
"=",
"lambda",
"d",
":",
"d",
".",
"distance",
"(",
"midlat",
",",
"midlon",
")",
",",
"reverse",
"=",
"True",
")",
"for",
"t",
"in",
"tlist",
":",
"scaled_tile",
"=",
"self",
".",
"scaled_tile",
"(",
"t",
")",
"w",
"=",
"min",
"(",
"width",
"-",
"t",
".",
"dstx",
",",
"scaled_tile",
".",
"shape",
"[",
"1",
"]",
"-",
"t",
".",
"srcx",
")",
"h",
"=",
"min",
"(",
"height",
"-",
"t",
".",
"dsty",
",",
"scaled_tile",
".",
"shape",
"[",
"0",
"]",
"-",
"t",
".",
"srcy",
")",
"if",
"w",
">",
"0",
"and",
"h",
">",
"0",
":",
"scaled_tile_roi",
"=",
"scaled_tile",
"[",
"t",
".",
"srcy",
":",
"t",
".",
"srcy",
"+",
"h",
",",
"t",
".",
"srcx",
":",
"t",
".",
"srcx",
"+",
"w",
"]",
"img",
"[",
"t",
".",
"dsty",
":",
"t",
".",
"dsty",
"+",
"h",
",",
"t",
".",
"dstx",
":",
"t",
".",
"dstx",
"+",
"w",
"]",
"=",
"scaled_tile_roi",
".",
"copy",
"(",
")",
"# return as an RGB image",
"img",
"=",
"cv2",
".",
"cvtColor",
"(",
"img",
",",
"cv2",
".",
"COLOR_BGR2RGB",
")",
"return",
"img"
] |
return an RGB image for an area of land, with ground_width
in meters, and width/height in pixels.
lat/lon is the top left corner. The zoom is automatically
chosen to avoid having to grow the tiles
|
[
"return",
"an",
"RGB",
"image",
"for",
"an",
"area",
"of",
"land",
"with",
"ground_width",
"in",
"meters",
"and",
"width",
"/",
"height",
"in",
"pixels",
"."
] |
python
|
train
|
vmonaco/pohmm
|
pohmm/utils.py
|
https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/utils.py#L122-L134
|
def steadystate(A, max_iter=100):
"""
Empirically determine the steady state probabilities from a stochastic matrix
"""
P = np.linalg.matrix_power(A, max_iter)
# Determine the unique rows in A
v = []
for i in range(len(P)):
if not np.any([np.allclose(P[i], vi, ) for vi in v]):
v.append(P[i])
return normalize(np.sum(v, axis=0))
|
[
"def",
"steadystate",
"(",
"A",
",",
"max_iter",
"=",
"100",
")",
":",
"P",
"=",
"np",
".",
"linalg",
".",
"matrix_power",
"(",
"A",
",",
"max_iter",
")",
"# Determine the unique rows in A",
"v",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"P",
")",
")",
":",
"if",
"not",
"np",
".",
"any",
"(",
"[",
"np",
".",
"allclose",
"(",
"P",
"[",
"i",
"]",
",",
"vi",
",",
")",
"for",
"vi",
"in",
"v",
"]",
")",
":",
"v",
".",
"append",
"(",
"P",
"[",
"i",
"]",
")",
"return",
"normalize",
"(",
"np",
".",
"sum",
"(",
"v",
",",
"axis",
"=",
"0",
")",
")"
] |
Empirically determine the steady state probabilities from a stochastic matrix
|
[
"Empirically",
"determine",
"the",
"steady",
"state",
"probabilities",
"from",
"a",
"stochastic",
"matrix"
] |
python
|
train
|
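A quick numerical check of the same idea on a two-state chain; `normalize` here is a hypothetical stand-in that simply rescales a vector to sum to one, mirroring what the module's helper is used for.

import numpy as np

def normalize(v):
    v = np.asarray(v, dtype=float)
    return v / v.sum()

A = np.array([[0.9, 0.1],
              [0.5, 0.5]])
P = np.linalg.matrix_power(A, 100)   # every row converges to the stationary distribution
print(normalize(P.sum(axis=0)))      # approximately [0.833, 0.167]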
sibirrer/lenstronomy
|
lenstronomy/LensModel/lens_model.py
|
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/lens_model.py#L189-L210
|
def flexion(self, x, y, kwargs, diff=0.000001):
"""
third derivatives (flexion)
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param diff: numerical differential length of Hessian
:return: f_xxx, f_xxy, f_xyy, f_yyy
"""
f_xx, f_xy, f_yx, f_yy = self.hessian(x, y, kwargs)
f_xx_dx, f_xy_dx, f_yx_dx, f_yy_dx = self.hessian(x + diff, y, kwargs)
f_xx_dy, f_xy_dy, f_yx_dy, f_yy_dy = self.hessian(x, y + diff, kwargs)
f_xxx = (f_xx_dx - f_xx) / diff
f_xxy = (f_xx_dy - f_xx) / diff
f_xyy = (f_xy_dy - f_xy) / diff
f_yyy = (f_yy_dy - f_yy) / diff
return f_xxx, f_xxy, f_xyy, f_yyy
|
[
"def",
"flexion",
"(",
"self",
",",
"x",
",",
"y",
",",
"kwargs",
",",
"diff",
"=",
"0.000001",
")",
":",
"f_xx",
",",
"f_xy",
",",
"f_yx",
",",
"f_yy",
"=",
"self",
".",
"hessian",
"(",
"x",
",",
"y",
",",
"kwargs",
")",
"f_xx_dx",
",",
"f_xy_dx",
",",
"f_yx_dx",
",",
"f_yy_dx",
"=",
"self",
".",
"hessian",
"(",
"x",
"+",
"diff",
",",
"y",
",",
"kwargs",
")",
"f_xx_dy",
",",
"f_xy_dy",
",",
"f_yx_dy",
",",
"f_yy_dy",
"=",
"self",
".",
"hessian",
"(",
"x",
",",
"y",
"+",
"diff",
",",
"kwargs",
")",
"f_xxx",
"=",
"(",
"f_xx_dx",
"-",
"f_xx",
")",
"/",
"diff",
"f_xxy",
"=",
"(",
"f_xx_dy",
"-",
"f_xx",
")",
"/",
"diff",
"f_xyy",
"=",
"(",
"f_xy_dy",
"-",
"f_xy",
")",
"/",
"diff",
"f_yyy",
"=",
"(",
"f_yy_dy",
"-",
"f_yy",
")",
"/",
"diff",
"return",
"f_xxx",
",",
"f_xxy",
",",
"f_xyy",
",",
"f_yyy"
] |
third derivatives (flexion)
:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param diff: numerical differential length of Hessian
:return: f_xxx, f_xxy, f_xyy, f_yyy
|
[
"third",
"derivatives",
"(",
"flexion",
")"
] |
python
|
train
|
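The forward-difference step used above is easy to validate on a scalar function whose third derivative is known; a minimal sketch, independent of any lens model.

def second_derivative(f, x, h=1e-4):
    # Central-difference second derivative, standing in for hessian().
    return (f(x + h) - 2.0 * f(x) + f(x - h)) / h ** 2

def third_derivative(f, x, diff=1e-4):
    # Same pattern as flexion(): forward difference of the second derivative.
    return (second_derivative(f, x + diff) - second_derivative(f, x)) / diff

f = lambda x: x ** 4              # exact third derivative is 24 * x
print(third_derivative(f, 1.0))   # approximately 24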
manns/pyspread
|
pyspread/src/actions/_grid_cell_actions.py
|
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_cell_actions.py#L425-L445
|
def get_new_cell_attr_state(self, key, attr_key):
"""Returns new attr cell state for toggles
Parameters
----------
key: 3-Tuple
\tCell for which attr toggle shall be returned
attr_key: Hashable
\tAttribute key
"""
cell_attributes = self.grid.code_array.cell_attributes
attr_values = self.attr_toggle_values[attr_key]
# Map attr_value to next attr_value
attr_map = dict(zip(attr_values, attr_values[1:] + attr_values[:1]))
# Return next value from attr_toggle_values value list
return attr_map[cell_attributes[key][attr_key]]
|
[
"def",
"get_new_cell_attr_state",
"(",
"self",
",",
"key",
",",
"attr_key",
")",
":",
"cell_attributes",
"=",
"self",
".",
"grid",
".",
"code_array",
".",
"cell_attributes",
"attr_values",
"=",
"self",
".",
"attr_toggle_values",
"[",
"attr_key",
"]",
"# Map attr_value to next attr_value",
"attr_map",
"=",
"dict",
"(",
"zip",
"(",
"attr_values",
",",
"attr_values",
"[",
"1",
":",
"]",
"+",
"attr_values",
"[",
":",
"1",
"]",
")",
")",
"# Return next value from attr_toggle_values value list",
"return",
"attr_map",
"[",
"cell_attributes",
"[",
"key",
"]",
"[",
"attr_key",
"]",
"]"
] |
Returns new attr cell state for toggles
Parameters
----------
key: 3-Tuple
\tCell for which attr toggle shall be returned
attr_key: Hashable
\tAttribute key
|
[
"Returns",
"new",
"attr",
"cell",
"state",
"for",
"toggles"
] |
python
|
train
|
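The toggle relies on a rotate-by-one mapping over the allowed attribute values; a tiny standalone sketch of that `dict(zip(...))` trick.

values = ['left', 'center', 'right']

# Map every value to the next one, wrapping around at the end.
next_value = dict(zip(values, values[1:] + values[:1]))

state = 'left'
for _ in range(4):
    state = next_value[state]
    print(state)   # center, right, left, center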
apache/incubator-mxnet
|
example/rcnn/symdata/anchor.py
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rcnn/symdata/anchor.py#L44-L53
|
def _generate_base_anchors(base_size, scales, ratios):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = AnchorGenerator._ratio_enum(base_anchor, ratios)
anchors = np.vstack([AnchorGenerator._scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors
|
[
"def",
"_generate_base_anchors",
"(",
"base_size",
",",
"scales",
",",
"ratios",
")",
":",
"base_anchor",
"=",
"np",
".",
"array",
"(",
"[",
"1",
",",
"1",
",",
"base_size",
",",
"base_size",
"]",
")",
"-",
"1",
"ratio_anchors",
"=",
"AnchorGenerator",
".",
"_ratio_enum",
"(",
"base_anchor",
",",
"ratios",
")",
"anchors",
"=",
"np",
".",
"vstack",
"(",
"[",
"AnchorGenerator",
".",
"_scale_enum",
"(",
"ratio_anchors",
"[",
"i",
",",
":",
"]",
",",
"scales",
")",
"for",
"i",
"in",
"range",
"(",
"ratio_anchors",
".",
"shape",
"[",
"0",
"]",
")",
"]",
")",
"return",
"anchors"
] |
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
|
[
"Generate",
"anchor",
"(",
"reference",
")",
"windows",
"by",
"enumerating",
"aspect",
"ratios",
"X",
"scales",
"wrt",
"a",
"reference",
"(",
"0",
"0",
"15",
"15",
")",
"window",
"."
] |
python
|
train
|
tammoippen/iso4217parse
|
iso4217parse/__init__.py
|
https://github.com/tammoippen/iso4217parse/blob/dd2971bd66e83424c43d16d6b54b3f6d0c4201cf/iso4217parse/__init__.py#L157-L181
|
def by_symbol(symbol, country_code=None):
"""Get list of possible currencies for symbol; filter by country_code
Look for all currencies that use the `symbol`. If there are currencies used
in the country of `country_code`, return only those; otherwise return all
found currencies.
Parameters:
symbol: unicode Currency symbol.
country_code: Optional[unicode] Iso3166 alpha2 country code.
Returns:
List[Currency]: Currency objects for `symbol`; filter by country_code.
"""
res = _data()['symbol'].get(symbol)
if res:
tmp_res = []
for d in res:
if country_code in d.countries:
tmp_res += [d]
if tmp_res:
return tmp_res
if country_code is None:
return res
|
[
"def",
"by_symbol",
"(",
"symbol",
",",
"country_code",
"=",
"None",
")",
":",
"res",
"=",
"_data",
"(",
")",
"[",
"'symbol'",
"]",
".",
"get",
"(",
"symbol",
")",
"if",
"res",
":",
"tmp_res",
"=",
"[",
"]",
"for",
"d",
"in",
"res",
":",
"if",
"country_code",
"in",
"d",
".",
"countries",
":",
"tmp_res",
"+=",
"[",
"d",
"]",
"if",
"tmp_res",
":",
"return",
"tmp_res",
"if",
"country_code",
"is",
"None",
":",
"return",
"res"
] |
Get list of possible currencies for symbol; filter by country_code
Look for all currencies that use the `symbol`. If there are currencies used
in the country of `country_code`, return only those; otherwise return all
found currencies.
Parameters:
symbol: unicode Currency symbol.
country_code: Optional[unicode] Iso3166 alpha2 country code.
Returns:
List[Currency]: Currency objects for `symbol`; filter by country_code.
|
[
"Get",
"list",
"of",
"possible",
"currencies",
"for",
"symbol",
";",
"filter",
"by",
"country_code"
] |
python
|
train
|
ronaldguillen/wave
|
wave/metadata.py
|
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/metadata.py#L115-L149
|
def get_field_info(self, field):
"""
Given an instance of a serializer field, return a dictionary
of metadata about it.
"""
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
attrs = [
'read_only', 'label', 'help_text',
'min_length', 'max_length',
'min_value', 'max_value'
]
for attr in attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
if getattr(field, 'child', None):
field_info['child'] = self.get_field_info(field.child)
elif getattr(field, 'fields', None):
field_info['children'] = self.get_serializer_info(field)
if not field_info.get('read_only') and hasattr(field, 'choices'):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
return field_info
|
[
"def",
"get_field_info",
"(",
"self",
",",
"field",
")",
":",
"field_info",
"=",
"OrderedDict",
"(",
")",
"field_info",
"[",
"'type'",
"]",
"=",
"self",
".",
"label_lookup",
"[",
"field",
"]",
"field_info",
"[",
"'required'",
"]",
"=",
"getattr",
"(",
"field",
",",
"'required'",
",",
"False",
")",
"attrs",
"=",
"[",
"'read_only'",
",",
"'label'",
",",
"'help_text'",
",",
"'min_length'",
",",
"'max_length'",
",",
"'min_value'",
",",
"'max_value'",
"]",
"for",
"attr",
"in",
"attrs",
":",
"value",
"=",
"getattr",
"(",
"field",
",",
"attr",
",",
"None",
")",
"if",
"value",
"is",
"not",
"None",
"and",
"value",
"!=",
"''",
":",
"field_info",
"[",
"attr",
"]",
"=",
"force_text",
"(",
"value",
",",
"strings_only",
"=",
"True",
")",
"if",
"getattr",
"(",
"field",
",",
"'child'",
",",
"None",
")",
":",
"field_info",
"[",
"'child'",
"]",
"=",
"self",
".",
"get_field_info",
"(",
"field",
".",
"child",
")",
"elif",
"getattr",
"(",
"field",
",",
"'fields'",
",",
"None",
")",
":",
"field_info",
"[",
"'children'",
"]",
"=",
"self",
".",
"get_serializer_info",
"(",
"field",
")",
"if",
"not",
"field_info",
".",
"get",
"(",
"'read_only'",
")",
"and",
"hasattr",
"(",
"field",
",",
"'choices'",
")",
":",
"field_info",
"[",
"'choices'",
"]",
"=",
"[",
"{",
"'value'",
":",
"choice_value",
",",
"'display_name'",
":",
"force_text",
"(",
"choice_name",
",",
"strings_only",
"=",
"True",
")",
"}",
"for",
"choice_value",
",",
"choice_name",
"in",
"field",
".",
"choices",
".",
"items",
"(",
")",
"]",
"return",
"field_info"
] |
Given an instance of a serializer field, return a dictionary
of metadata about it.
|
[
"Given",
"an",
"instance",
"of",
"a",
"serializer",
"field",
"return",
"a",
"dictionary",
"of",
"metadata",
"about",
"it",
"."
] |
python
|
train
|
bitesofcode/projexui
|
projexui/xsettings.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/xsettings.py#L156-L197
|
def restoreValue(self, xelem):
"""
Stores the value for the inputted instance to the given xml element.
:param xelem | <xml.etree.Element>
:return <variant>
"""
typ = xelem.get('type')
if typ == 'color':
return QtGui.QColor(xelem.text)
elif typ == 'point':
return QtCore.QPoint(*map(int, xelem.text.split(',')))
elif typ == 'pointf':
return QtCore.QPointF(*map(float, xelem.text.split(',')))
elif typ == 'rect':
return QtCore.QRectF(*map(int, xelem.text.split(',')))
elif typ == 'rectf':
return QtCore.QRectF(*map(float, xelem.text.split(',')))
elif typ == 'bytea':
return QtCore.QByteArray(cPickle.loads(xelem.text))
elif typ == 'pickle':
return cPickle.loads(xelem.text)
elif typ == 'xml':
return xelem[0]
elif typ in ('str', 'unicode'):
return xelem.text
else:
try:
return eval('{0}({1})'.format(typ, xelem.text))
except:
return None
|
[
"def",
"restoreValue",
"(",
"self",
",",
"xelem",
")",
":",
"typ",
"=",
"xelem",
".",
"get",
"(",
"'type'",
")",
"if",
"typ",
"==",
"'color'",
":",
"return",
"QtGui",
".",
"QColor",
"(",
"xelem",
".",
"text",
")",
"elif",
"typ",
"==",
"'point'",
":",
"return",
"QtCore",
".",
"QPoint",
"(",
"*",
"map",
"(",
"int",
",",
"xelem",
".",
"text",
".",
"split",
"(",
"','",
")",
")",
")",
"elif",
"typ",
"==",
"'pointf'",
":",
"return",
"QtCore",
".",
"QPointF",
"(",
"*",
"map",
"(",
"float",
",",
"xelem",
".",
"text",
".",
"split",
"(",
"','",
")",
")",
")",
"elif",
"typ",
"==",
"'rect'",
":",
"return",
"QtCore",
".",
"QRectF",
"(",
"*",
"map",
"(",
"int",
",",
"xelem",
".",
"text",
".",
"split",
"(",
"','",
")",
")",
")",
"elif",
"typ",
"==",
"'rectf'",
":",
"return",
"QtCore",
".",
"QRectF",
"(",
"*",
"map",
"(",
"float",
",",
"xelem",
".",
"text",
".",
"split",
"(",
"','",
")",
")",
")",
"elif",
"typ",
"==",
"'bytea'",
":",
"return",
"QtCore",
".",
"QByteArray",
"(",
"cPickle",
".",
"loads",
"(",
"xelem",
".",
"text",
")",
")",
"elif",
"typ",
"==",
"'pickle'",
":",
"return",
"cPickle",
".",
"loads",
"(",
"xelem",
".",
"text",
")",
"elif",
"typ",
"==",
"'xml'",
":",
"return",
"xelem",
"[",
"0",
"]",
"elif",
"typ",
"in",
"(",
"'str'",
",",
"'unicode'",
")",
":",
"return",
"xelem",
".",
"text",
"else",
":",
"try",
":",
"return",
"eval",
"(",
"'{0}({1})'",
".",
"format",
"(",
"typ",
",",
"xelem",
".",
"text",
")",
")",
"except",
":",
"return",
"None"
] |
Stores the value for the inputted instance to the given xml element.
:param xelem | <xml.etree.Element>
:return <variant>
|
[
"Stores",
"the",
"value",
"for",
"the",
"inptued",
"instance",
"to",
"the",
"given",
"xml",
"element",
".",
":",
"param",
"xelem",
"|",
"<xml",
".",
"etree",
".",
"Element",
">",
":",
"return",
"<variant",
">"
] |
python
|
train
|
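A hedged round-trip sketch for one of the simpler branches, using `xml.etree.ElementTree` directly and no Qt; the element layout (a `type` attribute plus comma-separated text) mirrors what the method expects.

import xml.etree.ElementTree as ET

elem = ET.Element('value', attrib={'type': 'point'})
elem.text = '10,20'

# The 'point' branch above does: QtCore.QPoint(*map(int, xelem.text.split(',')))
coords = tuple(map(int, elem.text.split(',')))
print(elem.get('type'), coords)   # point (10, 20)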
google/prettytensor
|
prettytensor/pretty_tensor_loss_methods.py
|
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_loss_methods.py#L580-L600
|
def _eval_metric(input_, topk, correct_predictions, examples, phase):
"""Creates the standard tracking varibles if in test and returns accuracy."""
my_parameters = {}
if phase in (Phase.test, Phase.infer):
dtype = tf.float32
# Create the variables using tf.Variable because we don't want to share.
count = tf.Variable(tf.constant(0, dtype=dtype),
name='count_%d' % topk,
collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
trainable=False)
correct = tf.Variable(tf.constant(0, dtype=dtype),
name='correct_%d' % topk,
collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
trainable=False)
my_parameters['count'] = count
my_parameters['correct'] = correct
with input_.g.device(count.device):
examples = tf.assign_add(count, examples)
with input_.g.device(correct.device):
correct_predictions = tf.assign_add(correct, correct_predictions)
return correct_predictions, examples, my_parameters
|
[
"def",
"_eval_metric",
"(",
"input_",
",",
"topk",
",",
"correct_predictions",
",",
"examples",
",",
"phase",
")",
":",
"my_parameters",
"=",
"{",
"}",
"if",
"phase",
"in",
"(",
"Phase",
".",
"test",
",",
"Phase",
".",
"infer",
")",
":",
"dtype",
"=",
"tf",
".",
"float32",
"# Create the variables using tf.Variable because we don't want to share.",
"count",
"=",
"tf",
".",
"Variable",
"(",
"tf",
".",
"constant",
"(",
"0",
",",
"dtype",
"=",
"dtype",
")",
",",
"name",
"=",
"'count_%d'",
"%",
"topk",
",",
"collections",
"=",
"[",
"bookkeeper",
".",
"GraphKeys",
".",
"TEST_VARIABLES",
"]",
",",
"trainable",
"=",
"False",
")",
"correct",
"=",
"tf",
".",
"Variable",
"(",
"tf",
".",
"constant",
"(",
"0",
",",
"dtype",
"=",
"dtype",
")",
",",
"name",
"=",
"'correct_%d'",
"%",
"topk",
",",
"collections",
"=",
"[",
"bookkeeper",
".",
"GraphKeys",
".",
"TEST_VARIABLES",
"]",
",",
"trainable",
"=",
"False",
")",
"my_parameters",
"[",
"'count'",
"]",
"=",
"count",
"my_parameters",
"[",
"'correct'",
"]",
"=",
"correct",
"with",
"input_",
".",
"g",
".",
"device",
"(",
"count",
".",
"device",
")",
":",
"examples",
"=",
"tf",
".",
"assign_add",
"(",
"count",
",",
"examples",
")",
"with",
"input_",
".",
"g",
".",
"device",
"(",
"correct",
".",
"device",
")",
":",
"correct_predictions",
"=",
"tf",
".",
"assign_add",
"(",
"correct",
",",
"correct_predictions",
")",
"return",
"correct_predictions",
",",
"examples",
",",
"my_parameters"
] |
Creates the standard tracking variables if in test and returns accuracy.
|
[
"Creates",
"the",
"standard",
"tracking",
"varibles",
"if",
"in",
"test",
"and",
"returns",
"accuracy",
"."
] |
python
|
train
|
pazz/alot
|
alot/helper.py
|
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L416-L436
|
def libmagic_version_at_least(version):
"""
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
"""
if hasattr(magic, 'open'):
magic_wrapper = magic._libraries['magic']
elif hasattr(magic, 'from_buffer'):
magic_wrapper = magic.libmagic
else:
raise Exception('Unknown magic API')
if not hasattr(magic_wrapper, 'magic_version'):
# The magic_version function has been introduced in libmagic 5.13,
# if it's not present, we can't guess right, so let's assume False
return False
return magic_wrapper.magic_version >= version
|
[
"def",
"libmagic_version_at_least",
"(",
"version",
")",
":",
"if",
"hasattr",
"(",
"magic",
",",
"'open'",
")",
":",
"magic_wrapper",
"=",
"magic",
".",
"_libraries",
"[",
"'magic'",
"]",
"elif",
"hasattr",
"(",
"magic",
",",
"'from_buffer'",
")",
":",
"magic_wrapper",
"=",
"magic",
".",
"libmagic",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown magic API'",
")",
"if",
"not",
"hasattr",
"(",
"magic_wrapper",
",",
"'magic_version'",
")",
":",
"# The magic_version function has been introduced in libmagic 5.13,",
"# if it's not present, we can't guess right, so let's assume False",
"return",
"False",
"return",
"magic_wrapper",
".",
"magic_version",
">=",
"version"
] |
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
|
[
"checks",
"if",
"the",
"libmagic",
"library",
"installed",
"is",
"more",
"recent",
"than",
"a",
"given",
"version",
"."
] |
python
|
train
|
edx/edx-enterprise
|
enterprise/api/v1/decorators.py
|
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/v1/decorators.py#L14-L52
|
def enterprise_customer_required(view):
"""
Ensure the user making the API request is associated with an EnterpriseCustomer.
This decorator attempts to find an EnterpriseCustomer associated with the requesting
user and passes that EnterpriseCustomer to the view as a parameter. It will return a
PermissionDenied error if an EnterpriseCustomer cannot be found.
Usage::
@enterprise_customer_required()
def my_view(request, enterprise_customer):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_customer_required)
def get(self, request, enterprise_customer):
# Some functionality ...
"""
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Checks for an enterprise customer associated with the user, calls the view function
if one exists, raises PermissionDenied if not.
"""
user = request.user
enterprise_customer = get_enterprise_customer_for_user(user)
if enterprise_customer:
args = args + (enterprise_customer,)
return view(request, *args, **kwargs)
else:
raise PermissionDenied(
'User {username} is not associated with an EnterpriseCustomer.'.format(
username=user.username
)
)
return wrapper
|
[
"def",
"enterprise_customer_required",
"(",
"view",
")",
":",
"@",
"wraps",
"(",
"view",
")",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Checks for an enterprise customer associated with the user, calls the view function\n if one exists, raises PermissionDenied if not.\n \"\"\"",
"user",
"=",
"request",
".",
"user",
"enterprise_customer",
"=",
"get_enterprise_customer_for_user",
"(",
"user",
")",
"if",
"enterprise_customer",
":",
"args",
"=",
"args",
"+",
"(",
"enterprise_customer",
",",
")",
"return",
"view",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"PermissionDenied",
"(",
"'User {username} is not associated with an EnterpriseCustomer.'",
".",
"format",
"(",
"username",
"=",
"user",
".",
"username",
")",
")",
"return",
"wrapper"
] |
Ensure the user making the API request is associated with an EnterpriseCustomer.
This decorator attempts to find an EnterpriseCustomer associated with the requesting
user and passes that EnterpriseCustomer to the view as a parameter. It will return a
PermissionDenied error if an EnterpriseCustomer cannot be found.
Usage::
@enterprise_customer_required()
def my_view(request, enterprise_customer):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_customer_required)
def get(self, request, enterprise_customer):
# Some functionality ...
|
[
"Ensure",
"the",
"user",
"making",
"the",
"API",
"request",
"is",
"associated",
"with",
"an",
"EnterpriseCustomer",
"."
] |
python
|
valid
|
dead-beef/markovchain
|
markovchain/image/scanner.py
|
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/image/scanner.py#L271-L285
|
def save(self):
"""Convert to JSON.
Returns
-------
`dict`
JSON data.
"""
data = super().save()
data['resize'] = list(self.resize) if self.resize is not None else None
data['traversal'] = [t.save() for t in self.traversal]
data['levels'] = self.levels
data['level_scale'] = self.level_scale
data['scale'] = self.scale
return data
|
[
"def",
"save",
"(",
"self",
")",
":",
"data",
"=",
"super",
"(",
")",
".",
"save",
"(",
")",
"data",
"[",
"'resize'",
"]",
"=",
"list",
"(",
"self",
".",
"resize",
")",
"if",
"self",
".",
"resize",
"is",
"not",
"None",
"else",
"None",
"data",
"[",
"'traversal'",
"]",
"=",
"[",
"t",
".",
"save",
"(",
")",
"for",
"t",
"in",
"self",
".",
"traversal",
"]",
"data",
"[",
"'levels'",
"]",
"=",
"self",
".",
"levels",
"data",
"[",
"'level_scale'",
"]",
"=",
"self",
".",
"level_scale",
"data",
"[",
"'scale'",
"]",
"=",
"self",
".",
"scale",
"return",
"data"
] |
Convert to JSON.
Returns
-------
`dict`
JSON data.
|
[
"Convert",
"to",
"JSON",
"."
] |
python
|
train
|
StellarCN/py-stellar-base
|
stellar_base/operation.py
|
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/operation.py#L443-L458
|
def from_xdr_object(cls, op_xdr_object):
"""Creates a :class:`ChangeTrust` object from an XDR Operation
object.
"""
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check(
'account', op_xdr_object.sourceAccount[0].ed25519).decode()
line = Asset.from_xdr_object(op_xdr_object.body.changeTrustOp.line)
limit = Operation.from_xdr_amount(
op_xdr_object.body.changeTrustOp.limit)
return cls(source=source, asset=line, limit=limit)
|
[
"def",
"from_xdr_object",
"(",
"cls",
",",
"op_xdr_object",
")",
":",
"if",
"not",
"op_xdr_object",
".",
"sourceAccount",
":",
"source",
"=",
"None",
"else",
":",
"source",
"=",
"encode_check",
"(",
"'account'",
",",
"op_xdr_object",
".",
"sourceAccount",
"[",
"0",
"]",
".",
"ed25519",
")",
".",
"decode",
"(",
")",
"line",
"=",
"Asset",
".",
"from_xdr_object",
"(",
"op_xdr_object",
".",
"body",
".",
"changeTrustOp",
".",
"line",
")",
"limit",
"=",
"Operation",
".",
"from_xdr_amount",
"(",
"op_xdr_object",
".",
"body",
".",
"changeTrustOp",
".",
"limit",
")",
"return",
"cls",
"(",
"source",
"=",
"source",
",",
"asset",
"=",
"line",
",",
"limit",
"=",
"limit",
")"
] |
Creates a :class:`ChangeTrust` object from an XDR Operation
object.
|
[
"Creates",
"a",
":",
"class",
":",
"ChangeTrust",
"object",
"from",
"an",
"XDR",
"Operation",
"object",
"."
] |
python
|
train
|
PythonCharmers/python-future
|
src/future/backports/urllib/robotparser.py
|
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/robotparser.py#L55-L58
|
def set_url(self, url):
"""Sets the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urllib.parse.urlparse(url)[1:3]
|
[
"def",
"set_url",
"(",
"self",
",",
"url",
")",
":",
"self",
".",
"url",
"=",
"url",
"self",
".",
"host",
",",
"self",
".",
"path",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"url",
")",
"[",
"1",
":",
"3",
"]"
] |
Sets the URL referring to a robots.txt file.
|
[
"Sets",
"the",
"URL",
"referring",
"to",
"a",
"robots",
".",
"txt",
"file",
"."
] |
python
|
train
|
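A quick look at what the `[1:3]` slice of `urlparse` yields (netloc and path), which is exactly what `set_url` stores; shown here with the standard-library `urllib.parse` rather than the backport.

import urllib.parse

parts = urllib.parse.urlparse('https://www.example.com/robots.txt')
host, path = parts[1:3]
print(host, path)   # www.example.com /robots.txt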
fabioz/PyDev.Debugger
|
pydevd_attach_to_process/winappdbg/debug.py
|
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/debug.py#L211-L264
|
def attach(self, dwProcessId):
"""
Attaches to an existing process for debugging.
@see: L{detach}, L{execv}, L{execl}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to attach to.
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error.
Depending on the circumstances, the debugger may or may not have
attached to the target process.
"""
# Get the Process object from the snapshot,
# if missing create a new one.
try:
aProcess = self.system.get_process(dwProcessId)
except KeyError:
aProcess = Process(dwProcessId)
# Warn when mixing 32 and 64 bits.
# This also allows the user to stop attaching altogether,
# depending on how the warnings are configured.
if System.bits != aProcess.get_bits():
msg = "Mixture of 32 and 64 bits is considered experimental." \
" Use at your own risk!"
warnings.warn(msg, MixedBitsWarning)
# Attach to the process.
win32.DebugActiveProcess(dwProcessId)
# Add the new PID to the set of debugees.
self.__attachedDebugees.add(dwProcessId)
# Match the system kill-on-exit flag to our own.
self.__setSystemKillOnExitMode()
# If the Process object was not in the snapshot, add it now.
if not self.system.has_process(dwProcessId):
self.system._add_process(aProcess)
# Scan the process threads and loaded modules.
# This is preferred because the thread and library events do not
# properly give some information, like the filename for each module.
aProcess.scan_threads()
aProcess.scan_modules()
# Return the Process object, like the execv() and execl() methods.
return aProcess
|
[
"def",
"attach",
"(",
"self",
",",
"dwProcessId",
")",
":",
"# Get the Process object from the snapshot,",
"# if missing create a new one.",
"try",
":",
"aProcess",
"=",
"self",
".",
"system",
".",
"get_process",
"(",
"dwProcessId",
")",
"except",
"KeyError",
":",
"aProcess",
"=",
"Process",
"(",
"dwProcessId",
")",
"# Warn when mixing 32 and 64 bits.",
"# This also allows the user to stop attaching altogether,",
"# depending on how the warnings are configured.",
"if",
"System",
".",
"bits",
"!=",
"aProcess",
".",
"get_bits",
"(",
")",
":",
"msg",
"=",
"\"Mixture of 32 and 64 bits is considered experimental.\"",
"\" Use at your own risk!\"",
"warnings",
".",
"warn",
"(",
"msg",
",",
"MixedBitsWarning",
")",
"# Attach to the process.",
"win32",
".",
"DebugActiveProcess",
"(",
"dwProcessId",
")",
"# Add the new PID to the set of debugees.",
"self",
".",
"__attachedDebugees",
".",
"add",
"(",
"dwProcessId",
")",
"# Match the system kill-on-exit flag to our own.",
"self",
".",
"__setSystemKillOnExitMode",
"(",
")",
"# If the Process object was not in the snapshot, add it now.",
"if",
"not",
"self",
".",
"system",
".",
"has_process",
"(",
"dwProcessId",
")",
":",
"self",
".",
"system",
".",
"_add_process",
"(",
"aProcess",
")",
"# Scan the process threads and loaded modules.",
"# This is prefered because the thread and library events do not",
"# properly give some information, like the filename for each module.",
"aProcess",
".",
"scan_threads",
"(",
")",
"aProcess",
".",
"scan_modules",
"(",
")",
"# Return the Process object, like the execv() and execl() methods.",
"return",
"aProcess"
] |
Attaches to an existing process for debugging.
@see: L{detach}, L{execv}, L{execl}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to attach to.
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error.
Depending on the circumstances, the debugger may or may not have
attached to the target process.
|
[
"Attaches",
"to",
"an",
"existing",
"process",
"for",
"debugging",
"."
] |
python
|
train
|
QInfer/python-qinfer
|
src/qinfer/abstract_model.py
|
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/abstract_model.py#L614-L630
|
def domain(self, expparams):
"""
Returns a list of :class:`Domain` objects, one for each input expparam.
:param numpy.ndarray expparams: Array of experimental parameters. This
array must be of dtype agreeing with the ``expparams_dtype``
property, or, in the case where ``n_outcomes_constant`` is ``True``,
``None`` should be a valid input.
:rtype: list of ``Domain``
"""
# As a convenience to most users, we define domain for them. If a
# fancier domain is desired, this method can easily be overridden.
if self.is_n_outcomes_constant:
return self._domain if expparams is None else [self._domain for ep in expparams]
else:
return [IntegerDomain(min=0,max=n_o-1) for n_o in self.n_outcomes(expparams)]
|
[
"def",
"domain",
"(",
"self",
",",
"expparams",
")",
":",
"# As a convenience to most users, we define domain for them. If a ",
"# fancier domain is desired, this method can easily be overridden.",
"if",
"self",
".",
"is_n_outcomes_constant",
":",
"return",
"self",
".",
"_domain",
"if",
"expparams",
"is",
"None",
"else",
"[",
"self",
".",
"_domain",
"for",
"ep",
"in",
"expparams",
"]",
"else",
":",
"return",
"[",
"IntegerDomain",
"(",
"min",
"=",
"0",
",",
"max",
"=",
"n_o",
"-",
"1",
")",
"for",
"n_o",
"in",
"self",
".",
"n_outcomes",
"(",
"expparams",
")",
"]"
] |
Returns a list of :class:`Domain` objects, one for each input expparam.
:param numpy.ndarray expparams: Array of experimental parameters. This
array must be of dtype agreeing with the ``expparams_dtype``
property, or, in the case where ``n_outcomes_constant`` is ``True``,
``None`` should be a valid input.
:rtype: list of ``Domain``
|
[
"Returns",
"a",
"list",
"of",
":",
"class",
":",
"Domain",
"objects",
"one",
"for",
"each",
"input",
"expparam",
"."
] |
python
|
train
|
push-things/wallabag_api
|
wallabag_api/wallabag.py
|
https://github.com/push-things/wallabag_api/blob/8d1e10a6ebc03d1ac9af2b38b57eb69f29b4216e/wallabag_api/wallabag.py#L179-L206
|
async def post_entries(self, url, title='', tags='', starred=0, archive=0, content='', language='', published_at='',
authors='', public=1, original_url=''):
"""
POST /api/entries.{_format}
Create an entry
:param url: the url of the note to store
:param title: Optional, we'll get the title from the page.
:param tags: tag1,tag2,tag3 a comma-separated list of tags.
:param starred entry already starred
:param archive entry already archived
:param content additional html content
:param language
:param published_at
:param authors
:param public
:param original_url
:return result
"""
params = {'access_token': self.token, 'url': url, 'title': title,
'tags': tags, 'starred': starred, 'archive': archive,
'content': content, 'language': language, 'published_at': published_at,
'authors': authors, 'public': public, 'original_url': original_url}
if len(tags) > 0 and isinstance(tags, list):
params['tags'] = ', '.join(tags)
path = '/api/entries.{ext}'.format(ext=self.format)
return await self.query(path, "post", **params)
|
[
"async",
"def",
"post_entries",
"(",
"self",
",",
"url",
",",
"title",
"=",
"''",
",",
"tags",
"=",
"''",
",",
"starred",
"=",
"0",
",",
"archive",
"=",
"0",
",",
"content",
"=",
"''",
",",
"language",
"=",
"''",
",",
"published_at",
"=",
"''",
",",
"authors",
"=",
"''",
",",
"public",
"=",
"1",
",",
"original_url",
"=",
"''",
")",
":",
"params",
"=",
"{",
"'access_token'",
":",
"self",
".",
"token",
",",
"'url'",
":",
"url",
",",
"'title'",
":",
"title",
",",
"'tags'",
":",
"tags",
",",
"'starred'",
":",
"starred",
",",
"'archive'",
":",
"archive",
",",
"'content'",
":",
"content",
",",
"'language'",
":",
"language",
",",
"'published_at'",
":",
"published_at",
",",
"'authors'",
":",
"authors",
",",
"'public'",
":",
"public",
",",
"'original_url'",
":",
"original_url",
"}",
"if",
"len",
"(",
"tags",
")",
">",
"0",
"and",
"isinstance",
"(",
"tags",
",",
"list",
")",
":",
"params",
"[",
"'tags'",
"]",
"=",
"', '",
".",
"join",
"(",
"tags",
")",
"path",
"=",
"'/api/entries.{ext}'",
".",
"format",
"(",
"ext",
"=",
"self",
".",
"format",
")",
"return",
"await",
"self",
".",
"query",
"(",
"path",
",",
"\"post\"",
",",
"*",
"*",
"params",
")"
] |
POST /api/entries.{_format}
Create an entry
:param url: the url of the note to store
:param title: Optional, we'll get the title from the page.
:param tags: tag1,tag2,tag3 a comma-separated list of tags.
:param starred entry already starred
:param archive entry already archived
:param content additional html content
:param language
:param published_at
:param authors
:param public
:param original_url
:return result
|
[
"POST",
"/",
"api",
"/",
"entries",
".",
"{",
"_format",
"}"
] |
python
|
train
|
projectatomic/osbs-client
|
osbs/build/plugins_configuration.py
|
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/plugins_configuration.py#L55-L64
|
def remove_plugin(self, phase, name, reason=None):
"""
if config contains plugin, remove it
"""
for p in self.template[phase]:
if p.get('name') == name:
self.template[phase].remove(p)
if reason:
logger.info('Removing {}:{}, {}'.format(phase, name, reason))
break
|
[
"def",
"remove_plugin",
"(",
"self",
",",
"phase",
",",
"name",
",",
"reason",
"=",
"None",
")",
":",
"for",
"p",
"in",
"self",
".",
"template",
"[",
"phase",
"]",
":",
"if",
"p",
".",
"get",
"(",
"'name'",
")",
"==",
"name",
":",
"self",
".",
"template",
"[",
"phase",
"]",
".",
"remove",
"(",
"p",
")",
"if",
"reason",
":",
"logger",
".",
"info",
"(",
"'Removing {}:{}, {}'",
".",
"format",
"(",
"phase",
",",
"name",
",",
"reason",
")",
")",
"break"
] |
if config contains plugin, remove it
|
[
"if",
"config",
"contains",
"plugin",
"remove",
"it"
] |
python
|
train
|
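A standalone sketch of the same remove-by-name scan over a phase's plugin list; the template and plugin names below are hypothetical, not taken from a real osbs build template.

template = {
    'prebuild_plugins': [
        {'name': 'add_something', 'args': {}},
        {'name': 'check_something', 'args': {}},
    ],
}

def remove_plugin(template, phase, name):
    for plugin in template[phase]:
        if plugin.get('name') == name:
            template[phase].remove(plugin)
            break   # stop after the first match, as above

remove_plugin(template, 'prebuild_plugins', 'check_something')
print([p['name'] for p in template['prebuild_plugins']])   # ['add_something']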
openwisp/netjsonconfig
|
netjsonconfig/backends/base/backend.py
|
https://github.com/openwisp/netjsonconfig/blob/c23ce9732720856e2f6dc54060db71a8182c7d4b/netjsonconfig/backends/base/backend.py#L226-L244
|
def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
"""
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
"""
byte_contents = BytesIO(contents.encode('utf8'))
info = tarfile.TarInfo(name=name)
info.size = len(contents)
# mtime must be 0 or any checksum operation
# will return a different digest even when content is the same
info.mtime = 0
info.type = tarfile.REGTYPE
info.mode = int(mode, 8) # permissions converted to decimal notation
tar.addfile(tarinfo=info, fileobj=byte_contents)
|
[
"def",
"_add_file",
"(",
"self",
",",
"tar",
",",
"name",
",",
"contents",
",",
"mode",
"=",
"DEFAULT_FILE_MODE",
")",
":",
"byte_contents",
"=",
"BytesIO",
"(",
"contents",
".",
"encode",
"(",
"'utf8'",
")",
")",
"info",
"=",
"tarfile",
".",
"TarInfo",
"(",
"name",
"=",
"name",
")",
"info",
".",
"size",
"=",
"len",
"(",
"contents",
")",
"# mtime must be 0 or any checksum operation",
"# will return a different digest even when content is the same",
"info",
".",
"mtime",
"=",
"0",
"info",
".",
"type",
"=",
"tarfile",
".",
"REGTYPE",
"info",
".",
"mode",
"=",
"int",
"(",
"mode",
",",
"8",
")",
"# permissions converted to decimal notation",
"tar",
".",
"addfile",
"(",
"tarinfo",
"=",
"info",
",",
"fileobj",
"=",
"byte_contents",
")"
] |
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
|
[
"Adds",
"a",
"single",
"file",
"in",
"tarfile",
"instance",
"."
] |
python
|
valid
|
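The tarfile pattern above can be exercised on its own; a small sketch that writes one in-memory file into an archive with the same reproducibility trick (mtime pinned to 0) and the octal-string mode conversion.

import tarfile
from io import BytesIO

contents = 'hello world\n'
payload = BytesIO(contents.encode('utf8'))

info = tarfile.TarInfo(name='etc/motd')
info.size = len(contents)
info.mtime = 0                 # identical content then yields an identical archive checksum
info.mode = int('0644', 8)     # octal permission string -> integer bits

with tarfile.open('example.tar', 'w') as tar:
    tar.addfile(tarinfo=info, fileobj=payload)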
CivicSpleen/ambry
|
ambry/library/search_backends/whoosh_backend.py
|
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/whoosh_backend.py#L270-L276
|
def all(self):
""" Returns list with all indexed partitions. """
partitions = []
for partition in self.index.searcher().documents():
partitions.append(
PartitionSearchResult(dataset_vid=partition['dataset_vid'], vid=partition['vid'], score=1))
return partitions
|
[
"def",
"all",
"(",
"self",
")",
":",
"partitions",
"=",
"[",
"]",
"for",
"partition",
"in",
"self",
".",
"index",
".",
"searcher",
"(",
")",
".",
"documents",
"(",
")",
":",
"partitions",
".",
"append",
"(",
"PartitionSearchResult",
"(",
"dataset_vid",
"=",
"partition",
"[",
"'dataset_vid'",
"]",
",",
"vid",
"=",
"partition",
"[",
"'vid'",
"]",
",",
"score",
"=",
"1",
")",
")",
"return",
"partitions"
] |
Returns list with all indexed partitions.
|
[
"Returns",
"list",
"with",
"all",
"indexed",
"partitions",
"."
] |
python
|
train
|
seleniumbase/SeleniumBase
|
seleniumbase/fixtures/email_manager.py
|
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/email_manager.py#L430-L442
|
def remove_whitespace(self, html):
"""
Clean whitespace from html
@Params
html - html source to remove whitespace from
@Returns
String html without whitespace
"""
# Does python have a better way to do exactly this?
clean_html = html
for char in ("\r", "\n", "\t"):
clean_html = clean_html.replace(char, "")
return clean_html
|
[
"def",
"remove_whitespace",
"(",
"self",
",",
"html",
")",
":",
"# Does python have a better way to do exactly this?",
"clean_html",
"=",
"html",
"for",
"char",
"in",
"(",
"\"\\r\"",
",",
"\"\\n\"",
",",
"\"\\t\"",
")",
":",
"clean_html",
"=",
"clean_html",
".",
"replace",
"(",
"char",
",",
"\"\"",
")",
"return",
"clean_html"
] |
Clean whitespace from html
@Params
html - html source to remove whitespace from
@Returns
String html without whitespace
|
[
"Clean",
"whitespace",
"from",
"html"
] |
python
|
train
|
burnash/gspread
|
gspread/models.py
|
https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/models.py#L580-L593
|
def get_all_values(self):
"""Returns a list of lists containing all cells' values as strings.
.. note::
Empty trailing rows and columns will not be included.
"""
data = self.spreadsheet.values_get(self.title)
try:
return fill_gaps(data['values'])
except KeyError:
return []
|
[
"def",
"get_all_values",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"spreadsheet",
".",
"values_get",
"(",
"self",
".",
"title",
")",
"try",
":",
"return",
"fill_gaps",
"(",
"data",
"[",
"'values'",
"]",
")",
"except",
"KeyError",
":",
"return",
"[",
"]"
] |
Returns a list of lists containing all cells' values as strings.
.. note::
Empty trailing rows and columns will not be included.
|
[
"Returns",
"a",
"list",
"of",
"lists",
"containing",
"all",
"cells",
"values",
"as",
"strings",
"."
] |
python
|
train
|
1flow/python-ftr
|
ftr/config.py
|
https://github.com/1flow/python-ftr/blob/90a2108c5ee005f1bf66dbe8cce68f2b7051b839/ftr/config.py#L127-L255
|
def ftr_get_config(website_url, exact_host_match=False):
""" Download the Five Filters config from centralized repositories.
Repositories can be local if you need to override siteconfigs.
The first entry found is returned. If no configuration is found,
`None` is returned. If :mod:`cacheops` is installed, the result will
be cached with a default expiration delay of 3 days.
:param exact_host_match: If ``False`` (default), we will look for
wildcard config matches. For example if host is
``www.test.example.org``, we will try looking up
``test.example.org`` and ``example.org``.
:param exact_host_match: bool
:param website_url: either a full web URI (eg.
``http://www.website.com:PORT/path/to/a/page.html``) or simply
a domain name (eg. ``www.website.com``). In case of a domain name,
no check is performed yet, be careful of what you pass.
:type website_url: str or unicode
:returns: tuple -- the loaded site config (as unicode string) and
the hostname matched (unicode string too).
:raises: :class:`SiteConfigNotFound` if no config could be found.
.. note:: Whatever ``exact_host_match`` value is, the ``www`` part is
always removed from the URL or domain name.
.. todo:: there is currently no merging/cascading of site configs. In
the original Five Filters implementation, primary and secondary
configurations were merged. We could eventually re-implement this
part if needed by someone. PRs welcome as always.
"""
def check_requests_result(result):
return (
u'text/plain' in result.headers.get('content-type')
and u'<!DOCTYPE html>' not in result.text
and u'<html ' not in result.text
and u'</html>' not in result.text
)
repositories = [
x.strip() for x in os.environ.get(
'PYTHON_FTR_REPOSITORIES',
os.path.expandvars(u'${HOME}/sources/ftr-site-config') + u' '
+ u'https://raw.githubusercontent.com/1flow/ftr-site-config/master/ ' # NOQA
+ u'https://raw.githubusercontent.com/fivefilters/ftr-site-config/master/' # NOQA
).split() if x.strip() != u'']
try:
proto, host_and_port, remaining = split_url(website_url)
except:
host_and_port = website_url
host_domain_parts = host_and_port.split(u'.')
# we don't store / use the “www.” part of domain name in siteconfig.
if host_domain_parts[0] == u'www':
host_domain_parts = host_domain_parts[1:]
if exact_host_match:
domain_names = [u'.'.join(host_domain_parts)]
else:
domain_names = [
u'.'.join(host_domain_parts[-i:])
for i in reversed(range(2, len(host_domain_parts) + 1))
]
LOGGER.debug(u'Gathering configurations for domains %s from %s.',
domain_names, repositories)
for repository in repositories:
# try, in turn:
# website.ext.txt
# .website.ext.txt
for domain_name in domain_names:
skip_repository = False
for txt_siteconfig_name in (
u'{0}.txt'.format(domain_name),
u'.{0}.txt'.format(domain_name),
):
if repository.startswith('http'):
siteconfig_url = repository + txt_siteconfig_name
result = requests.get(siteconfig_url)
if result.status_code == requests.codes.ok:
if not check_requests_result(result):
LOGGER.error(u'“%s” repository URL does not '
u'return text/plain results.',
repository)
skip_repository = True
break
LOGGER.info(u'Using remote siteconfig for domain '
u'%s from %s.', domain_name,
siteconfig_url, extra={
'siteconfig': domain_name})
return result.text, txt_siteconfig_name[:-4]
else:
filename = os.path.join(repository, txt_siteconfig_name)
if os.path.exists(filename):
LOGGER.info(u'Using local siteconfig for domain '
u'%s from %s.', domain_name,
filename, extra={
'siteconfig': domain_name})
with codecs.open(filename, 'rb', encoding='utf8') as f:
return f.read(), txt_siteconfig_name[:-4]
if skip_repository:
break
if skip_repository:
break
raise SiteConfigNotFound(
u'No configuration found for domains {0} in repositories {1}'.format(
u', '.join(domain_names), u', '.join(repositories)
)
)
|
[
"def",
"ftr_get_config",
"(",
"website_url",
",",
"exact_host_match",
"=",
"False",
")",
":",
"def",
"check_requests_result",
"(",
"result",
")",
":",
"return",
"(",
"u'text/plain'",
"in",
"result",
".",
"headers",
".",
"get",
"(",
"'content-type'",
")",
"and",
"u'<!DOCTYPE html>'",
"not",
"in",
"result",
".",
"text",
"and",
"u'<html '",
"not",
"in",
"result",
".",
"text",
"and",
"u'</html>'",
"not",
"in",
"result",
".",
"text",
")",
"repositories",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"os",
".",
"environ",
".",
"get",
"(",
"'PYTHON_FTR_REPOSITORIES'",
",",
"os",
".",
"path",
".",
"expandvars",
"(",
"u'${HOME}/sources/ftr-site-config'",
")",
"+",
"u' '",
"+",
"u'https://raw.githubusercontent.com/1flow/ftr-site-config/master/ '",
"# NOQA",
"+",
"u'https://raw.githubusercontent.com/fivefilters/ftr-site-config/master/'",
"# NOQA",
")",
".",
"split",
"(",
")",
"if",
"x",
".",
"strip",
"(",
")",
"!=",
"u''",
"]",
"try",
":",
"proto",
",",
"host_and_port",
",",
"remaining",
"=",
"split_url",
"(",
"website_url",
")",
"except",
":",
"host_and_port",
"=",
"website_url",
"host_domain_parts",
"=",
"host_and_port",
".",
"split",
"(",
"u'.'",
")",
"# we don't store / use the “www.” part of domain name in siteconfig.",
"if",
"host_domain_parts",
"[",
"0",
"]",
"==",
"u'www'",
":",
"host_domain_parts",
"=",
"host_domain_parts",
"[",
"1",
":",
"]",
"if",
"exact_host_match",
":",
"domain_names",
"=",
"[",
"u'.'",
".",
"join",
"(",
"host_domain_parts",
")",
"]",
"else",
":",
"domain_names",
"=",
"[",
"u'.'",
".",
"join",
"(",
"host_domain_parts",
"[",
"-",
"i",
":",
"]",
")",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"2",
",",
"len",
"(",
"host_domain_parts",
")",
"+",
"1",
")",
")",
"]",
"LOGGER",
".",
"debug",
"(",
"u'Gathering configurations for domains %s from %s.'",
",",
"domain_names",
",",
"repositories",
")",
"for",
"repository",
"in",
"repositories",
":",
"# try, in turn:",
"# website.ext.txt",
"# .website.ext.txt",
"for",
"domain_name",
"in",
"domain_names",
":",
"skip_repository",
"=",
"False",
"for",
"txt_siteconfig_name",
"in",
"(",
"u'{0}.txt'",
".",
"format",
"(",
"domain_name",
")",
",",
"u'.{0}.txt'",
".",
"format",
"(",
"domain_name",
")",
",",
")",
":",
"if",
"repository",
".",
"startswith",
"(",
"'http'",
")",
":",
"siteconfig_url",
"=",
"repository",
"+",
"txt_siteconfig_name",
"result",
"=",
"requests",
".",
"get",
"(",
"siteconfig_url",
")",
"if",
"result",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"ok",
":",
"if",
"not",
"check_requests_result",
"(",
"result",
")",
":",
"LOGGER",
".",
"error",
"(",
"u'“%s” repository URL does not '",
"u'return text/plain results.'",
",",
"repository",
")",
"skip_repository",
"=",
"True",
"break",
"LOGGER",
".",
"info",
"(",
"u'Using remote siteconfig for domain '",
"u'%s from %s.'",
",",
"domain_name",
",",
"siteconfig_url",
",",
"extra",
"=",
"{",
"'siteconfig'",
":",
"domain_name",
"}",
")",
"return",
"result",
".",
"text",
",",
"txt_siteconfig_name",
"[",
":",
"-",
"4",
"]",
"else",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"repository",
",",
"txt_siteconfig_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"LOGGER",
".",
"info",
"(",
"u'Using local siteconfig for domain '",
"u'%s from %s.'",
",",
"domain_name",
",",
"filename",
",",
"extra",
"=",
"{",
"'siteconfig'",
":",
"domain_name",
"}",
")",
"with",
"codecs",
".",
"open",
"(",
"filename",
",",
"'rb'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
",",
"txt_siteconfig_name",
"[",
":",
"-",
"4",
"]",
"if",
"skip_repository",
":",
"break",
"if",
"skip_repository",
":",
"break",
"raise",
"SiteConfigNotFound",
"(",
"u'No configuration found for domains {0} in repositories {1}'",
".",
"format",
"(",
"u', '",
".",
"join",
"(",
"domain_names",
")",
",",
"u', '",
".",
"join",
"(",
"repositories",
")",
")",
")"
] |
Download the Five Filters config from centralized repositories.
Repositories can be local if you need to override siteconfigs.
The first entry found is returned. If no configuration is found,
:class:`SiteConfigNotFound` is raised. If :mod:`cacheops` is installed, the result will
be cached with a default expiration delay of 3 days.
:param exact_host_match: If ``False`` (default), we will look for
wildcard config matches. For example if host is
``www.test.example.org``, we will try looking up
``test.example.org`` and ``example.org``.
:type exact_host_match: bool
:param website_url: either a full web URI (eg.
``http://www.website.com:PORT/path/to/a/page.html``) or simply
a domain name (eg. ``www.website.com``). In case of a domain name,
no check is performed yet, be careful of what you pass.
:type website_url: str or unicode
:returns: tuple -- the loaded site config (as unicode string) and
the hostname matched (unicode string too).
:raises: :class:`SiteConfigNotFound` if no config could be found.
.. note:: Whatever ``exact_host_match`` value is, the ``www`` part is
always removed from the URL or domain name.
.. todo:: there is currently no merging/cascading of site configs. In
the original Five Filters implementation, primary and secondary
configurations were merged. We could eventually re-implement this
part if needed by someone. PRs welcome as always.
|
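A minimal usage sketch of the lookup described above. The import path `ftr` is an assumption based on the package name; only the function signature and the raised exception come from this record.
from ftr import ftr_get_config, SiteConfigNotFound  # assumed import path, not confirmed by this record

try:
    # With exact_host_match left False, www.test.example.org also tries
    # test.example.org and example.org before giving up.
    config_text, matched_host = ftr_get_config('http://www.test.example.org/article.html')
    print(matched_host)
except SiteConfigNotFound:
    pass  # no repository (local or remote) held a matching siteconfig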
[
"Download",
"the",
"Five",
"Filters",
"config",
"from",
"centralized",
"repositories",
"."
] |
python
|
train
|
abseil/abseil-py
|
absl/flags/_argument_parser.py
|
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_argument_parser.py#L494-L510
|
def parse(self, argument):
"""Parses argument as comma-separated list of strings."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
try:
return [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
except csv.Error as e:
# Provide a helpful report for case like
# --listflag="$(printf 'hello,\nworld')"
# IOW, list flag values containing naked newlines. This error
# was previously "reported" by allowing csv.Error to
# propagate.
raise ValueError('Unable to parse the value %r as a %s: %s'
% (argument, self.flag_type(), e))
|
[
"def",
"parse",
"(",
"self",
",",
"argument",
")",
":",
"if",
"isinstance",
"(",
"argument",
",",
"list",
")",
":",
"return",
"argument",
"elif",
"not",
"argument",
":",
"return",
"[",
"]",
"else",
":",
"try",
":",
"return",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"list",
"(",
"csv",
".",
"reader",
"(",
"[",
"argument",
"]",
",",
"strict",
"=",
"True",
")",
")",
"[",
"0",
"]",
"]",
"except",
"csv",
".",
"Error",
"as",
"e",
":",
"# Provide a helpful report for case like",
"# --listflag=\"$(printf 'hello,\\nworld')\"",
"# IOW, list flag values containing naked newlines. This error",
"# was previously \"reported\" by allowing csv.Error to",
"# propagate.",
"raise",
"ValueError",
"(",
"'Unable to parse the value %r as a %s: %s'",
"%",
"(",
"argument",
",",
"self",
".",
"flag_type",
"(",
")",
",",
"e",
")",
")"
] |
Parses argument as comma-separated list of strings.
|
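The csv-based split above exists so that quoted list items may contain commas; a plain str.split(',') would break them apart. A standalone illustration of that parsing step (the flag machinery itself is omitted):
import csv

argument = 'a,"b, with comma",c'
# Mirrors the parse step above: csv.reader honors the quotes, str.split would not.
parsed = [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
print(parsed)  # ['a', 'b, with comma', 'c']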
[
"Parses",
"argument",
"as",
"comma",
"-",
"separated",
"list",
"of",
"strings",
"."
] |
python
|
train
|
sandwichcloud/ingredients.tasks
|
ingredients_tasks/vmware.py
|
https://github.com/sandwichcloud/ingredients.tasks/blob/23d2772536f07aa5e4787b7ee67dee2f1faedb08/ingredients_tasks/vmware.py#L220-L238
|
def get_obj(self, vimtype, name, folder=None):
"""
Return an object by name; if name is None, the
first found object is returned
"""
obj = None
content = self.service_instance.RetrieveContent()
if folder is None:
folder = content.rootFolder
container = content.viewManager.CreateContainerView(folder, [vimtype], True)
for c in container.view:
if c.name == name:
obj = c
break
container.Destroy()
return obj
|
[
"def",
"get_obj",
"(",
"self",
",",
"vimtype",
",",
"name",
",",
"folder",
"=",
"None",
")",
":",
"obj",
"=",
"None",
"content",
"=",
"self",
".",
"service_instance",
".",
"RetrieveContent",
"(",
")",
"if",
"folder",
"is",
"None",
":",
"folder",
"=",
"content",
".",
"rootFolder",
"container",
"=",
"content",
".",
"viewManager",
".",
"CreateContainerView",
"(",
"folder",
",",
"[",
"vimtype",
"]",
",",
"True",
")",
"for",
"c",
"in",
"container",
".",
"view",
":",
"if",
"c",
".",
"name",
"==",
"name",
":",
"obj",
"=",
"c",
"break",
"container",
".",
"Destroy",
"(",
")",
"return",
"obj"
] |
Return an object by name; if name is None, the
first found object is returned
|
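A hedged call-site sketch; `vim` comes from pyVmomi, while the client object holding `service_instance` is assumed to be constructed elsewhere and is not shown in this record.
from pyVmomi import vim  # managed-object types used as the vimtype argument

# client = ...  # some wrapper exposing service_instance, assumed to exist
# vm = client.get_obj(vim.VirtualMachine, 'build-agent-01')   # hypothetical name
# ds = client.get_obj(vim.Datastore, 'datastore1')            # hypothetical name
# Both calls return None when nothing in the chosen folder matches the name.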
[
"Return",
"an",
"object",
"by",
"name",
"if",
"name",
"is",
"None",
"the",
"first",
"found",
"object",
"is",
"returned"
] |
python
|
train
|
spyder-ide/spyder
|
spyder/plugins/help/plugin.py
|
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/plugin.py#L341-L344
|
def toggle_wrap_mode(self, checked):
"""Toggle wrap mode"""
self.plain_text.editor.toggle_wrap_mode(checked)
self.set_option('wrap', checked)
|
[
"def",
"toggle_wrap_mode",
"(",
"self",
",",
"checked",
")",
":",
"self",
".",
"plain_text",
".",
"editor",
".",
"toggle_wrap_mode",
"(",
"checked",
")",
"self",
".",
"set_option",
"(",
"'wrap'",
",",
"checked",
")"
] |
Toggle wrap mode
|
[
"Toggle",
"wrap",
"mode"
] |
python
|
train
|
dlintott/gns3-converter
|
gns3converter/main.py
|
https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/main.py#L197-L216
|
def snapshot_name(topo_name):
"""
Get the snapshot name
:param str topo_name: topology file location. The name is taken from the
directory containing the topology file using the
following format: topology_NAME_snapshot_DATE_TIME
:return: snapshot name
:raises ConvertError: when unable to determine the snapshot name
"""
topo_name = os.path.basename(topology_dirname(topo_name))
snap_re = re.compile('^topology_(.+)(_snapshot_)(\d{6}_\d{6})$')
result = snap_re.search(topo_name)
if result is not None:
snap_name = result.group(1) + '_' + result.group(3)
else:
raise ConvertError('Unable to get snapshot name')
return snap_name
|
[
"def",
"snapshot_name",
"(",
"topo_name",
")",
":",
"topo_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"topology_dirname",
"(",
"topo_name",
")",
")",
"snap_re",
"=",
"re",
".",
"compile",
"(",
"'^topology_(.+)(_snapshot_)(\\d{6}_\\d{6})$'",
")",
"result",
"=",
"snap_re",
".",
"search",
"(",
"topo_name",
")",
"if",
"result",
"is",
"not",
"None",
":",
"snap_name",
"=",
"result",
".",
"group",
"(",
"1",
")",
"+",
"'_'",
"+",
"result",
".",
"group",
"(",
"3",
")",
"else",
":",
"raise",
"ConvertError",
"(",
"'Unable to get snapshot name'",
")",
"return",
"snap_name"
] |
Get the snapshot name
:param str topo_name: topology file location. The name is taken from the
directory containing the topology file using the
following format: topology_NAME_snapshot_DATE_TIME
:return: snapshot name
:raises ConvertError: when unable to determine the snapshot name
|
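A worked example of the regular expression above on a hypothetical snapshot directory name, showing how NAME and DATE_TIME are recombined:
import re

snap_re = re.compile(r'^topology_(.+)(_snapshot_)(\d{6}_\d{6})$')
match = snap_re.search('topology_mylab_snapshot_190321_143000')  # hypothetical directory name
if match is not None:
    # group(1) is the topology NAME, group(3) the DATE_TIME stamp
    print(match.group(1) + '_' + match.group(3))  # mylab_190321_143000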
[
"Get",
"the",
"snapshot",
"name"
] |
python
|
train
|
seryl/Python-Cotendo
|
cotendo/__init__.py
|
https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/__init__.py#L129-L138
|
def doFlush(self, cname, flushExpression, flushType):
"""
doFlush method enables specific content to be "flushed" from the
cache servers.
* Note: The flush API is limited to 1,000 flush invocations per hour
(each flush invocation may include several objects). *
"""
return self.client.service.doFlush(
cname, flushExpression, flushType)
|
[
"def",
"doFlush",
"(",
"self",
",",
"cname",
",",
"flushExpression",
",",
"flushType",
")",
":",
"return",
"self",
".",
"client",
".",
"service",
".",
"doFlush",
"(",
"cname",
",",
"flushExpression",
",",
"flushType",
")"
] |
doFlush method enables specific content to be "flushed" from the
cache servers.
* Note: The flush API is limited to 1,000 flush invocations per hour
(each flush invocation may include several objects). *
|
[
"doFlush",
"method",
"enables",
"specific",
"content",
"to",
"be",
"flushed",
"from",
"the",
"cache",
"servers",
"."
] |
python
|
train
|
log2timeline/plaso
|
plaso/output/rawpy.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/rawpy.py#L68-L75
|
def WriteEventBody(self, event):
"""Writes the body of an event to the output.
Args:
event (EventObject): event.
"""
output_string = NativePythonFormatterHelper.GetFormattedEventObject(event)
self._output_writer.Write(output_string)
|
[
"def",
"WriteEventBody",
"(",
"self",
",",
"event",
")",
":",
"output_string",
"=",
"NativePythonFormatterHelper",
".",
"GetFormattedEventObject",
"(",
"event",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"output_string",
")"
] |
Writes the body of an event to the output.
Args:
event (EventObject): event.
|
[
"Writes",
"the",
"body",
"of",
"an",
"event",
"to",
"the",
"output",
"."
] |
python
|
train
|
a1ezzz/wasp-general
|
wasp_general/network/clients/file.py
|
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/clients/file.py#L140-L152
|
def upload_file(self, file_name, file_obj, *args, **kwargs):
""" :meth:`.WNetworkClientProto.upload_file` method implementation
"""
previous_path = self.session_path()
try:
self.session_path(file_name)
with open(self.full_path(), mode='wb') as f:
chunk = file_obj.read(io.DEFAULT_BUFFER_SIZE)
while len(chunk) > 0:
f.write(chunk)
chunk = file_obj.read(io.DEFAULT_BUFFER_SIZE)
finally:
self.session_path(previous_path)
|
[
"def",
"upload_file",
"(",
"self",
",",
"file_name",
",",
"file_obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"previous_path",
"=",
"self",
".",
"session_path",
"(",
")",
"try",
":",
"self",
".",
"session_path",
"(",
"file_name",
")",
"with",
"open",
"(",
"self",
".",
"full_path",
"(",
")",
",",
"mode",
"=",
"'wb'",
")",
"as",
"f",
":",
"chunk",
"=",
"file_obj",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"while",
"len",
"(",
"chunk",
")",
">",
"0",
":",
"f",
".",
"write",
"(",
"chunk",
")",
"chunk",
"=",
"file_obj",
".",
"read",
"(",
"io",
".",
"DEFAULT_BUFFER_SIZE",
")",
"finally",
":",
"self",
".",
"session_path",
"(",
"previous_path",
")"
] |
:meth:`.WNetworkClientProto.upload_file` method implementation
|
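The copy loop above is a generic chunked-transfer pattern; here it is in isolation, with hypothetical file names, so the memory behaviour is easier to see:
import io

# Reads the source in io.DEFAULT_BUFFER_SIZE pieces, so a large file never
# has to fit in memory at once.
with open('source.bin', 'rb') as src, open('target.bin', 'wb') as dst:
    chunk = src.read(io.DEFAULT_BUFFER_SIZE)
    while len(chunk) > 0:
        dst.write(chunk)
        chunk = src.read(io.DEFAULT_BUFFER_SIZE)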
[
":",
"meth",
":",
".",
"WNetworkClientProto",
".",
"upload_file",
"method",
"implementation"
] |
python
|
train
|
ianmiell/shutit
|
shutit_class.py
|
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1655-L1670
|
def ls(self,
directory,
note=None,
loglevel=logging.DEBUG):
"""Helper proc to list files in a directory
@param directory: directory to list. If the directory doesn't exist, shutit.fail() is called (i.e. the build fails.)
@param note: See send()
@type directory: string
@rtype: list of strings
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.is_shutit_installed(directory,note=note,loglevel=loglevel)
|
[
"def",
"ls",
"(",
"self",
",",
"directory",
",",
"note",
"=",
"None",
",",
"loglevel",
"=",
"logging",
".",
"DEBUG",
")",
":",
"shutit_global",
".",
"shutit_global_object",
".",
"yield_to_draw",
"(",
")",
"shutit_pexpect_session",
"=",
"self",
".",
"get_current_shutit_pexpect_session",
"(",
")",
"return",
"shutit_pexpect_session",
".",
"is_shutit_installed",
"(",
"directory",
",",
"note",
"=",
"note",
",",
"loglevel",
"=",
"loglevel",
")"
] |
Helper proc to list files in a directory
@param directory: directory to list. If the directory doesn't exist, shutit.fail() is called (i.e. the build fails.)
@param note: See send()
@type directory: string
@rtype: list of strings
|
[
"Helper",
"proc",
"to",
"list",
"files",
"in",
"a",
"directory"
] |
python
|
train
|
Arvedui/picuplib
|
picuplib/upload.py
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L147-L170
|
def remote_upload(self, picture_url, resize=None,
rotation=None, noexif=None):
"""
wraps the remote_upload function
:param str picture_url: URL to picture. Allowed protocols are: ftp,\
http, https
:param str resize: A resolution in the following format: \
'80x80' (optional)
:param str|degree rotation: The picture will be rotated by this value. \
Allowed values are 00, 90, 180, 270.(optional)
:param boolean noexif: set to True when exif data should be purged.\
(optional)
"""
if not resize:
resize = self._resize
if not rotation:
rotation = self._rotation
if not noexif:
noexif = self._noexif
return remote_upload(self._apikey, picture_url,
resize, rotation, noexif)
|
[
"def",
"remote_upload",
"(",
"self",
",",
"picture_url",
",",
"resize",
"=",
"None",
",",
"rotation",
"=",
"None",
",",
"noexif",
"=",
"None",
")",
":",
"if",
"not",
"resize",
":",
"resize",
"=",
"self",
".",
"_resize",
"if",
"not",
"rotation",
":",
"rotation",
"=",
"self",
".",
"_rotation",
"if",
"not",
"noexif",
":",
"noexif",
"=",
"self",
".",
"_noexif",
"return",
"remote_upload",
"(",
"self",
".",
"_apikey",
",",
"picture_url",
",",
"resize",
",",
"rotation",
",",
"noexif",
")"
] |
wraps the remote_upload function
:param str picture_url: URL to picture. Allowed protocols are: ftp,\
http, https
:param str resize: A resolution in the following format: \
'80x80' (optional)
:param str|degree rotation: The picture will be rotated by this value. \
Allowed values are 00, 90, 180, 270.(optional)
:param boolean noexif: set to True when exif data should be purged.\
(optional)
|
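The method above delegates to a module-level remote_upload helper; a hedged direct call to that helper, assuming it is importable from picuplib.upload as the record's file path suggests (the API key and URL are placeholders):
from picuplib.upload import remote_upload  # import path assumed from the record's file path

response = remote_upload('MY-API-KEY',                   # placeholder API key
                         'https://example.org/cat.jpg',  # placeholder picture URL
                         '80x80',                        # resize
                         '90',                           # rotation
                         False)                          # noexif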
[
"wraps",
"remote_upload",
"funktion"
] |
python
|
train
|
TrafficSenseMSD/SumoTools
|
traci/_vehicle.py
|
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L1001-L1007
|
def setLength(self, vehID, length):
"""setLength(string, double) -> None
Sets the length in m for the given vehicle.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_LENGTH, vehID, length)
|
[
"def",
"setLength",
"(",
"self",
",",
"vehID",
",",
"length",
")",
":",
"self",
".",
"_connection",
".",
"_sendDoubleCmd",
"(",
"tc",
".",
"CMD_SET_VEHICLE_VARIABLE",
",",
"tc",
".",
"VAR_LENGTH",
",",
"vehID",
",",
"length",
")"
] |
setLength(string, double) -> None
Sets the length in m for the given vehicle.
|
[
"setLength",
"(",
"string",
"double",
")",
"-",
">",
"None"
] |
python
|
train
|
DataONEorg/d1_python
|
lib_common/src/d1_common/cert/jwt.py
|
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L104-L108
|
def get_subject_with_file_validation(jwt_bu64, cert_path):
"""Same as get_subject_with_local_validation() except that the signing certificate
is read from a local PEM file."""
cert_obj = d1_common.cert.x509.deserialize_pem_file(cert_path)
return get_subject_with_local_validation(jwt_bu64, cert_obj)
|
[
"def",
"get_subject_with_file_validation",
"(",
"jwt_bu64",
",",
"cert_path",
")",
":",
"cert_obj",
"=",
"d1_common",
".",
"cert",
".",
"x509",
".",
"deserialize_pem_file",
"(",
"cert_path",
")",
"return",
"get_subject_with_local_validation",
"(",
"jwt_bu64",
",",
"cert_obj",
")"
] |
Same as get_subject_with_local_validation() except that the signing certificate
is read from a local PEM file.
|
[
"Same",
"as",
"get_subject_with_local_validation",
"()",
"except",
"that",
"the",
"signing",
"certificate",
"is",
"read",
"from",
"a",
"local",
"PEM",
"file",
"."
] |
python
|
train
|
maaku/python-bitcoin
|
bitcoin/tools.py
|
https://github.com/maaku/python-bitcoin/blob/1b80c284170fd3f547cc45f4700ce169f3f99641/bitcoin/tools.py#L59-L80
|
def decompress_amount(x):
"""\
Undo the value compression performed by x=compress_amount(n). The input
x matches one of the following patterns:
x = n = 0
x = 1+10*(9*n + d - 1) + e
x = 1+10*(n - 1) + 9"""
if not x: return 0;
x = x - 1;
# x = 10*(9*n + d - 1) + e
x, e = divmod(x, 10);
n = 0;
if e < 9:
# x = 9*n + d - 1
x, d = divmod(x, 9)
d = d + 1
# x = n
n = x*10 + d
else:
n = x + 1
return n * 10**e
|
[
"def",
"decompress_amount",
"(",
"x",
")",
":",
"if",
"not",
"x",
":",
"return",
"0",
"x",
"=",
"x",
"-",
"1",
"# x = 10*(9*n + d - 1) + e",
"x",
",",
"e",
"=",
"divmod",
"(",
"x",
",",
"10",
")",
"n",
"=",
"0",
"if",
"e",
"<",
"9",
":",
"# x = 9*n + d - 1",
"x",
",",
"d",
"=",
"divmod",
"(",
"x",
",",
"9",
")",
"d",
"=",
"d",
"+",
"1",
"# x = n",
"n",
"=",
"x",
"*",
"10",
"+",
"d",
"else",
":",
"n",
"=",
"x",
"+",
"1",
"return",
"n",
"*",
"10",
"**",
"e"
] |
\
Undo the value compression performed by x=compress_amount(n). The input
x matches one of the following patterns:
x = n = 0
x = 1+10*(9*n + d - 1) + e
x = 1+10*(n - 1) + 9
|
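Worked examples for the three patterns above, computed by hand from the formulas; the import path is assumed from the record's file path:
from bitcoin.tools import decompress_amount  # assumed import path

assert decompress_amount(0) == 0              # x = n = 0
assert decompress_amount(104) == 12000        # x = 1 + 10*(9*1 + 2 - 1) + 3 -> n = 12, e = 3
assert decompress_amount(50) == 5000000000    # x = 1 + 10*(5 - 1) + 9       -> n = 5,  e = 9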
[
"\\",
"Undo",
"the",
"value",
"compression",
"performed",
"by",
"x",
"=",
"compress_amount",
"(",
"n",
")",
".",
"The",
"input",
"x",
"matches",
"one",
"of",
"the",
"following",
"patterns",
":",
"x",
"=",
"n",
"=",
"0",
"x",
"=",
"1",
"+",
"10",
"*",
"(",
"9",
"*",
"n",
"+",
"d",
"-",
"1",
")",
"+",
"e",
"x",
"=",
"1",
"+",
"10",
"*",
"(",
"n",
"-",
"1",
")",
"+",
"9"
] |
python
|
train
|
toomore/grs
|
grs/fetch_data.py
|
https://github.com/toomore/grs/blob/a1285cb57878284a886952968be9e31fbfa595dd/grs/fetch_data.py#L268-L277
|
def out_putfile(self, fpath):
""" 輸出成 CSV 檔
:param path fpath: 檔案輸出位置
.. todo:: files output using `with` syntax.
"""
with open(fpath, 'w') as csv_file:
output = csv.writer(csv_file)
output.writerows(self.__raw_data)
|
[
"def",
"out_putfile",
"(",
"self",
",",
"fpath",
")",
":",
"with",
"open",
"(",
"fpath",
",",
"'w'",
")",
"as",
"csv_file",
":",
"output",
"=",
"csv",
".",
"writer",
"(",
"csv_file",
")",
"output",
".",
"writerows",
"(",
"self",
".",
"__raw_data",
")"
] |
Output to a CSV file
:param path fpath: file output location
.. todo:: files output using `with` syntax.
|
[
"輸出成",
"CSV",
"檔"
] |
python
|
train
|
dougalsutherland/skl-groups
|
skl_groups/divergences/knn.py
|
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L802-L818
|
def tsallis(alphas, Ks, dim, required, clamp=True, to_self=False):
r'''
Estimate the Tsallis-alpha divergence between distributions, based on kNN
distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1)
If clamp (the default), enforces the estimate is nonnegative.
Returns an array of shape (num_alphas, num_Ks).
'''
alphas = np.reshape(alphas, (-1, 1))
alpha_est = required
est = alpha_est - 1
est /= alphas - 1
if clamp:
np.maximum(est, 0, out=est)
return est
|
[
"def",
"tsallis",
"(",
"alphas",
",",
"Ks",
",",
"dim",
",",
"required",
",",
"clamp",
"=",
"True",
",",
"to_self",
"=",
"False",
")",
":",
"alphas",
"=",
"np",
".",
"reshape",
"(",
"alphas",
",",
"(",
"-",
"1",
",",
"1",
")",
")",
"alpha_est",
"=",
"required",
"est",
"=",
"alpha_est",
"-",
"1",
"est",
"/=",
"alphas",
"-",
"1",
"if",
"clamp",
":",
"np",
".",
"maximum",
"(",
"est",
",",
"0",
",",
"out",
"=",
"est",
")",
"return",
"est"
] |
r'''
Estimate the Tsallis-alpha divergence between distributions, based on kNN
distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1)
If clamp (the default), enforces the estimate is nonnegative.
Returns an array of shape (num_alphas, num_Ks).
|
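A standalone numeric sketch of the final estimator step shown above, with made-up intermediate values, illustrating the broadcasting and the nonnegativity clamp:
import numpy as np

alphas = np.reshape([0.5, 0.9, 1.5], (-1, 1))   # (num_alphas, 1)
required = np.full((3, 3), 0.98)                # hypothetical \int p^alpha q^(1-alpha) estimates
est = (required - 1) / (alphas - 1)             # Tsallis-alpha divergence estimate
np.maximum(est, 0, out=est)                     # clamp, as when clamp=True
print(est.shape)                                # (num_alphas, num_Ks) == (3, 3)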
[
"r",
"Estimate",
"the",
"Tsallis",
"-",
"alpha",
"divergence",
"between",
"distributions",
"based",
"on",
"kNN",
"distances",
":",
"(",
"\\",
"int",
"p^alpha",
"q^",
"(",
"1",
"-",
"\\",
"alpha",
")",
"-",
"1",
")",
"/",
"(",
"\\",
"alpha",
"-",
"1",
")"
] |
python
|
valid
|
PvtHaggard/pydarksky
|
pydarksky/darksky.py
|
https://github.com/PvtHaggard/pydarksky/blob/c2d68d311bf0a58125fbfe31eff124356899c75b/pydarksky/darksky.py#L255-L311
|
def weather(self, latitude=None, longitude=None, date=None):
# type:(float, float, datetime) -> Weather
"""
:param float latitude: Locations latitude
:param float longitude: Locations longitude
:param datetime or str or int date: Date/time for historical weather data
:raises requests.exceptions.HTTPError: Raises on bad http response
:raises TypeError: Raises on invalid param types
:rtype: Weather
Example uses
.. code-block:: python
# DarkSky instantiation
>>> darksky = pydarksky.DarkSky(api_key)
# Pre-define values
>>> darksky.latitude = -34.9285
>>> darksky.longitude = 138.6005
>>> weather = darksky.weather()
# Pass values as params
>>> weather = darksky.weather(latitude=-34.9285, longitude=138.6005)
# Pass values from dict
>>> kwargs = {"longitude": 138.6005, "latitude": -34.9285}
>>> weather = darksky.weather(**kwargs)
"""
# If params are default(None) check if latitude/longitude have already been defined(Not None)
# Otherwise TypeError is raised
if latitude is None:
if self.latitude is None:
raise TypeError("latitude must be type '<class 'str'>' is None")
else:
self.latitude = latitude
if longitude is None:
if self.longitude is None:
raise TypeError("longitude must be type '<class 'str'>' is None")
else:
self.longitude = longitude
self._date = date
url = self.url
log.debug(url)
self._response = requests.get(url, headers={"Accept-Encoding": "gzip"}, timeout=5)
self._response.raise_for_status()
self._weather = Weather(self._response.text)
return self._weather
|
[
"def",
"weather",
"(",
"self",
",",
"latitude",
"=",
"None",
",",
"longitude",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"# type:(float, float, datetime) -> Weather",
"# If params are default(None) check if latitude/longitude have already been defined(Not None)",
"# Otherwise TypeError is raised",
"if",
"latitude",
"is",
"None",
":",
"if",
"self",
".",
"latitude",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"latitude must be type '<class 'str'>' is None\"",
")",
"else",
":",
"self",
".",
"latitude",
"=",
"latitude",
"if",
"longitude",
"is",
"None",
":",
"if",
"self",
".",
"longitude",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"longitude must be type '<class 'str'>' is None\"",
")",
"else",
":",
"self",
".",
"longitude",
"=",
"longitude",
"self",
".",
"_date",
"=",
"date",
"url",
"=",
"self",
".",
"url",
"log",
".",
"debug",
"(",
"url",
")",
"self",
".",
"_response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"{",
"\"Accept-Encoding\"",
":",
"\"gzip\"",
"}",
",",
"timeout",
"=",
"5",
")",
"self",
".",
"_response",
".",
"raise_for_status",
"(",
")",
"self",
".",
"_weather",
"=",
"Weather",
"(",
"self",
".",
"_response",
".",
"text",
")",
"return",
"self",
".",
"_weather"
] |
:param float latitude: Locations latitude
:param float longitude: Locations longitude
:param datetime or str or int date: Date/time for historical weather data
:raises requests.exceptions.HTTPError: Raises on bad http response
:raises TypeError: Raises on invalid param types
:rtype: Weather
Example uses
.. code-block:: python
# DarkSky instantiation
>>> darksky = pydarksky.DarkSky(api_key)
# Pre-define values
>>> darksky.latitude = -34.9285
>>> darksky.longitude = 138.6005
>>> weather = darksky.weather()
# Pass values as params
>>> weather = darksky.weather(latitude=-34.9285, longitude=138.6005)
# Pass values from dict
>>> kwargs = {"longitude": 138.6005, "latitude": -34.9285}
>>> weather = darksky.weather(**kwargs)
|
[
":",
"param",
"float",
"latitude",
":",
"Locations",
"latitude",
":",
"param",
"float",
"longitude",
":",
"Locations",
"longitude",
":",
"param",
"datetime",
"or",
"str",
"or",
"int",
"date",
":",
"Date",
"/",
"time",
"for",
"historical",
"weather",
"data"
] |
python
|
train
|
codelv/enaml-native
|
src/enamlnative/core/eventloop/gen.py
|
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/eventloop/gen.py#L1057-L1061
|
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
|
[
"def",
"is_ready",
"(",
"self",
",",
"key",
")",
":",
"if",
"self",
".",
"pending_callbacks",
"is",
"None",
"or",
"key",
"not",
"in",
"self",
".",
"pending_callbacks",
":",
"raise",
"UnknownKeyError",
"(",
"\"key %r is not pending\"",
"%",
"(",
"key",
",",
")",
")",
"return",
"key",
"in",
"self",
".",
"results"
] |
Returns true if a result is available for ``key``.
|
[
"Returns",
"true",
"if",
"a",
"result",
"is",
"available",
"for",
"key",
"."
] |
python
|
train
|
RI-imaging/ODTbrain
|
odtbrain/_alg2d_bpp.py
|
https://github.com/RI-imaging/ODTbrain/blob/abbab8b790f10c0c7aea8d858d7d60f2fdd7161e/odtbrain/_alg2d_bpp.py#L8-L326
|
def backpropagate_2d(uSin, angles, res, nm, lD=0, coords=None,
weight_angles=True,
onlyreal=False, padding=True, padval=0,
count=None, max_count=None, verbose=0):
r"""2D backpropagation with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This method implements the 2D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{1D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{1D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}) \right \}
}{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{1D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{1D}}` 1D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
radontea.backproject: backprojection based on the Fourier slice
theorem
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
##
##
# TODO:
# - combine the 2nd filter and the rotation in the for loop
# to save memory. However, memory is not a big issue in 2D.
##
##
A = angles.shape[0]
if max_count is not None:
max_count.value += A + 2
# Check input data
assert len(uSin.shape) == 2, "Input data `uB` must have shape (A,N)!"
assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
if coords is not None:
raise NotImplementedError("Output coordinates cannot yet be set " +
"for the 2D backpropagation algorithm.")
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# Here, the notation defines
# a wave propagating to the right as:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
# Perform weighting
if weight_angles:
weights = util.compute_angle_weights_1d(angles).reshape(-1, 1)
sinogram = uSin * weights
else:
sinogram = uSin
# Size of the input data
ln = sinogram.shape[1]
# We perform padding before performing the Fourier transform.
# This gets rid of artifacts due to false periodicity and also
# speeds up Fourier transforms if the input image size is not
# a power of 2.
order = max(64., 2**np.ceil(np.log(ln * 2.1) / np.log(2)))
if padding:
pad = order - ln
else:
pad = 0
padl = np.int(np.ceil(pad / 2))
padr = np.int(pad - padl)
if padval is None:
sino = np.pad(sinogram, ((0, 0), (padl, padr)),
mode="edge")
if verbose > 0:
print("......Padding with edge values.")
else:
sino = np.pad(sinogram, ((0, 0), (padl, padr)),
mode="linear_ramp",
end_values=(padval,))
if verbose > 0:
print("......Verifying padding value: {}".format(padval))
# zero-padded length of sinogram.
lN = sino.shape[1]
# Ask for the filter. Do not include zero (first element).
#
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
# * iint dϕ₀ dkx (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
#
# (r and s₀ are vectors. In the last term we perform the dot-product)
#
# kₘM = sqrt( kₘ² - kx² )
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# The filter can be split into two parts
#
# 1) part without dependence on the z-coordinate
#
# -i kₘ / ((2π)^(3/2) a₀)
# * iint dϕ₀ dkx
# * |kx|
# * exp(-i kₘ M lD )
#
# 2) part with dependence of the z-coordinate
#
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# The filter (1) can be performed using the classical filter process
# as in the backprojection algorithm.
#
#
if count is not None:
count.value += 1
# Corresponding sample frequencies
fx = np.fft.fftfreq(lN) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
# Differentials for integral
dphi0 = 2 * np.pi / A
# We will later multiply with phi0.
# a, x
kx = kx.reshape(1, -1)
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 < km**2)
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)
prefactor = -1j * km / (2 * np.pi)
prefactor *= dphi0
prefactor *= np.abs(kx) * filter_klp
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
# Perform filtering of the sinogram
projection = np.fft.fft(sino, axis=-1) * prefactor
#
# filter (2) must be applied before rotation as well
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# This filter is effectively an inverse Fourier transform
#
# exp(i kx xD) exp(i kₘ (M - 1) yD )
#
# xD = x cos(ϕ₀) + y sin(ϕ₀)
# yD = - x sin(ϕ₀) + y cos(ϕ₀)
# Everything is in pixels
center = ln / 2.0
x = np.arange(lN) - center + .5
# Meshgrid for output array
yv = x.reshape(-1, 1)
Mp = M.reshape(1, -1)
filter2 = np.exp(1j * yv * km * (Mp - 1)) # .reshape(1,lN,lN)
projection = projection.reshape(A, 1, lN) # * filter2
# Prepare complex output image
if onlyreal:
outarr = np.zeros((ln, ln))
else:
outarr = np.zeros((ln, ln), dtype=np.dtype(complex))
if count is not None:
count.value += 1
# Calculate backpropagations
for i in np.arange(A):
# Create an interpolation object of the projection.
# interpolation of the rotated fourier transformed projection
# this is already tiled onto the entire image.
sino_filtered = np.fft.ifft(projection[i] * filter2, axis=-1)
# Resize filtered sinogram back to original size
sino = sino_filtered[:ln, padl:padl + ln]
rotated_projr = scipy.ndimage.interpolation.rotate(
sino.real, -angles[i] * 180 / np.pi,
reshape=False, mode="constant", cval=0)
# Append results
outarr += rotated_projr
if not onlyreal:
outarr += 1j * scipy.ndimage.interpolation.rotate(
sino.imag, -angles[i] * 180 / np.pi,
reshape=False, mode="constant", cval=0)
if count is not None:
count.value += 1
return outarr
|
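A minimal usage sketch with a synthetic sinogram, assuming the function is exported as odtbrain.backpropagate_2d (the top-level export is an assumption; the signature comes from the snippet above):
import numpy as np
import odtbrain

A, N = 100, 64                                  # number of projections, detector pixels
angles = np.linspace(0, 2 * np.pi, A, endpoint=False)
sinogram = np.ones((A, N), dtype=complex)       # u_B / u_0, synthetic placeholder data
f = odtbrain.backpropagate_2d(sinogram, angles, res=2.0, nm=1.333)
print(f.shape)                                  # (N, N) object function f(x, z)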
[
"def",
"backpropagate_2d",
"(",
"uSin",
",",
"angles",
",",
"res",
",",
"nm",
",",
"lD",
"=",
"0",
",",
"coords",
"=",
"None",
",",
"weight_angles",
"=",
"True",
",",
"onlyreal",
"=",
"False",
",",
"padding",
"=",
"True",
",",
"padval",
"=",
"0",
",",
"count",
"=",
"None",
",",
"max_count",
"=",
"None",
",",
"verbose",
"=",
"0",
")",
":",
"##",
"##",
"# TODO:",
"# - combine the 2nd filter and the rotation in the for loop",
"# to save memory. However, memory is not a big issue in 2D.",
"##",
"##",
"A",
"=",
"angles",
".",
"shape",
"[",
"0",
"]",
"if",
"max_count",
"is",
"not",
"None",
":",
"max_count",
".",
"value",
"+=",
"A",
"+",
"2",
"# Check input data",
"assert",
"len",
"(",
"uSin",
".",
"shape",
")",
"==",
"2",
",",
"\"Input data `uB` must have shape (A,N)!\"",
"assert",
"len",
"(",
"uSin",
")",
"==",
"A",
",",
"\"`len(angles)` must be equal to `len(uSin)`!\"",
"if",
"coords",
"is",
"not",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Output coordinates cannot yet be set \"",
"+",
"+",
"\"for the 2D backrpopagation algorithm.\"",
")",
"# Cut-Off frequency",
"# km [1/px]",
"km",
"=",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"nm",
")",
"/",
"res",
"# Here, the notation defines",
"# a wave propagating to the right as:",
"#",
"# u0(x) = exp(ikx)",
"#",
"# However, in physics usually we use the other sign convention:",
"#",
"# u0(x) = exp(-ikx)",
"#",
"# In order to be consistent with programs like Meep or our",
"# scattering script for a dielectric cylinder, we want to use the",
"# latter sign convention.",
"# This is not a big problem. We only need to multiply the imaginary",
"# part of the scattered wave by -1.",
"# Perform weighting",
"if",
"weight_angles",
":",
"weights",
"=",
"util",
".",
"compute_angle_weights_1d",
"(",
"angles",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"sinogram",
"=",
"uSin",
"*",
"weights",
"else",
":",
"sinogram",
"=",
"uSin",
"# Size of the input data",
"ln",
"=",
"sinogram",
".",
"shape",
"[",
"1",
"]",
"# We perform padding before performing the Fourier transform.",
"# This gets rid of artifacts due to false periodicity and also",
"# speeds up Fourier transforms of the input image size is not",
"# a power of 2.",
"order",
"=",
"max",
"(",
"64.",
",",
"2",
"**",
"np",
".",
"ceil",
"(",
"np",
".",
"log",
"(",
"ln",
"*",
"2.1",
")",
"/",
"np",
".",
"log",
"(",
"2",
")",
")",
")",
"if",
"padding",
":",
"pad",
"=",
"order",
"-",
"ln",
"else",
":",
"pad",
"=",
"0",
"padl",
"=",
"np",
".",
"int",
"(",
"np",
".",
"ceil",
"(",
"pad",
"/",
"2",
")",
")",
"padr",
"=",
"np",
".",
"int",
"(",
"pad",
"-",
"padl",
")",
"if",
"padval",
"is",
"None",
":",
"sino",
"=",
"np",
".",
"pad",
"(",
"sinogram",
",",
"(",
"(",
"0",
",",
"0",
")",
",",
"(",
"padl",
",",
"padr",
")",
")",
",",
"mode",
"=",
"\"edge\"",
")",
"if",
"verbose",
">",
"0",
":",
"print",
"(",
"\"......Padding with edge values.\"",
")",
"else",
":",
"sino",
"=",
"np",
".",
"pad",
"(",
"sinogram",
",",
"(",
"(",
"0",
",",
"0",
")",
",",
"(",
"padl",
",",
"padr",
")",
")",
",",
"mode",
"=",
"\"linear_ramp\"",
",",
"end_values",
"=",
"(",
"padval",
",",
")",
")",
"if",
"verbose",
">",
"0",
":",
"print",
"(",
"\"......Verifying padding value: {}\"",
".",
"format",
"(",
"padval",
")",
")",
"# zero-padded length of sinogram.",
"lN",
"=",
"sino",
".",
"shape",
"[",
"1",
"]",
"# Ask for the filter. Do not include zero (first element).",
"#",
"# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]",
"# - double coverage factor 1/2 already included",
"# - unitary angular frequency to unitary ordinary frequency",
"# conversion performed in calculation of UB=FT(uB).",
"#",
"# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)",
"# * iint dϕ₀ dkx (prefactor)",
"# * |kx| (prefactor)",
"# * exp(-i kₘ M lD ) (prefactor)",
"# * UBϕ₀(kx) (dependent on ϕ₀)",
"# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)",
"#",
"# (r and s₀ are vectors. In the last term we perform the dot-product)",
"#",
"# kₘM = sqrt( kₘ² - kx² )",
"# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )",
"# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )",
"#",
"# The filter can be split into two parts",
"#",
"# 1) part without dependence on the z-coordinate",
"#",
"# -i kₘ / ((2π)^(3/2) a₀)",
"# * iint dϕ₀ dkx",
"# * |kx|",
"# * exp(-i kₘ M lD )",
"#",
"# 2) part with dependence of the z-coordinate",
"#",
"# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )",
"#",
"# The filter (1) can be performed using the classical filter process",
"# as in the backprojection algorithm.",
"#",
"#",
"if",
"count",
"is",
"not",
"None",
":",
"count",
".",
"value",
"+=",
"1",
"# Corresponding sample frequencies",
"fx",
"=",
"np",
".",
"fft",
".",
"fftfreq",
"(",
"lN",
")",
"# 1D array",
"# kx is a 1D array.",
"kx",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"fx",
"# Differentials for integral",
"dphi0",
"=",
"2",
"*",
"np",
".",
"pi",
"/",
"A",
"# We will later multiply with phi0.",
"# a, x",
"kx",
"=",
"kx",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"# Low-pass filter:",
"# less-than-or-equal would give us zero division error.",
"filter_klp",
"=",
"(",
"kx",
"**",
"2",
"<",
"km",
"**",
"2",
")",
"# Filter M so there are no nans from the root",
"M",
"=",
"1.",
"/",
"km",
"*",
"np",
".",
"sqrt",
"(",
"(",
"km",
"**",
"2",
"-",
"kx",
"**",
"2",
")",
"*",
"filter_klp",
")",
"prefactor",
"=",
"-",
"1j",
"*",
"km",
"/",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"prefactor",
"*=",
"dphi0",
"prefactor",
"*=",
"np",
".",
"abs",
"(",
"kx",
")",
"*",
"filter_klp",
"# new in version 0.1.4:",
"# We multiply by the factor (M-1) instead of just (M)",
"# to take into account that we have a scattered",
"# wave that is normalized by u0.",
"prefactor",
"*=",
"np",
".",
"exp",
"(",
"-",
"1j",
"*",
"km",
"*",
"(",
"M",
"-",
"1",
")",
"*",
"lD",
")",
"# Perform filtering of the sinogram",
"projection",
"=",
"np",
".",
"fft",
".",
"fft",
"(",
"sino",
",",
"axis",
"=",
"-",
"1",
")",
"*",
"prefactor",
"#",
"# filter (2) must be applied before rotation as well",
"# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )",
"#",
"# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )",
"# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )",
"#",
"# This filter is effectively an inverse Fourier transform",
"#",
"# exp(i kx xD) exp(i kₘ (M - 1) yD )",
"#",
"# xD = x cos(ϕ₀) + y sin(ϕ₀)",
"# yD = - x sin(ϕ₀) + y cos(ϕ₀)",
"# Everything is in pixels",
"center",
"=",
"ln",
"/",
"2.0",
"x",
"=",
"np",
".",
"arange",
"(",
"lN",
")",
"-",
"center",
"+",
".5",
"# Meshgrid for output array",
"yv",
"=",
"x",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"Mp",
"=",
"M",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
"filter2",
"=",
"np",
".",
"exp",
"(",
"1j",
"*",
"yv",
"*",
"km",
"*",
"(",
"Mp",
"-",
"1",
")",
")",
"# .reshape(1,lN,lN)",
"projection",
"=",
"projection",
".",
"reshape",
"(",
"A",
",",
"1",
",",
"lN",
")",
"# * filter2",
"# Prepare complex output image",
"if",
"onlyreal",
":",
"outarr",
"=",
"np",
".",
"zeros",
"(",
"(",
"ln",
",",
"ln",
")",
")",
"else",
":",
"outarr",
"=",
"np",
".",
"zeros",
"(",
"(",
"ln",
",",
"ln",
")",
",",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"complex",
")",
")",
"if",
"count",
"is",
"not",
"None",
":",
"count",
".",
"value",
"+=",
"1",
"# Calculate backpropagations",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"A",
")",
":",
"# Create an interpolation object of the projection.",
"# interpolation of the rotated fourier transformed projection",
"# this is already tiled onto the entire image.",
"sino_filtered",
"=",
"np",
".",
"fft",
".",
"ifft",
"(",
"projection",
"[",
"i",
"]",
"*",
"filter2",
",",
"axis",
"=",
"-",
"1",
")",
"# Resize filtered sinogram back to original size",
"sino",
"=",
"sino_filtered",
"[",
":",
"ln",
",",
"padl",
":",
"padl",
"+",
"ln",
"]",
"rotated_projr",
"=",
"scipy",
".",
"ndimage",
".",
"interpolation",
".",
"rotate",
"(",
"sino",
".",
"real",
",",
"-",
"angles",
"[",
"i",
"]",
"*",
"180",
"/",
"np",
".",
"pi",
",",
"reshape",
"=",
"False",
",",
"mode",
"=",
"\"constant\"",
",",
"cval",
"=",
"0",
")",
"# Append results",
"outarr",
"+=",
"rotated_projr",
"if",
"not",
"onlyreal",
":",
"outarr",
"+=",
"1j",
"*",
"scipy",
".",
"ndimage",
".",
"interpolation",
".",
"rotate",
"(",
"sino",
".",
"imag",
",",
"-",
"angles",
"[",
"i",
"]",
"*",
"180",
"/",
"np",
".",
"pi",
",",
"reshape",
"=",
"False",
",",
"mode",
"=",
"\"constant\"",
",",
"cval",
"=",
"0",
")",
"if",
"count",
"is",
"not",
"None",
":",
"count",
".",
"value",
"+=",
"1",
"return",
"outarr"
] |
r"""2D backpropagation with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This method implements the 2D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{1D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{1D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}) \right \}
}{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{1D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{1D}}` 1D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
radontea.backproject: backprojection based on the Fourier slice
theorem
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
|
[
"r",
"2D",
"backpropagation",
"with",
"the",
"Fourier",
"diffraction",
"theorem"
] |
python
|
train
|
apple/turicreate
|
src/unity/python/turicreate/toolkits/_decision_tree.py
|
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_decision_tree.py#L373-L401
|
def get_prediction_score(self, node_id):
"""
Return the prediction score (if leaf node) or None if its an
intermediate node.
Parameters
----------
node_id: id of the node to get the prediction value.
Returns
-------
float or None: returns float value of prediction if leaf node and None
if not.
Examples
--------
.. sourcecode:: python
>>> tree.get_prediction_score(120) # Leaf node
0.251092
>>> tree.get_prediction_score(120) # Not a leaf node
None
"""
_raise_error_if_not_of_type(node_id, [int,long], "node_id")
_numeric_param_check_range("node_id", node_id, 0, self.num_nodes - 1)
node = self.nodes[node_id]
return None if node.is_leaf is False else node.value
|
[
"def",
"get_prediction_score",
"(",
"self",
",",
"node_id",
")",
":",
"_raise_error_if_not_of_type",
"(",
"node_id",
",",
"[",
"int",
",",
"long",
"]",
",",
"\"node_id\"",
")",
"_numeric_param_check_range",
"(",
"\"node_id\"",
",",
"node_id",
",",
"0",
",",
"self",
".",
"num_nodes",
"-",
"1",
")",
"node",
"=",
"self",
".",
"nodes",
"[",
"node_id",
"]",
"return",
"None",
"if",
"node",
".",
"is_leaf",
"is",
"False",
"else",
"node",
".",
"value"
] |
Return the prediction score (if leaf node) or None if its an
intermediate node.
Parameters
----------
node_id: id of the node to get the prediction value.
Returns
-------
float or None: returns float value of prediction if leaf node and None
if not.
Examples
--------
.. sourcecode:: python
>>> tree.get_prediction_score(120) # Leaf node
0.251092
>>> tree.get_prediction_score(120) # Not a leaf node
None
|
[
"Return",
"the",
"prediction",
"score",
"(",
"if",
"leaf",
"node",
")",
"or",
"None",
"if",
"its",
"an",
"intermediate",
"node",
"."
] |
python
|
train
|
bcbio/bcbio-nextgen
|
bcbio/structural/gatkcnv.py
|
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L34-L54
|
def _run_paired(paired):
"""Run somatic variant calling pipeline.
"""
from bcbio.structural import titancna
work_dir = _sv_workdir(paired.tumor_data)
seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data),
work_dir, paired)
call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data)
out = []
if paired.normal_data:
out.append(paired.normal_data)
if "sv" not in paired.tumor_data:
paired.tumor_data["sv"] = []
paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv",
"call_file": call_file,
"vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header,
_seg_to_vcf, paired.tumor_data),
"seg": seg_files["seg"],
"plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)})
out.append(paired.tumor_data)
return out
|
[
"def",
"_run_paired",
"(",
"paired",
")",
":",
"from",
"bcbio",
".",
"structural",
"import",
"titancna",
"work_dir",
"=",
"_sv_workdir",
"(",
"paired",
".",
"tumor_data",
")",
"seg_files",
"=",
"model_segments",
"(",
"tz",
".",
"get_in",
"(",
"[",
"\"depth\"",
",",
"\"bins\"",
",",
"\"normalized\"",
"]",
",",
"paired",
".",
"tumor_data",
")",
",",
"work_dir",
",",
"paired",
")",
"call_file",
"=",
"call_copy_numbers",
"(",
"seg_files",
"[",
"\"seg\"",
"]",
",",
"work_dir",
",",
"paired",
".",
"tumor_data",
")",
"out",
"=",
"[",
"]",
"if",
"paired",
".",
"normal_data",
":",
"out",
".",
"append",
"(",
"paired",
".",
"normal_data",
")",
"if",
"\"sv\"",
"not",
"in",
"paired",
".",
"tumor_data",
":",
"paired",
".",
"tumor_data",
"[",
"\"sv\"",
"]",
"=",
"[",
"]",
"paired",
".",
"tumor_data",
"[",
"\"sv\"",
"]",
".",
"append",
"(",
"{",
"\"variantcaller\"",
":",
"\"gatk-cnv\"",
",",
"\"call_file\"",
":",
"call_file",
",",
"\"vrn_file\"",
":",
"titancna",
".",
"to_vcf",
"(",
"call_file",
",",
"\"GATK4-CNV\"",
",",
"_get_seg_header",
",",
"_seg_to_vcf",
",",
"paired",
".",
"tumor_data",
")",
",",
"\"seg\"",
":",
"seg_files",
"[",
"\"seg\"",
"]",
",",
"\"plot\"",
":",
"plot_model_segments",
"(",
"seg_files",
",",
"work_dir",
",",
"paired",
".",
"tumor_data",
")",
"}",
")",
"out",
".",
"append",
"(",
"paired",
".",
"tumor_data",
")",
"return",
"out"
] |
Run somatic variant calling pipeline.
|
[
"Run",
"somatic",
"variant",
"calling",
"pipeline",
"."
] |
python
|
train
|
mila-iqia/fuel
|
fuel/utils/parallel.py
|
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/utils/parallel.py#L59-L113
|
def producer_consumer(producer, consumer, addr='tcp://127.0.0.1',
port=None, context=None):
"""A producer-consumer pattern.
Parameters
----------
producer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
consumer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PULL socket.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
port : int, optional
The port on which the consumer should listen.
context : zmq.Context, optional
The ZeroMQ Context to use. One will be created otherwise.
Returns
-------
result
Passes along whatever `consumer` returns.
Notes
-----
This sets up a PULL socket in the calling process and forks
a process that calls `producer` on a PUSH socket. When the
consumer returns, the producer process is terminated.
Wrap `consumer` or `producer` in a `functools.partial` object
in order to send additional arguments; the callables passed in
should expect only one required, positional argument, the socket
handle.
"""
context_created = False
if context is None:
context_created = True
context = zmq.Context()
try:
consumer_socket = context.socket(zmq.PULL)
if port is None:
port = consumer_socket.bind_to_random_port(addr)
try:
process = _spawn_producer(producer, port)
result = consumer(consumer_socket)
finally:
process.terminate()
return result
finally:
# Works around a Python 3.x bug.
if context_created:
context.destroy()
|
[
"def",
"producer_consumer",
"(",
"producer",
",",
"consumer",
",",
"addr",
"=",
"'tcp://127.0.0.1'",
",",
"port",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"context_created",
"=",
"False",
"if",
"context",
"is",
"None",
":",
"context_created",
"=",
"True",
"context",
"=",
"zmq",
".",
"Context",
"(",
")",
"try",
":",
"consumer_socket",
"=",
"context",
".",
"socket",
"(",
"zmq",
".",
"PULL",
")",
"if",
"port",
"is",
"None",
":",
"port",
"=",
"consumer_socket",
".",
"bind_to_random_port",
"(",
"addr",
")",
"try",
":",
"process",
"=",
"_spawn_producer",
"(",
"producer",
",",
"port",
")",
"result",
"=",
"consumer",
"(",
"consumer_socket",
")",
"finally",
":",
"process",
".",
"terminate",
"(",
")",
"return",
"result",
"finally",
":",
"# Works around a Python 3.x bug.",
"if",
"context_created",
":",
"context",
".",
"destroy",
"(",
")"
] |
A producer-consumer pattern.
Parameters
----------
producer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
consumer : callable
Callable that takes a single argument, a handle
for a ZeroMQ PULL socket.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
port : int, optional
The port on which the consumer should listen.
context : zmq.Context, optional
The ZeroMQ Context to use. One will be created otherwise.
Returns
-------
result
Passes along whatever `consumer` returns.
Notes
-----
This sets up a PULL socket in the calling process and forks
a process that calls `producer` on a PUSH socket. When the
consumer returns, the producer process is terminated.
Wrap `consumer` or `producer` in a `functools.partial` object
in order to send additional arguments; the callables passed in
should expect only one required, positional argument, the socket
handle.
|
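A usage sketch following the Notes above: extra arguments are bound with functools.partial so each callable receives only its socket handle. The import path is taken from the record's file path; the item count is arbitrary.
import functools
from fuel.utils.parallel import producer_consumer  # import path assumed from the record

def produce(n_items, socket):
    # Runs in the forked process; must be picklable, hence a module-level function.
    for i in range(n_items):
        socket.send_pyobj(i)

def consume(n_items, socket):
    return sum(socket.recv_pyobj() for _ in range(n_items))

if __name__ == '__main__':
    total = producer_consumer(functools.partial(produce, 100),
                              functools.partial(consume, 100))
    print(total)  # 4950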
[
"A",
"producer",
"-",
"consumer",
"pattern",
"."
] |
python
|
train
|
awslabs/sockeye
|
sockeye/image_captioning/utils.py
|
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/image_captioning/utils.py#L142-L155
|
def save_features(paths: List[str], datas: List[np.ndarray],
compressed: bool = False) -> List:
"""
Save features specified with absolute paths.
:param paths: List of files specified with paths.
:param datas: List of numpy ndarrays to save into the respective files
:param compressed: Use numpy compression
:return: A list of file names.
"""
fnames = [] # type: List[str]
for path, data in zip(paths, datas):
fnames.append(save_feature(path, data, compressed))
return fnames
|
[
"def",
"save_features",
"(",
"paths",
":",
"List",
"[",
"str",
"]",
",",
"datas",
":",
"List",
"[",
"np",
".",
"ndarray",
"]",
",",
"compressed",
":",
"bool",
"=",
"False",
")",
"->",
"List",
":",
"fnames",
"=",
"[",
"]",
"# type: List[str]",
"for",
"path",
",",
"data",
"in",
"zip",
"(",
"paths",
",",
"datas",
")",
":",
"fnames",
".",
"append",
"(",
"save_feature",
"(",
"path",
",",
"data",
",",
"compressed",
")",
")",
"return",
"fnames"
] |
Save features specified with absolute paths.
:param paths: List of files specified with paths.
:param datas: List of numpy ndarrays to save into the respective files
:param compressed: Use numpy compression
:return: A list of file names.
|
[
"Save",
"features",
"specified",
"with",
"absolute",
"paths",
"."
] |
python
|
train
|
numan/py-analytics
|
analytics/backends/redis.py
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L280-L313
|
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
|
[
"def",
"get_metric_by_month",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"from_date",
",",
"limit",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"kwargs",
".",
"get",
"(",
"\"connection\"",
",",
"None",
")",
"first_of_month",
"=",
"datetime",
".",
"date",
"(",
"year",
"=",
"from_date",
".",
"year",
",",
"month",
"=",
"from_date",
".",
"month",
",",
"day",
"=",
"1",
")",
"metric_key_date_range",
"=",
"self",
".",
"_get_weekly_date_range",
"(",
"first_of_month",
",",
"relativedelta",
"(",
"months",
"=",
"limit",
")",
")",
"date_generator",
"=",
"(",
"first_of_month",
"+",
"relativedelta",
"(",
"months",
"=",
"i",
")",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
")",
")",
"#generate a list of first_of_month's in between the start date and the end date",
"series",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"date_generator",
",",
"limit",
")",
")",
"metric_keys",
"=",
"[",
"self",
".",
"_get_monthly_metric_name",
"(",
"metric",
",",
"month_date",
")",
"for",
"month_date",
"in",
"series",
"]",
"metric_func",
"=",
"lambda",
"conn",
":",
"[",
"conn",
".",
"hmget",
"(",
"self",
".",
"_get_weekly_metric_key",
"(",
"unique_identifier",
",",
"metric_key_date",
")",
",",
"metric_keys",
")",
"for",
"metric_key_date",
"in",
"metric_key_date_range",
"]",
"if",
"conn",
"is",
"not",
"None",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"else",
":",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"series",
",",
"results",
"=",
"self",
".",
"_parse_and_process_metrics",
"(",
"series",
",",
"results",
")",
"return",
"series",
",",
"results"
] |
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
|
[
"Returns",
"the",
"metric",
"for",
"unique_identifier",
"segmented",
"by",
"month",
"starting",
"from",
"from_date",
".",
"It",
"will",
"retrieve",
"metrics",
"data",
"starting",
"from",
"the",
"1st",
"of",
"the",
"month",
"specified",
"in",
"from_date"
] |
python
|
train
|
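The month handling described in the docstring above (snap to the 1st of the starting month, then step forward one month at a time for `limit` months) can be exercised on its own. A small sketch, assuming python-dateutil is available as in the original code; the function name `month_series` is illustrative only.

import datetime
import itertools
from dateutil.relativedelta import relativedelta

def month_series(from_date, limit):
    # snap to the 1st of the starting month, then step forward month by month
    first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
    date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    return list(itertools.islice(date_generator, limit))

print(month_series(datetime.date(2013, 1, 15), limit=3))
# [datetime.date(2013, 1, 1), datetime.date(2013, 2, 1), datetime.date(2013, 3, 1)]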
openstack/networking-cisco
|
networking_cisco/plugins/cisco/l3/schedulers/l3_routertype_aware_agent_scheduler.py
|
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/l3/schedulers/l3_routertype_aware_agent_scheduler.py#L58-L80
|
def _filter_unscheduled_routers(self, plugin, context, routers):
"""Filter from list of routers the ones that are not scheduled.
Only for release < pike.
"""
if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
context, plugin = plugin, context
unscheduled_routers = []
for router in routers:
if (router[routertype.TYPE_ATTR] !=
plugin.get_namespace_router_type_id(context)):
# ignore non-namespace routers
continue
l3_agents = plugin.get_l3_agents_hosting_routers(
context, [router['id']])
if l3_agents:
LOG.debug('Router %(router_id)s has already been '
'hosted by L3 agent %(agent_id)s',
{'router_id': router['id'],
'agent_id': l3_agents[0]['id']})
else:
unscheduled_routers.append(router)
return unscheduled_routers
|
[
"def",
"_filter_unscheduled_routers",
"(",
"self",
",",
"plugin",
",",
"context",
",",
"routers",
")",
":",
"if",
"NEUTRON_VERSION",
".",
"version",
"[",
"0",
"]",
"<=",
"NEUTRON_NEWTON_VERSION",
".",
"version",
"[",
"0",
"]",
":",
"context",
",",
"plugin",
"=",
"plugin",
",",
"context",
"unscheduled_routers",
"=",
"[",
"]",
"for",
"router",
"in",
"routers",
":",
"if",
"(",
"router",
"[",
"routertype",
".",
"TYPE_ATTR",
"]",
"!=",
"plugin",
".",
"get_namespace_router_type_id",
"(",
"context",
")",
")",
":",
"# ignore non-namespace routers",
"continue",
"l3_agents",
"=",
"plugin",
".",
"get_l3_agents_hosting_routers",
"(",
"context",
",",
"[",
"router",
"[",
"'id'",
"]",
"]",
")",
"if",
"l3_agents",
":",
"LOG",
".",
"debug",
"(",
"'Router %(router_id)s has already been '",
"'hosted by L3 agent %(agent_id)s'",
",",
"{",
"'router_id'",
":",
"router",
"[",
"'id'",
"]",
",",
"'agent_id'",
":",
"l3_agents",
"[",
"0",
"]",
"[",
"'id'",
"]",
"}",
")",
"else",
":",
"unscheduled_routers",
".",
"append",
"(",
"router",
")",
"return",
"unscheduled_routers"
] |
Filter from list of routers the ones that are not scheduled.
Only for release < pike.
|
[
"Filter",
"from",
"list",
"of",
"routers",
"the",
"ones",
"that",
"are",
"not",
"scheduled",
"."
] |
python
|
train
|
geertj/gruvi
|
lib/gruvi/hub.py
|
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/hub.py#L133-L139
|
def switch(self, value=None):
"""Switch back to the origin fiber. The fiber is switch in next time
the event loop runs."""
if self._hub is None or not self._fiber.is_alive():
return
self._hub.run_callback(self._fiber.switch, value)
self._hub = self._fiber = None
|
[
"def",
"switch",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"self",
".",
"_hub",
"is",
"None",
"or",
"not",
"self",
".",
"_fiber",
".",
"is_alive",
"(",
")",
":",
"return",
"self",
".",
"_hub",
".",
"run_callback",
"(",
"self",
".",
"_fiber",
".",
"switch",
",",
"value",
")",
"self",
".",
"_hub",
"=",
"self",
".",
"_fiber",
"=",
"None"
] |
Switch back to the origin fiber. The fiber is switched the next time
the event loop runs.
|
[
"Switch",
"back",
"to",
"the",
"origin",
"fiber",
".",
"The",
"fiber",
"is",
"switch",
"in",
"next",
"time",
"the",
"event",
"loop",
"runs",
"."
] |
python
|
train
|
pvlib/pvlib-python
|
pvlib/singlediode.py
|
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/singlediode.py#L174-L233
|
def bishop88_i_from_v(voltage, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
method='newton'):
"""
Find current given any voltage.
Parameters
----------
voltage : numeric
voltage (V) in volts [V]
photocurrent : numeric
photogenerated current (Iph or IL) in amperes [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) in amperes [A]
resistance_series : numeric
series resistance (Rs) in ohms
resistance_shunt : numeric
shunt resistance (Rsh) in ohms
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
method : str
one of two optional search methods: either ``'brentq'``, a reliable and
bounded method or ``'newton'`` which is the default.
Returns
-------
current : numeric
current (I) at the specified voltage (V) in amperes [A]
"""
# collect args
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth)
def fv(x, v, *a):
# calculate voltage residual given diode voltage "x"
return bishop88(x, *a)[1] - v
if method.lower() == 'brentq':
# first bound the search using voc
voc_est = estimate_voc(photocurrent, saturation_current, nNsVth)
# brentq only works with scalar inputs, so we need a set up function
# and np.vectorize to repeatedly call the optimizer with the right
# arguments for possible array input
def vd_from_brent(voc, v, iph, isat, rs, rsh, gamma):
return brentq(fv, 0.0, voc, args=(v, iph, isat, rs, rsh, gamma))
vd_from_brent_vectorized = np.vectorize(vd_from_brent)
vd = vd_from_brent_vectorized(voc_est, voltage, *args)
elif method.lower() == 'newton':
# make sure all args are numpy arrays if max size > 1
# if voltage is an array, then make a copy to use for initial guess, v0
args, v0 = _prepare_newton_inputs((voltage,), args, voltage)
vd = newton(func=lambda x, *a: fv(x, voltage, *a), x0=v0,
fprime=lambda x, *a: bishop88(x, *a, gradients=True)[4],
args=args)
else:
raise NotImplementedError("Method '%s' isn't implemented" % method)
return bishop88(vd, *args)[0]
|
[
"def",
"bishop88_i_from_v",
"(",
"voltage",
",",
"photocurrent",
",",
"saturation_current",
",",
"resistance_series",
",",
"resistance_shunt",
",",
"nNsVth",
",",
"method",
"=",
"'newton'",
")",
":",
"# collect args",
"args",
"=",
"(",
"photocurrent",
",",
"saturation_current",
",",
"resistance_series",
",",
"resistance_shunt",
",",
"nNsVth",
")",
"def",
"fv",
"(",
"x",
",",
"v",
",",
"*",
"a",
")",
":",
"# calculate voltage residual given diode voltage \"x\"",
"return",
"bishop88",
"(",
"x",
",",
"*",
"a",
")",
"[",
"1",
"]",
"-",
"v",
"if",
"method",
".",
"lower",
"(",
")",
"==",
"'brentq'",
":",
"# first bound the search using voc",
"voc_est",
"=",
"estimate_voc",
"(",
"photocurrent",
",",
"saturation_current",
",",
"nNsVth",
")",
"# brentq only works with scalar inputs, so we need a set up function",
"# and np.vectorize to repeatedly call the optimizer with the right",
"# arguments for possible array input",
"def",
"vd_from_brent",
"(",
"voc",
",",
"v",
",",
"iph",
",",
"isat",
",",
"rs",
",",
"rsh",
",",
"gamma",
")",
":",
"return",
"brentq",
"(",
"fv",
",",
"0.0",
",",
"voc",
",",
"args",
"=",
"(",
"v",
",",
"iph",
",",
"isat",
",",
"rs",
",",
"rsh",
",",
"gamma",
")",
")",
"vd_from_brent_vectorized",
"=",
"np",
".",
"vectorize",
"(",
"vd_from_brent",
")",
"vd",
"=",
"vd_from_brent_vectorized",
"(",
"voc_est",
",",
"voltage",
",",
"*",
"args",
")",
"elif",
"method",
".",
"lower",
"(",
")",
"==",
"'newton'",
":",
"# make sure all args are numpy arrays if max size > 1",
"# if voltage is an array, then make a copy to use for initial guess, v0",
"args",
",",
"v0",
"=",
"_prepare_newton_inputs",
"(",
"(",
"voltage",
",",
")",
",",
"args",
",",
"voltage",
")",
"vd",
"=",
"newton",
"(",
"func",
"=",
"lambda",
"x",
",",
"*",
"a",
":",
"fv",
"(",
"x",
",",
"voltage",
",",
"*",
"a",
")",
",",
"x0",
"=",
"v0",
",",
"fprime",
"=",
"lambda",
"x",
",",
"*",
"a",
":",
"bishop88",
"(",
"x",
",",
"*",
"a",
",",
"gradients",
"=",
"True",
")",
"[",
"4",
"]",
",",
"args",
"=",
"args",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Method '%s' isn't implemented\"",
"%",
"method",
")",
"return",
"bishop88",
"(",
"vd",
",",
"*",
"args",
")",
"[",
"0",
"]"
] |
Find current given any voltage.
Parameters
----------
voltage : numeric
voltage (V) in volts [V]
photocurrent : numeric
photogenerated current (Iph or IL) in amperes [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) in amperes [A]
resistance_series : numeric
series resistance (Rs) in ohms
resistance_shunt : numeric
shunt resistance (Rsh) in ohms
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
method : str
one of two optional search methods: either ``'brentq'``, a reliable and
bounded method or ``'newton'`` which is the default.
Returns
-------
current : numeric
current (I) at the specified voltage (V) in amperes [A]
|
[
"Find",
"current",
"given",
"any",
"voltage",
"."
] |
python
|
train
|
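The 'brentq' versus 'newton' choice documented above comes down to a bounded scalar search wrapped with np.vectorize versus an unbounded vectorized search seeded with an array of starting points and an analytic gradient. The sketch below reproduces only that search pattern on a simple ideal-diode residual; it is not pvlib's bishop88, the parameter values are made up, and numpy/scipy are assumed to be installed.

import numpy as np
from scipy.optimize import brentq, newton

IPH, ISAT, NVTH = 6.0, 1e-3, 1.5   # photocurrent, dark current, n*Ns*Vth (illustrative)

def residual(v, i):
    # zero when the diode voltage v reproduces the requested terminal current i
    return IPH - ISAT * np.expm1(v / NVTH) - i

def dresidual(v, i):
    # analytic derivative of the residual with respect to v (i is unused)
    return -(ISAT / NVTH) * np.exp(v / NVTH)

currents = np.array([1.0, 3.0, 5.0])
voc = NVTH * np.log1p(IPH / ISAT)   # open-circuit voltage bounds the bracketed search

# 'brentq' route: scalar-only, so a small wrapper is vectorized for array input
v_brentq = np.vectorize(lambda i: brentq(residual, 0.0, voc, args=(i,)))(currents)

# 'newton' route: array-valued starting guess plus the analytic gradient
v_newton = newton(residual, x0=np.full_like(currents, 0.9 * voc),
                  fprime=dresidual, args=(currents,))

print(v_brentq)
print(np.allclose(v_brentq, v_newton))  # True: both routes find the same voltages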
openstack/proliantutils
|
proliantutils/hpssa/manager.py
|
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/manager.py#L300-L317
|
def delete_configuration():
"""Delete a RAID configuration on this server.
:returns: the current RAID configuration after deleting all
the logical disks.
"""
server = objects.Server()
select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
False)
_select_controllers_by(server, select_controllers, 'RAID enabled')
for controller in server.controllers:
# Trigger delete only if there is some RAID array, otherwise
# hpssacli/ssacli will fail saying "no logical drives found.".
if controller.raid_arrays:
controller.delete_all_logical_drives()
return get_configuration()
|
[
"def",
"delete_configuration",
"(",
")",
":",
"server",
"=",
"objects",
".",
"Server",
"(",
")",
"select_controllers",
"=",
"lambda",
"x",
":",
"not",
"x",
".",
"properties",
".",
"get",
"(",
"'HBA Mode Enabled'",
",",
"False",
")",
"_select_controllers_by",
"(",
"server",
",",
"select_controllers",
",",
"'RAID enabled'",
")",
"for",
"controller",
"in",
"server",
".",
"controllers",
":",
"# Trigger delete only if there is some RAID array, otherwise",
"# hpssacli/ssacli will fail saying \"no logical drives found.\".",
"if",
"controller",
".",
"raid_arrays",
":",
"controller",
".",
"delete_all_logical_drives",
"(",
")",
"return",
"get_configuration",
"(",
")"
] |
Delete a RAID configuration on this server.
:returns: the current RAID configuration after deleting all
the logical disks.
|
[
"Delete",
"a",
"RAID",
"configuration",
"on",
"this",
"server",
"."
] |
python
|
train
|
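The selection idiom above — a predicate lambda that skips controllers in HBA mode, then deleting logical drives only where RAID arrays actually exist — can be illustrated without the ssacli tooling. The classes and controller names below are stand-ins, not proliantutils objects.

class FakeController:
    def __init__(self, name, properties, raid_arrays):
        self.name = name
        self.properties = properties
        self.raid_arrays = raid_arrays

    def delete_all_logical_drives(self):
        print("wiping", self.name)
        self.raid_arrays = []

controllers = [
    FakeController("Smart Array P440", {"HBA Mode Enabled": False}, ["array A"]),
    FakeController("Smart Array P840", {"HBA Mode Enabled": True}, ["array B"]),
    FakeController("Smart Array E208", {}, []),
]

select_controllers = lambda c: not c.properties.get("HBA Mode Enabled", False)
for controller in filter(select_controllers, controllers):
    # trigger delete only when there is some RAID array, mirroring the guard above
    if controller.raid_arrays:
        controller.delete_all_logical_drives()
# -> only "Smart Array P440" is wiped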
saltstack/salt
|
salt/states/file.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L648-L667
|
def _find_keep_files(root, keep):
'''
Compile a list of valid keep files (and directories).
Used by _clean_dir()
'''
real_keep = set()
real_keep.add(root)
if isinstance(keep, list):
for fn_ in keep:
if not os.path.isabs(fn_):
continue
fn_ = os.path.normcase(os.path.abspath(fn_))
real_keep.add(fn_)
while True:
fn_ = os.path.abspath(os.path.dirname(fn_))
real_keep.add(fn_)
drive, path = os.path.splitdrive(fn_)
if not path.lstrip(os.sep):
break
return real_keep
|
[
"def",
"_find_keep_files",
"(",
"root",
",",
"keep",
")",
":",
"real_keep",
"=",
"set",
"(",
")",
"real_keep",
".",
"add",
"(",
"root",
")",
"if",
"isinstance",
"(",
"keep",
",",
"list",
")",
":",
"for",
"fn_",
"in",
"keep",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"fn_",
")",
":",
"continue",
"fn_",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"fn_",
")",
")",
"real_keep",
".",
"add",
"(",
"fn_",
")",
"while",
"True",
":",
"fn_",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fn_",
")",
")",
"real_keep",
".",
"add",
"(",
"fn_",
")",
"drive",
",",
"path",
"=",
"os",
".",
"path",
".",
"splitdrive",
"(",
"fn_",
")",
"if",
"not",
"path",
".",
"lstrip",
"(",
"os",
".",
"sep",
")",
":",
"break",
"return",
"real_keep"
] |
Compile a list of valid keep files (and directories).
Used by _clean_dir()
|
[
"Compile",
"a",
"list",
"of",
"valid",
"keep",
"files",
"(",
"and",
"directories",
")",
".",
"Used",
"by",
"_clean_dir",
"()"
] |
python
|
train
|
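A concrete run makes the ancestor expansion above easier to follow: every absolute path in `keep` is preserved together with all of its parent directories, while relative entries are skipped. The stand-alone sketch below mirrors the function with a hypothetical root and keep list; POSIX paths are assumed.

import os

def find_keep_files(root, keep):
    real_keep = {root}
    for fn in keep or []:
        if not os.path.isabs(fn):
            continue                        # relative entries are ignored
        fn = os.path.normcase(os.path.abspath(fn))
        real_keep.add(fn)
        while True:                         # walk up to the filesystem root
            fn = os.path.abspath(os.path.dirname(fn))
            real_keep.add(fn)
            drive, path = os.path.splitdrive(fn)
            if not path.lstrip(os.sep):
                break
    return real_keep

print(sorted(find_keep_files('/srv/app', ['/srv/app/conf/app.ini', 'relative.txt'])))
# On POSIX: ['/', '/srv', '/srv/app', '/srv/app/conf', '/srv/app/conf/app.ini']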