repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (string, 1 distinct value) | partition (string, 3 distinct values) |
---|---|---|---|---|---|---|---|---|
chemlab/chemlab | chemlab/core/spacegroup/cell.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/core/spacegroup/cell.py#L101-L105 | def metric_from_cell(cell):
"""Calculates the metric matrix from cell, which is given in the
Cartesian system."""
cell = np.asarray(cell, dtype=float)
return np.dot(cell, cell.T) | [
"def",
"metric_from_cell",
"(",
"cell",
")",
":",
"cell",
"=",
"np",
".",
"asarray",
"(",
"cell",
",",
"dtype",
"=",
"float",
")",
"return",
"np",
".",
"dot",
"(",
"cell",
",",
"cell",
".",
"T",
")"
] | Calculates the metric matrix from cell, which is given in the
Cartesian system. | [
"Calculates",
"the",
"metric",
"matrix",
"from",
"cell",
"which",
"is",
"given",
"in",
"the",
"Cartesian",
"system",
"."
] | python | train |
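As a quick editorial illustration of the record above: the function simply returns G = cell . cell^T for a cell matrix whose rows are the lattice vectors, so the diagonal of G holds the squared lattice-vector lengths. The sketch below (NumPy only, with an invented orthorhombic cell) is not part of the dataset row.

```python
import numpy as np

def metric_from_cell(cell):
    """Metric matrix G = cell . cell^T, as in the record above."""
    cell = np.asarray(cell, dtype=float)
    return np.dot(cell, cell.T)

# Invented orthorhombic cell with edge lengths 2, 3 and 4 (rows are lattice vectors).
cell = [[2.0, 0.0, 0.0],
        [0.0, 3.0, 0.0],
        [0.0, 0.0, 4.0]]

G = metric_from_cell(cell)
print(np.diag(G))   # [ 4.  9. 16.] -- squared lattice-vector lengths on the diagonal
print(G[0, 1])      # 0.0 -- off-diagonal entries are dot products between lattice vectors
```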
totokaka/pySpaceGDN | pyspacegdn/requests/find_request.py | https://github.com/totokaka/pySpaceGDN/blob/55c8be8d751e24873e0a7f7e99d2b715442ec878/pyspacegdn/requests/find_request.py#L73-L91 | def sort(self, *sort):
""" Sort the results.
Define how the results should be sorted. The arguments should be tuples
of strings defining the key and direction to sort by. For example
`('name', 'asc')` and `('version', 'desc')`. The first sort rule is
considered first by the API. See also the API documentation on
`sorting`_.
Arguments:
`*sort` (`tuple`)
The rules to sort by
.. _sorting: https://github.com/XereoNet/SpaceGDN/wiki/API#sorting
"""
self.add_get_param('sort', FILTER_DELIMITER.join(
[ELEMENT_DELIMITER.join(elements) for elements in sort]))
return self | [
"def",
"sort",
"(",
"self",
",",
"*",
"sort",
")",
":",
"self",
".",
"add_get_param",
"(",
"'sort'",
",",
"FILTER_DELIMITER",
".",
"join",
"(",
"[",
"ELEMENT_DELIMITER",
".",
"join",
"(",
"elements",
")",
"for",
"elements",
"in",
"sort",
"]",
")",
")",
"return",
"self"
] | Sort the results.
Define how the results should be sorted. The arguments should be tuples
of strings defining the key and direction to sort by. For example
`('name', 'asc')` and `('version', 'desc')`. The first sort rule is
considered first by the API. See also the API documentation on
`sorting`_.
Arguments:
`*sort` (`tuple`)
The rules to sort by
.. _sorting: https://github.com/XereoNet/SpaceGDN/wiki/API#sorting | [
"Sort",
"the",
"results",
"."
] | python | train |
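A standalone sketch of what the sort() call above assembles for the 'sort' GET parameter. The two delimiter values below are placeholders chosen for illustration; the real FILTER_DELIMITER and ELEMENT_DELIMITER constants live in pyspacegdn and are not part of this record.

```python
ELEMENT_DELIMITER = "."   # assumed value, joins key and direction within one rule
FILTER_DELIMITER = ","    # assumed value, joins the individual rules

def build_sort_param(*sort):
    """Mirror of the join logic in FindRequest.sort(), without the request object."""
    return FILTER_DELIMITER.join(ELEMENT_DELIMITER.join(elements) for elements in sort)

print(build_sort_param(("name", "asc"), ("version", "desc")))
# -> "name.asc,version.desc" with the placeholder delimiters above
```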
brettcannon/caniusepython3 | caniusepython3/pypi.py | https://github.com/brettcannon/caniusepython3/blob/195775d8f1891f73eb90734f3edda0c57e08dbf3/caniusepython3/pypi.py#L78-L90 | def supports_py3(project_name):
"""Check with PyPI if a project supports Python 3."""
log = logging.getLogger("ciu")
log.info("Checking {} ...".format(project_name))
request = requests.get("https://pypi.org/pypi/{}/json".format(project_name))
if request.status_code >= 400:
log = logging.getLogger("ciu")
log.warning("problem fetching {}, assuming ported ({})".format(
project_name, request.status_code))
return True
response = request.json()
return any(c.startswith("Programming Language :: Python :: 3")
for c in response["info"]["classifiers"]) | [
"def",
"supports_py3",
"(",
"project_name",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"\"ciu\"",
")",
"log",
".",
"info",
"(",
"\"Checking {} ...\"",
".",
"format",
"(",
"project_name",
")",
")",
"request",
"=",
"requests",
".",
"get",
"(",
"\"https://pypi.org/pypi/{}/json\"",
".",
"format",
"(",
"project_name",
")",
")",
"if",
"request",
".",
"status_code",
">=",
"400",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"\"ciu\"",
")",
"log",
".",
"warning",
"(",
"\"problem fetching {}, assuming ported ({})\"",
".",
"format",
"(",
"project_name",
",",
"request",
".",
"status_code",
")",
")",
"return",
"True",
"response",
"=",
"request",
".",
"json",
"(",
")",
"return",
"any",
"(",
"c",
".",
"startswith",
"(",
"\"Programming Language :: Python :: 3\"",
")",
"for",
"c",
"in",
"response",
"[",
"\"info\"",
"]",
"[",
"\"classifiers\"",
"]",
")"
] | Check with PyPI if a project supports Python 3. | [
"Check",
"with",
"PyPI",
"if",
"a",
"project",
"supports",
"Python",
"3",
"."
] | python | train |
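The network call aside, the decision in supports_py3 reduces to scanning the 'classifiers' list of the PyPI JSON response for a Python 3 trove classifier. The offline sketch below exercises just that test on a hand-written classifier list.

```python
def classifiers_support_py3(classifiers):
    """True as soon as one trove classifier declares Python 3 support."""
    return any(c.startswith("Programming Language :: Python :: 3") for c in classifiers)

sample = [
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
]
print(classifiers_support_py3(sample))        # True
print(classifiers_support_py3(sample[:1]))    # False, only a Python 2 classifier present
```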
oemof/oemof.db | oemof/db/tools.py | https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L234-L272 | def tz_from_geom(connection, geometry):
r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a postigs database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html
"""
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT tzid FROM oemof_test.tz_world
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
if not connection.execute(sql).fetchone():
sql = """
SELECT tzid FROM oemof_test.tz_world
ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
""".format(wkt=coords.wkt)
return connection.execute(sql).fetchone()[0] | [
"def",
"tz_from_geom",
"(",
"connection",
",",
"geometry",
")",
":",
"# TODO@Günni",
"if",
"geometry",
".",
"geom_type",
"in",
"[",
"'Polygon'",
",",
"'MultiPolygon'",
"]",
":",
"coords",
"=",
"geometry",
".",
"centroid",
"else",
":",
"coords",
"=",
"geometry",
"sql",
"=",
"\"\"\"\n SELECT tzid FROM oemof_test.tz_world\n WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));\n \"\"\"",
".",
"format",
"(",
"wkt",
"=",
"coords",
".",
"wkt",
")",
"if",
"not",
"connection",
".",
"execute",
"(",
"sql",
")",
".",
"fetchone",
"(",
")",
":",
"sql",
"=",
"\"\"\"\n SELECT tzid FROM oemof_test.tz_world\n ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;\n \"\"\"",
".",
"format",
"(",
"wkt",
"=",
"coords",
".",
"wkt",
")",
"return",
"connection",
".",
"execute",
"(",
"sql",
")",
".",
"fetchone",
"(",
")",
"[",
"0",
"]"
] | r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a PostGIS database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html | [
"r",
"Finding",
"the",
"timezone",
"of",
"a",
"given",
"point",
"or",
"polygon",
"geometry",
"assuming",
"that",
"the",
"polygon",
"is",
"not",
"crossing",
"a",
"border",
"of",
"a",
"timezone",
".",
"For",
"a",
"given",
"point",
"or",
"polygon",
"geometry",
"not",
"located",
"within",
"the",
"timezone",
"dataset",
"(",
"e",
".",
"g",
".",
"sea",
")",
"the",
"nearest",
"timezone",
"based",
"on",
"the",
"bounding",
"boxes",
"of",
"the",
"geometries",
"is",
"returned",
"."
] | python | train |
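Before any SQL runs, tz_from_geom reduces polygons to their centroid and interpolates the WKT of the resulting point into the query. The sketch below shows only that coordinate handling (no database needed); it requires shapely, and the coordinates are made up.

```python
from shapely.geometry import Point, Polygon

def point_for_lookup(geometry):
    """Polygons are replaced by their centroid; points pass through unchanged."""
    if geometry.geom_type in ('Polygon', 'MultiPolygon'):
        return geometry.centroid
    return geometry

SQL = """
    SELECT tzid FROM oemof_test.tz_world
    WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
"""

square = Polygon([(13.3, 52.4), (13.6, 52.4), (13.6, 52.6), (13.3, 52.6)])
print(point_for_lookup(Point(13.4, 52.5)).wkt)        # POINT (13.4 52.5), passes through
print(SQL.format(wkt=point_for_lookup(square).wkt))   # centroid WKT lands in the query
```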
aiogram/aiogram | aiogram/types/message.py | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/message.py#L331-L374 | async def answer_audio(self, audio: typing.Union[base.InputFile, base.String],
caption: typing.Union[base.String, None] = None,
duration: typing.Union[base.Integer, None] = None,
performer: typing.Union[base.String, None] = None,
title: typing.Union[base.String, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=False) -> Message:
"""
Use this method to send audio files, if you want Telegram clients to display them in the music player.
Your audio must be in the .mp3 format.
For sending voice messages, use the sendVoice method instead.
Source: https://core.telegram.org/bots/api#sendaudio
:param audio: Audio file to send.
:type audio: :obj:`typing.Union[base.InputFile, base.String]`
:param caption: Audio caption, 0-200 characters
:type caption: :obj:`typing.Union[base.String, None]`
:param duration: Duration of the audio in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param performer: Performer
:type performer: :obj:`typing.Union[base.String, None]`
:param title: Track name
:type title: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
return await self.bot.send_audio(chat_id=self.chat.id,
audio=audio,
caption=caption,
duration=duration,
performer=performer,
title=title,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup) | [
"async",
"def",
"answer_audio",
"(",
"self",
",",
"audio",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"InputFile",
",",
"base",
".",
"String",
"]",
",",
"caption",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"String",
",",
"None",
"]",
"=",
"None",
",",
"duration",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"Integer",
",",
"None",
"]",
"=",
"None",
",",
"performer",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"String",
",",
"None",
"]",
"=",
"None",
",",
"title",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"String",
",",
"None",
"]",
"=",
"None",
",",
"disable_notification",
":",
"typing",
".",
"Union",
"[",
"base",
".",
"Boolean",
",",
"None",
"]",
"=",
"None",
",",
"reply_markup",
"=",
"None",
",",
"reply",
"=",
"False",
")",
"->",
"Message",
":",
"return",
"await",
"self",
".",
"bot",
".",
"send_audio",
"(",
"chat_id",
"=",
"self",
".",
"chat",
".",
"id",
",",
"audio",
"=",
"audio",
",",
"caption",
"=",
"caption",
",",
"duration",
"=",
"duration",
",",
"performer",
"=",
"performer",
",",
"title",
"=",
"title",
",",
"disable_notification",
"=",
"disable_notification",
",",
"reply_to_message_id",
"=",
"self",
".",
"message_id",
"if",
"reply",
"else",
"None",
",",
"reply_markup",
"=",
"reply_markup",
")"
] | Use this method to send audio files, if you want Telegram clients to display them in the music player.
Your audio must be in the .mp3 format.
For sending voice messages, use the sendVoice method instead.
Source: https://core.telegram.org/bots/api#sendaudio
:param audio: Audio file to send.
:type audio: :obj:`typing.Union[base.InputFile, base.String]`
:param caption: Audio caption, 0-200 characters
:type caption: :obj:`typing.Union[base.String, None]`
:param duration: Duration of the audio in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param performer: Performer
:type performer: :obj:`typing.Union[base.String, None]`
:param title: Track name
:type title: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message` | [
"Use",
"this",
"method",
"to",
"send",
"audio",
"files",
"if",
"you",
"want",
"Telegram",
"clients",
"to",
"display",
"them",
"in",
"the",
"music",
"player",
".",
"Your",
"audio",
"must",
"be",
"in",
"the",
".",
"mp3",
"format",
"."
] | python | train |
cmbruns/pyopenvr | src/openvr/__init__.py | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3484-L3492 | def launchDashboardOverlay(self, pchAppKey):
"""
Launches the dashboard overlay application if it is not already running. This call is only valid for
dashboard overlay applications.
"""
fn = self.function_table.launchDashboardOverlay
result = fn(pchAppKey)
return result | [
"def",
"launchDashboardOverlay",
"(",
"self",
",",
"pchAppKey",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"launchDashboardOverlay",
"result",
"=",
"fn",
"(",
"pchAppKey",
")",
"return",
"result"
] | Launches the dashboard overlay application if it is not already running. This call is only valid for
dashboard overlay applications. | [
"Launches",
"the",
"dashboard",
"overlay",
"application",
"if",
"it",
"is",
"not",
"already",
"running",
".",
"This",
"call",
"is",
"only",
"valid",
"for",
"dashboard",
"overlay",
"applications",
"."
] | python | train |
phaethon/kamene | kamene/pton_ntop.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/pton_ntop.py#L15-L61 | def inet_pton(af, addr):
"""Convert an IP address from text representation into binary form"""
print('hello')
if af == socket.AF_INET:
return inet_aton(addr)
elif af == socket.AF_INET6:
# IPv6: The use of "::" indicates one or more groups of 16 bits of zeros.
# We deal with this form of wildcard using a special marker.
JOKER = b"*"
while b"::" in addr:
addr = addr.replace(b"::", b":" + JOKER + b":")
joker_pos = None
# The last part of an IPv6 address can be an IPv4 address
ipv4_addr = None
if b"." in addr:
ipv4_addr = addr.split(b":")[-1]
result = b""
parts = addr.split(b":")
for part in parts:
if part == JOKER:
# Wildcard is only allowed once
if joker_pos is None:
joker_pos = len(result)
else:
raise Exception("Illegal syntax for IP address")
elif part == ipv4_addr: # FIXME: Make sure IPv4 can only be last part
# FIXME: inet_aton allows IPv4 addresses with less than 4 octets
result += socket.inet_aton(ipv4_addr)
else:
# Each part must be 16bit. Add missing zeroes before decoding.
try:
result += part.rjust(4, b"0").decode("hex")
except TypeError:
raise Exception("Illegal syntax for IP address")
# If there's a wildcard, fill up with zeros to reach 128bit (16 bytes)
if JOKER in addr:
result = (result[:joker_pos] + b"\x00" * (16 - len(result))
+ result[joker_pos:])
if len(result) != 16:
raise Exception("Illegal syntax for IP address")
return result
else:
raise Exception("Address family not supported") | [
"def",
"inet_pton",
"(",
"af",
",",
"addr",
")",
":",
"print",
"(",
"'hello'",
")",
"if",
"af",
"==",
"socket",
".",
"AF_INET",
":",
"return",
"inet_aton",
"(",
"addr",
")",
"elif",
"af",
"==",
"socket",
".",
"AF_INET6",
":",
"# IPv6: The use of \"::\" indicates one or more groups of 16 bits of zeros.",
"# We deal with this form of wildcard using a special marker. ",
"JOKER",
"=",
"b\"*\"",
"while",
"b\"::\"",
"in",
"addr",
":",
"addr",
"=",
"addr",
".",
"replace",
"(",
"b\"::\"",
",",
"b\":\"",
"+",
"JOKER",
"+",
"b\":\"",
")",
"joker_pos",
"=",
"None",
"# The last part of an IPv6 address can be an IPv4 address",
"ipv4_addr",
"=",
"None",
"if",
"b\".\"",
"in",
"addr",
":",
"ipv4_addr",
"=",
"addr",
".",
"split",
"(",
"b\":\"",
")",
"[",
"-",
"1",
"]",
"result",
"=",
"b\"\"",
"parts",
"=",
"addr",
".",
"split",
"(",
"b\":\"",
")",
"for",
"part",
"in",
"parts",
":",
"if",
"part",
"==",
"JOKER",
":",
"# Wildcard is only allowed once",
"if",
"joker_pos",
"is",
"None",
":",
"joker_pos",
"=",
"len",
"(",
"result",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Illegal syntax for IP address\"",
")",
"elif",
"part",
"==",
"ipv4_addr",
":",
"# FIXME: Make sure IPv4 can only be last part",
"# FIXME: inet_aton allows IPv4 addresses with less than 4 octets ",
"result",
"+=",
"socket",
".",
"inet_aton",
"(",
"ipv4_addr",
")",
"else",
":",
"# Each part must be 16bit. Add missing zeroes before decoding. ",
"try",
":",
"result",
"+=",
"part",
".",
"rjust",
"(",
"4",
",",
"b\"0\"",
")",
".",
"decode",
"(",
"\"hex\"",
")",
"except",
"TypeError",
":",
"raise",
"Exception",
"(",
"\"Illegal syntax for IP address\"",
")",
"# If there's a wildcard, fill up with zeros to reach 128bit (16 bytes) ",
"if",
"JOKER",
"in",
"addr",
":",
"result",
"=",
"(",
"result",
"[",
":",
"joker_pos",
"]",
"+",
"b\"\\x00\"",
"*",
"(",
"16",
"-",
"len",
"(",
"result",
")",
")",
"+",
"result",
"[",
"joker_pos",
":",
"]",
")",
"if",
"len",
"(",
"result",
")",
"!=",
"16",
":",
"raise",
"Exception",
"(",
"\"Illegal syntax for IP address\"",
")",
"return",
"result",
"else",
":",
"raise",
"Exception",
"(",
"\"Address family not supported\"",
")"
] | Convert an IP address from text representation into binary form | [
"Convert",
"an",
"IP",
"address",
"from",
"text",
"representation",
"into",
"binary",
"form"
] | python | train |
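One caveat worth flagging on the record above: the per-group decoding uses part.rjust(4, b"0").decode("hex"), which only works on Python 2, so the general IPv6 path raises on Python 3. A Python 3 equivalent of that single step, written here as a standalone sketch (not the library's code) and checked against the stdlib converter:

```python
import binascii
import socket

def groups_to_bytes(groups):
    """Pack zero-padded IPv6 groups (e.g. [b'2001', b'0db8', ...]) into their raw bytes."""
    return b"".join(binascii.unhexlify(g.rjust(4, b"0")) for g in groups)

packed = groups_to_bytes([b"2001", b"0db8", b"0", b"0", b"0", b"0", b"0", b"1"])
print(packed == socket.inet_pton(socket.AF_INET6, "2001:db8::1"))   # True
```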
archman/beamline | beamline/mathutils.py | https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/mathutils.py#L264-L278 | def transFringe(beta=None, rho=None):
""" Transport matrix of fringe field
:param beta: angle of rotation of pole-face in [RAD]
:param rho: bending radius in [m]
:return: 6x6 numpy array
"""
m = np.eye(6, 6, dtype=np.float64)
if None in (beta, rho):
print("warning: 'theta', 'rho' should be positive float numbers.")
return m
else:
m[1, 0] = np.tan(beta) / rho
m[3, 2] = -np.tan(beta) / rho
return m | [
"def",
"transFringe",
"(",
"beta",
"=",
"None",
",",
"rho",
"=",
"None",
")",
":",
"m",
"=",
"np",
".",
"eye",
"(",
"6",
",",
"6",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"None",
"in",
"(",
"beta",
",",
"rho",
")",
":",
"print",
"(",
"\"warning: 'theta', 'rho' should be positive float numbers.\"",
")",
"return",
"m",
"else",
":",
"m",
"[",
"1",
",",
"0",
"]",
"=",
"np",
".",
"tan",
"(",
"beta",
")",
"/",
"rho",
"m",
"[",
"3",
",",
"2",
"]",
"=",
"-",
"np",
".",
"tan",
"(",
"beta",
")",
"/",
"rho",
"return",
"m"
] | Transport matrix of fringe field
:param beta: angle of rotation of pole-face in [RAD]
:param rho: bending radius in [m]
:return: 6x6 numpy array | [
"Transport",
"matrix",
"of",
"fringe",
"field"
] | python | train |
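For the fringe-field record above, a quick numeric check of the two non-trivial matrix entries, m[1,0] = tan(beta)/rho and m[3,2] = -tan(beta)/rho, using made-up but plausible numbers (10 degree pole-face rotation, 1.5 m bending radius):

```python
import numpy as np

def trans_fringe(beta, rho):
    """6x6 identity plus the two edge-focusing terms from the record above."""
    m = np.eye(6)
    m[1, 0] = np.tan(beta) / rho
    m[3, 2] = -np.tan(beta) / rho
    return m

m = trans_fringe(np.deg2rad(10.0), 1.5)
print(round(m[1, 0], 4), round(m[3, 2], 4))   # 0.1176 -0.1176: horizontal focusing, vertical defocusing
```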
aws/aws-encryption-sdk-python | src/aws_encryption_sdk/internal/formatting/encryption_context.py | https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/encryption_context.py#L132-L170 | def deserialize_encryption_context(serialized_encryption_context):
"""Deserializes the contents of a byte string into a dictionary.
:param bytes serialized_encryption_context: Source byte string containing serialized dictionary
:returns: Deserialized encryption context
:rtype: dict
:raises SerializationError: if serialized encryption context is too large
:raises SerializationError: if duplicate key found in serialized encryption context
:raises SerializationError: if malformed data found in serialized encryption context
"""
if len(serialized_encryption_context) > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
raise SerializationError("Serialized context is too long.")
if serialized_encryption_context == b"":
_LOGGER.debug("No encryption context data found")
return {}
deserialized_size = 0
encryption_context = {}
dict_size, deserialized_size = read_short(source=serialized_encryption_context, offset=deserialized_size)
_LOGGER.debug("Found %d keys", dict_size)
for _ in range(dict_size):
key_size, deserialized_size = read_short(source=serialized_encryption_context, offset=deserialized_size)
key, deserialized_size = read_string(
source=serialized_encryption_context, offset=deserialized_size, length=key_size
)
value_size, deserialized_size = read_short(source=serialized_encryption_context, offset=deserialized_size)
value, deserialized_size = read_string(
source=serialized_encryption_context, offset=deserialized_size, length=value_size
)
if key in encryption_context:
raise SerializationError("Duplicate key in serialized context.")
encryption_context[key] = value
if deserialized_size != len(serialized_encryption_context):
raise SerializationError("Formatting error: Extra data in serialized context.")
return encryption_context | [
"def",
"deserialize_encryption_context",
"(",
"serialized_encryption_context",
")",
":",
"if",
"len",
"(",
"serialized_encryption_context",
")",
">",
"aws_encryption_sdk",
".",
"internal",
".",
"defaults",
".",
"MAX_BYTE_ARRAY_SIZE",
":",
"raise",
"SerializationError",
"(",
"\"Serialized context is too long.\"",
")",
"if",
"serialized_encryption_context",
"==",
"b\"\"",
":",
"_LOGGER",
".",
"debug",
"(",
"\"No encryption context data found\"",
")",
"return",
"{",
"}",
"deserialized_size",
"=",
"0",
"encryption_context",
"=",
"{",
"}",
"dict_size",
",",
"deserialized_size",
"=",
"read_short",
"(",
"source",
"=",
"serialized_encryption_context",
",",
"offset",
"=",
"deserialized_size",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Found %d keys\"",
",",
"dict_size",
")",
"for",
"_",
"in",
"range",
"(",
"dict_size",
")",
":",
"key_size",
",",
"deserialized_size",
"=",
"read_short",
"(",
"source",
"=",
"serialized_encryption_context",
",",
"offset",
"=",
"deserialized_size",
")",
"key",
",",
"deserialized_size",
"=",
"read_string",
"(",
"source",
"=",
"serialized_encryption_context",
",",
"offset",
"=",
"deserialized_size",
",",
"length",
"=",
"key_size",
")",
"value_size",
",",
"deserialized_size",
"=",
"read_short",
"(",
"source",
"=",
"serialized_encryption_context",
",",
"offset",
"=",
"deserialized_size",
")",
"value",
",",
"deserialized_size",
"=",
"read_string",
"(",
"source",
"=",
"serialized_encryption_context",
",",
"offset",
"=",
"deserialized_size",
",",
"length",
"=",
"value_size",
")",
"if",
"key",
"in",
"encryption_context",
":",
"raise",
"SerializationError",
"(",
"\"Duplicate key in serialized context.\"",
")",
"encryption_context",
"[",
"key",
"]",
"=",
"value",
"if",
"deserialized_size",
"!=",
"len",
"(",
"serialized_encryption_context",
")",
":",
"raise",
"SerializationError",
"(",
"\"Formatting error: Extra data in serialized context.\"",
")",
"return",
"encryption_context"
] | Deserializes the contents of a byte string into a dictionary.
:param bytes serialized_encryption_context: Source byte string containing serialized dictionary
:returns: Deserialized encryption context
:rtype: dict
:raises SerializationError: if serialized encryption context is too large
:raises SerializationError: if duplicate key found in serialized encryption context
:raises SerializationError: if malformed data found in serialized encryption context | [
"Deserializes",
"the",
"contents",
"of",
"a",
"byte",
"string",
"into",
"a",
"dictionary",
"."
] | python | train |
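The byte layout that deserialize_encryption_context walks is: a 2-byte entry count, then for each entry a 2-byte key length, the key, a 2-byte value length and the value. The standalone round-trip below illustrates that layout; big-endian unsigned shorts and UTF-8 strings are assumed here (that is what the read_short/read_string helpers suggest, but those helpers are not part of this record).

```python
import struct

def serialize(context):
    """Length-prefixed key/value layout sketched from the parser above (assumed encoding)."""
    out = struct.pack(">H", len(context))
    for key, value in sorted(context.items()):
        for text in (key, value):
            raw = text.encode("utf-8")
            out += struct.pack(">H", len(raw)) + raw
    return out

def deserialize(data):
    (count,) = struct.unpack_from(">H", data, 0)
    offset, context = 2, {}
    for _ in range(count):
        fields = []
        for _ in range(2):                         # key, then value: same length-prefixed shape
            (size,) = struct.unpack_from(">H", data, offset)
            offset += 2
            fields.append(data[offset:offset + size].decode("utf-8"))
            offset += size
        context[fields[0]] = fields[1]
    return context

blob = serialize({"purpose": "test", "tenant": "42"})
print(deserialize(blob))   # {'purpose': 'test', 'tenant': '42'}
```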
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/terminal/interactiveshell.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/terminal/interactiveshell.py#L637-L663 | def edit_syntax_error(self):
"""The bottom half of the syntax error handler called in the main loop.
Loop until syntax error is fixed or user cancels.
"""
while self.SyntaxTB.last_syntax_error:
# copy and clear last_syntax_error
err = self.SyntaxTB.clear_err_state()
if not self._should_recompile(err):
return
try:
# may set last_syntax_error again if a SyntaxError is raised
self.safe_execfile(err.filename,self.user_ns)
except:
self.showtraceback()
else:
try:
f = open(err.filename)
try:
# This should be inside a display_trap block and I
# think it is.
sys.displayhook(f.read())
finally:
f.close()
except:
self.showtraceback() | [
"def",
"edit_syntax_error",
"(",
"self",
")",
":",
"while",
"self",
".",
"SyntaxTB",
".",
"last_syntax_error",
":",
"# copy and clear last_syntax_error",
"err",
"=",
"self",
".",
"SyntaxTB",
".",
"clear_err_state",
"(",
")",
"if",
"not",
"self",
".",
"_should_recompile",
"(",
"err",
")",
":",
"return",
"try",
":",
"# may set last_syntax_error again if a SyntaxError is raised",
"self",
".",
"safe_execfile",
"(",
"err",
".",
"filename",
",",
"self",
".",
"user_ns",
")",
"except",
":",
"self",
".",
"showtraceback",
"(",
")",
"else",
":",
"try",
":",
"f",
"=",
"open",
"(",
"err",
".",
"filename",
")",
"try",
":",
"# This should be inside a display_trap block and I",
"# think it is.",
"sys",
".",
"displayhook",
"(",
"f",
".",
"read",
"(",
")",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"except",
":",
"self",
".",
"showtraceback",
"(",
")"
] | The bottom half of the syntax error handler called in the main loop.
Loop until syntax error is fixed or user cancels. | [
"The",
"bottom",
"half",
"of",
"the",
"syntax",
"error",
"handler",
"called",
"in",
"the",
"main",
"loop",
"."
] | python | test |
fronzbot/blinkpy | blinkpy/api.py | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L177-L185 | def request_cameras(blink, network):
"""
Request all camera information.
:param Blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/cameras".format(blink.urls.base_url, network)
return http_get(blink, url) | [
"def",
"request_cameras",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/network/{}/cameras\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request all camera information.
:param Blink: Blink instance.
:param network: Sync module network id. | [
"Request",
"all",
"camera",
"information",
"."
] | python | train |
SHDShim/pytheos | pytheos/eqn_therm_Speziale.py | https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_therm_Speziale.py#L33-L54 | def speziale_debyetemp(v, v0, gamma0, q0, q1, theta0):
"""
calculate Debye temperature for the Speziale equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q0: logarithmic derivative of Gruneisen parameter
:param q1: logarithmic derivative of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K
"""
if isuncertainties([v, v0, gamma0, q0, q1, theta0]):
f_vu = np.vectorize(uct.wrap(integrate_gamma),
excluded=[1, 2, 3, 4, 5, 6])
integ = f_vu(v, v0, gamma0, q0, q1, theta0)
theta = unp.exp(unp.log(theta0) - integ)
else:
f_v = np.vectorize(integrate_gamma, excluded=[1, 2, 3, 4, 5, 6])
integ = f_v(v, v0, gamma0, q0, q1, theta0)
theta = np.exp(np.log(theta0) - integ)
return theta | [
"def",
"speziale_debyetemp",
"(",
"v",
",",
"v0",
",",
"gamma0",
",",
"q0",
",",
"q1",
",",
"theta0",
")",
":",
"if",
"isuncertainties",
"(",
"[",
"v",
",",
"v0",
",",
"gamma0",
",",
"q0",
",",
"q1",
",",
"theta0",
"]",
")",
":",
"f_vu",
"=",
"np",
".",
"vectorize",
"(",
"uct",
".",
"wrap",
"(",
"integrate_gamma",
")",
",",
"excluded",
"=",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
",",
"6",
"]",
")",
"integ",
"=",
"f_vu",
"(",
"v",
",",
"v0",
",",
"gamma0",
",",
"q0",
",",
"q1",
",",
"theta0",
")",
"theta",
"=",
"unp",
".",
"exp",
"(",
"unp",
".",
"log",
"(",
"theta0",
")",
"-",
"integ",
")",
"else",
":",
"f_v",
"=",
"np",
".",
"vectorize",
"(",
"integrate_gamma",
",",
"excluded",
"=",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
",",
"6",
"]",
")",
"integ",
"=",
"f_v",
"(",
"v",
",",
"v0",
",",
"gamma0",
",",
"q0",
",",
"q1",
",",
"theta0",
")",
"theta",
"=",
"np",
".",
"exp",
"(",
"np",
".",
"log",
"(",
"theta0",
")",
"-",
"integ",
")",
"return",
"theta"
] | calculate Debye temperature for the Speziale equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q0: logarithmic derivative of Gruneisen parameter
:param q1: logarithmic derivative of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K | [
"calculate",
"Debye",
"temperature",
"for",
"the",
"Speziale",
"equation"
] | python | train |
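The record computes theta = exp(ln(theta0) - I) with I supplied by integrate_gamma, which is not included in this record. Under the usual Grueneisen definition gamma = -d ln(theta)/d ln(V), that integral is the integral of gamma(V')/V' dV' from V0 to V. The sketch below only checks this outer relation for a constant gamma, where the integral is analytic and theta reduces to theta0*(V0/V)**gamma; the numbers are illustrative, not fitted values.

```python
import numpy as np

theta0, gamma, v0 = 773.0, 1.5, 74.7          # illustrative values only
v = np.linspace(60.0, v0, 200)                # compression below v0

integral = gamma * np.log(v / v0)             # analytic integral for constant gamma
theta = np.exp(np.log(theta0) - integral)     # same outer relation as in the record

print(np.allclose(theta, theta0 * (v0 / v) ** gamma))   # True
print(round(theta[0], 1))   # Debye temperature rises as the unit cell is compressed
```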
pybel/pybel-tools | src/pybel_tools/mutation/collapse.py | https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/mutation/collapse.py#L86-L107 | def _collapse_edge_by_namespace(graph: BELGraph,
victim_namespaces: Strings,
survivor_namespaces: str,
relations: Strings) -> None:
"""Collapse pairs of nodes with the given namespaces that have the given relationship.
:param graph: A BEL Graph
:param victim_namespaces: The namespace(s) of the node to collapse
:param survivor_namespaces: The namespace of the node to keep
:param relations: The relation(s) to search
"""
relation_filter = build_relation_predicate(relations)
source_namespace_filter = build_source_namespace_filter(victim_namespaces)
target_namespace_filter = build_target_namespace_filter(survivor_namespaces)
edge_predicates = [
relation_filter,
source_namespace_filter,
target_namespace_filter
]
_collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates) | [
"def",
"_collapse_edge_by_namespace",
"(",
"graph",
":",
"BELGraph",
",",
"victim_namespaces",
":",
"Strings",
",",
"survivor_namespaces",
":",
"str",
",",
"relations",
":",
"Strings",
")",
"->",
"None",
":",
"relation_filter",
"=",
"build_relation_predicate",
"(",
"relations",
")",
"source_namespace_filter",
"=",
"build_source_namespace_filter",
"(",
"victim_namespaces",
")",
"target_namespace_filter",
"=",
"build_target_namespace_filter",
"(",
"survivor_namespaces",
")",
"edge_predicates",
"=",
"[",
"relation_filter",
",",
"source_namespace_filter",
",",
"target_namespace_filter",
"]",
"_collapse_edge_passing_predicates",
"(",
"graph",
",",
"edge_predicates",
"=",
"edge_predicates",
")"
] | Collapse pairs of nodes with the given namespaces that have the given relationship.
:param graph: A BEL Graph
:param victim_namespaces: The namespace(s) of the node to collapse
:param survivor_namespaces: The namespace of the node to keep
:param relations: The relation(s) to search | [
"Collapse",
"pairs",
"of",
"nodes",
"with",
"the",
"given",
"namespaces",
"that",
"have",
"the",
"given",
"relationship",
"."
] | python | valid |
RLBot/RLBot | src/main/python/rlbot/utils/class_importer.py | https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/utils/class_importer.py#L55-L63 | def load_external_class(python_file, base_class):
"""
Returns a tuple: (subclass of base_class, module)
"""
loaded_module = load_external_module(python_file)
# Find a class that extends base_class
loaded_class = extract_class(loaded_module, base_class)
return loaded_class, loaded_module | [
"def",
"load_external_class",
"(",
"python_file",
",",
"base_class",
")",
":",
"loaded_module",
"=",
"load_external_module",
"(",
"python_file",
")",
"# Find a class that extends base_class",
"loaded_class",
"=",
"extract_class",
"(",
"loaded_module",
",",
"base_class",
")",
"return",
"loaded_class",
",",
"loaded_module"
] | Returns a tuple: (subclass of base_class, module) | [
"Returns",
"a",
"tuple",
":",
"(",
"subclass",
"of",
"base_class",
"module",
")"
] | python | train |
creare-com/pydem | pydem/dem_processing.py | https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/dem_processing.py#L1020-L1027 | def _slopes_directions(self, data, dX, dY, method='tarboton'):
""" Wrapper to pick between various algorithms
"""
# %%
if method == 'tarboton':
return self._tarboton_slopes_directions(data, dX, dY)
elif method == 'central':
return self._central_slopes_directions(data, dX, dY) | [
"def",
"_slopes_directions",
"(",
"self",
",",
"data",
",",
"dX",
",",
"dY",
",",
"method",
"=",
"'tarboton'",
")",
":",
"# %%",
"if",
"method",
"==",
"'tarboton'",
":",
"return",
"self",
".",
"_tarboton_slopes_directions",
"(",
"data",
",",
"dX",
",",
"dY",
")",
"elif",
"method",
"==",
"'central'",
":",
"return",
"self",
".",
"_central_slopes_directions",
"(",
"data",
",",
"dX",
",",
"dY",
")"
] | Wrapper to pick between various algorithms | [
"Wrapper",
"to",
"pick",
"between",
"various",
"algorithms"
] | python | train |
macpaul/usonic | usonic/usonic.py | https://github.com/macpaul/usonic/blob/92ed69c785aebd54ecdf3e2553a6edc26d5e5878/usonic/usonic.py#L18-L100 | def read(sensor):
"""
distance of object in front of sensor in CM.
"""
import time
import RPi.GPIO as GPIO
# Disable any warning message such as GPIO pins in use
GPIO.setwarnings(False)
# use the values of the GPIO pins, and not the actual pin number
# so if you connect to GPIO 25 which is on pin number 22, the
# reference in this code is 25, which is the number of the GPIO
# port and not the number of the physical pin
GPIO.setmode(GPIO.BCM)
if sensor.gpio_in is 0:
raise RuntimeError('gpio_in, gpio_out attribute of Sensor object must be assigned before calling read')
else:
gpio_in = sensor.gpio_in
gpio_out = sensor.gpio_out
# point the software to the GPIO pins the sensor is using
# change these values to the pins you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
GPIO.setup(gpio_out, GPIO.OUT)
GPIO.setup(gpio_in, GPIO.IN)
GPIO.output(gpio_out, GPIO.LOW)
# found that the sensor can crash if there isn't a delay here
# no idea why. If you have odd crashing issues, increase delay
time.sleep(0.3)
# sensor manual says a pulse ength of 10Us will trigger the
# sensor to transmit 8 cycles of ultrasonic burst at 40kHz and
# wait for the reflected ultrasonic burst to be received
# to get a pulse length of 10Us we need to start the pulse, then
# wait for 10 microseconds, then stop the pulse. This will
# result in the pulse length being 10Us.
# start the pulse on the GPIO pin
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(gpio_out, True)
# wait 10 micro seconds (this is 0.00001 seconds) so the pulse
# length is 10Us as the sensor expects
time.sleep(0.00001)
# stop the pulse after the time above has passed
# change this value to the pin you are using
# GPIO output = the pin that's connected to "Trig" on the sensor
GPIO.output(gpio_out, False)
# listen to the input pin. 0 means nothing is happening. Once a
# signal is received the value will be 1 so the while loop
# stops and has the last recorded time the signal was 0
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(gpio_in) == 0:
signaloff = time.time()
# listen to the input pin. Once a signal is received, record the
# time the signal came through
# change this value to the pin you are using
# GPIO input = the pin that's connected to "Echo" on the sensor
while GPIO.input(gpio_in) == 1:
signalon = time.time()
# work out the difference in the two recorded times above to
# calculate the distance of an object in front of the sensor
timepassed = signalon - signaloff
# we now have our distance but it's not in a useful unit of
# measurement. So now we convert this distance into centimetres
distance = timepassed * 17000
# we're no longer using the GPIO, so tell software we're done
GPIO.cleanup()
return distance | [
"def",
"read",
"(",
"sensor",
")",
":",
"import",
"time",
"import",
"RPi",
".",
"GPIO",
"as",
"GPIO",
"# Disable any warning message such as GPIO pins in use",
"GPIO",
".",
"setwarnings",
"(",
"False",
")",
"# use the values of the GPIO pins, and not the actual pin number",
"# so if you connect to GPIO 25 which is on pin number 22, the ",
"# reference in this code is 25, which is the number of the GPIO ",
"# port and not the number of the physical pin",
"GPIO",
".",
"setmode",
"(",
"GPIO",
".",
"BCM",
")",
"if",
"sensor",
".",
"gpio_in",
"is",
"0",
":",
"raise",
"RuntimeError",
"(",
"'gpio_in, gpio_out attribute of Sensor object must be assigned before calling read'",
")",
"else",
":",
"gpio_in",
"=",
"sensor",
".",
"gpio_in",
"gpio_out",
"=",
"sensor",
".",
"gpio_out",
"# point the software to the GPIO pins the sensor is using",
"# change these values to the pins you are using",
"# GPIO output = the pin that's connected to \"Trig\" on the sensor",
"# GPIO input = the pin that's connected to \"Echo\" on the sensor",
"GPIO",
".",
"setup",
"(",
"gpio_out",
",",
"GPIO",
".",
"OUT",
")",
"GPIO",
".",
"setup",
"(",
"gpio_in",
",",
"GPIO",
".",
"IN",
")",
"GPIO",
".",
"output",
"(",
"gpio_out",
",",
"GPIO",
".",
"LOW",
")",
"# found that the sensor can crash if there isn't a delay here",
"# no idea why. If you have odd crashing issues, increase delay",
"time",
".",
"sleep",
"(",
"0.3",
")",
"# sensor manual says a pulse ength of 10Us will trigger the ",
"# sensor to transmit 8 cycles of ultrasonic burst at 40kHz and ",
"# wait for the reflected ultrasonic burst to be received",
"# to get a pulse length of 10Us we need to start the pulse, then",
"# wait for 10 microseconds, then stop the pulse. This will ",
"# result in the pulse length being 10Us.",
"# start the pulse on the GPIO pin ",
"# change this value to the pin you are using",
"# GPIO output = the pin that's connected to \"Trig\" on the sensor",
"GPIO",
".",
"output",
"(",
"gpio_out",
",",
"True",
")",
"# wait 10 micro seconds (this is 0.00001 seconds) so the pulse",
"# length is 10Us as the sensor expects",
"time",
".",
"sleep",
"(",
"0.00001",
")",
"# stop the pulse after the time above has passed",
"# change this value to the pin you are using",
"# GPIO output = the pin that's connected to \"Trig\" on the sensor",
"GPIO",
".",
"output",
"(",
"gpio_out",
",",
"False",
")",
"# listen to the input pin. 0 means nothing is happening. Once a",
"# signal is received the value will be 1 so the while loop",
"# stops and has the last recorded time the signal was 0",
"# change this value to the pin you are using",
"# GPIO input = the pin that's connected to \"Echo\" on the sensor",
"while",
"GPIO",
".",
"input",
"(",
"gpio_in",
")",
"==",
"0",
":",
"signaloff",
"=",
"time",
".",
"time",
"(",
")",
"# listen to the input pin. Once a signal is received, record the",
"# time the signal came through",
"# change this value to the pin you are using",
"# GPIO input = the pin that's connected to \"Echo\" on the sensor",
"while",
"GPIO",
".",
"input",
"(",
"gpio_in",
")",
"==",
"1",
":",
"signalon",
"=",
"time",
".",
"time",
"(",
")",
"# work out the difference in the two recorded times above to ",
"# calculate the distance of an object in front of the sensor",
"timepassed",
"=",
"signalon",
"-",
"signaloff",
"# we now have our distance but it's not in a useful unit of",
"# measurement. So now we convert this distance into centimetres",
"distance",
"=",
"timepassed",
"*",
"17000",
"# we're no longer using the GPIO, so tell software we're done",
"GPIO",
".",
"cleanup",
"(",
")",
"return",
"distance"
] | distance of object in front of sensor in CM. | [
"distance",
"of",
"object",
"in",
"front",
"of",
"sensor",
"in",
"CM",
"."
] | python | train |
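The final conversion in read() is ordinary time-of-flight arithmetic: sound covers roughly 34300 cm/s at room temperature, the ping travels out and back, so the one-way distance is elapsed * 34300 / 2, which the record rounds to elapsed * 17000. A short check:

```python
SPEED_OF_SOUND_CM_PER_S = 34300.0     # ~343 m/s near 20 degrees C; drifts with temperature

def echo_time_to_distance_cm(elapsed_s):
    """One-way distance for a round-trip echo time."""
    return elapsed_s * SPEED_OF_SOUND_CM_PER_S / 2.0

print(echo_time_to_distance_cm(0.001))    # ~17.15 cm for a 1 ms round trip
print(0.001 * 17000)                      # 17.0 cm with the record's rounded constant
```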
cmorisse/ikp3db | ikp3db.py | https://github.com/cmorisse/ikp3db/blob/a0f318d4e8494b2e6f2f07ec0f1202ca023c920f/ikp3db.py#L484-L488 | def update_active_breakpoint_flag(cls):
""" Checks all breakpoints to find wether at least one is active and
update `any_active_breakpoint` accordingly.
"""
cls.any_active_breakpoint=any([bp.enabled for bp in cls.breakpoints_by_number if bp]) | [
"def",
"update_active_breakpoint_flag",
"(",
"cls",
")",
":",
"cls",
".",
"any_active_breakpoint",
"=",
"any",
"(",
"[",
"bp",
".",
"enabled",
"for",
"bp",
"in",
"cls",
".",
"breakpoints_by_number",
"if",
"bp",
"]",
")"
] | Checks all breakpoints to find wether at least one is active and
update `any_active_breakpoint` accordingly. | [
"Checks",
"all",
"breakpoints",
"to",
"find",
"wether",
"at",
"least",
"one",
"is",
"active",
"and",
"update",
"any_active_breakpoint",
"accordingly",
"."
] | python | train |
globality-corp/microcosm-postgres | microcosm_postgres/migrate.py | https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/migrate.py#L156-L187 | def patch_script_directory(graph):
"""
Monkey patch the `ScriptDirectory` class, working around configuration assumptions.
Changes include:
- Using a generated, temporary directory (with a generated, temporary `script.py.mako`)
instead of the assumed script directory.
- Using our `make_script_directory` function instead of the default `ScriptDirectory.from_config`.
- Using our `run_online_migration` function instead of the default `ScriptDirectory.run_env`.
- Injecting the current object graph.
"""
temporary_dir = mkdtemp()
from_config_original = getattr(ScriptDirectory, "from_config")
run_env_original = getattr(ScriptDirectory, "run_env")
# use a temporary directory for the revision template
with open(join(temporary_dir, "script.py.mako"), "w") as file_:
file_.write(make_script_py_mako())
file_.flush()
# monkey patch our script directory and migration logic
setattr(ScriptDirectory, "from_config", classmethod(make_script_directory))
setattr(ScriptDirectory, "run_env", run_online_migration)
setattr(ScriptDirectory, "graph", graph)
try:
yield temporary_dir
finally:
# cleanup
delattr(ScriptDirectory, "graph")
setattr(ScriptDirectory, "run_env", run_env_original)
setattr(ScriptDirectory, "from_config", from_config_original)
rmtree(temporary_dir) | [
"def",
"patch_script_directory",
"(",
"graph",
")",
":",
"temporary_dir",
"=",
"mkdtemp",
"(",
")",
"from_config_original",
"=",
"getattr",
"(",
"ScriptDirectory",
",",
"\"from_config\"",
")",
"run_env_original",
"=",
"getattr",
"(",
"ScriptDirectory",
",",
"\"run_env\"",
")",
"# use a temporary directory for the revision template",
"with",
"open",
"(",
"join",
"(",
"temporary_dir",
",",
"\"script.py.mako\"",
")",
",",
"\"w\"",
")",
"as",
"file_",
":",
"file_",
".",
"write",
"(",
"make_script_py_mako",
"(",
")",
")",
"file_",
".",
"flush",
"(",
")",
"# monkey patch our script directory and migration logic",
"setattr",
"(",
"ScriptDirectory",
",",
"\"from_config\"",
",",
"classmethod",
"(",
"make_script_directory",
")",
")",
"setattr",
"(",
"ScriptDirectory",
",",
"\"run_env\"",
",",
"run_online_migration",
")",
"setattr",
"(",
"ScriptDirectory",
",",
"\"graph\"",
",",
"graph",
")",
"try",
":",
"yield",
"temporary_dir",
"finally",
":",
"# cleanup",
"delattr",
"(",
"ScriptDirectory",
",",
"\"graph\"",
")",
"setattr",
"(",
"ScriptDirectory",
",",
"\"run_env\"",
",",
"run_env_original",
")",
"setattr",
"(",
"ScriptDirectory",
",",
"\"from_config\"",
",",
"from_config_original",
")",
"rmtree",
"(",
"temporary_dir",
")"
] | Monkey patch the `ScriptDirectory` class, working around configuration assumptions.
Changes include:
- Using a generated, temporary directory (with a generated, temporary `script.py.mako`)
instead of the assumed script directory.
- Using our `make_script_directory` function instead of the default `ScriptDirectory.from_config`.
- Using our `run_online_migration` function instead of the default `ScriptDirectory.run_env`.
- Injecting the current object graph. | [
"Monkey",
"patch",
"the",
"ScriptDirectory",
"class",
"working",
"around",
"configuration",
"assumptions",
"."
] | python | train |
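Stripped of the Alembic specifics, patch_script_directory is a save / patch / restore context manager over class attributes. The toy version below shows the same shape with a stand-in class and contextlib, mirroring the getattr/setattr bookkeeping used above; none of it is microcosm or Alembic code.

```python
from contextlib import contextmanager

class ScriptDir:                                  # stand-in for alembic's ScriptDirectory
    @classmethod
    def from_config(cls, config):
        return "real implementation"

@contextmanager
def patched(cls, name, replacement):
    original = getattr(cls, name)                 # stash the original, as the record does
    setattr(cls, name, replacement)
    try:
        yield cls
    finally:
        setattr(cls, name, original)              # restore even if the body raised

with patched(ScriptDir, "from_config", classmethod(lambda cls, config: "patched")):
    print(ScriptDir.from_config(None))            # -> patched
print(ScriptDir.from_config(None))                # -> real implementation
```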
DLR-RM/RAFCON | source/rafcon/gui/mygaphas/utils/gap_helper.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/utils/gap_helper.py#L138-L197 | def add_data_flow_to_state(from_port, to_port):
"""Interface method between Gaphas and RAFCON core for adding data flows
The method checks the types of the given ports and their relation. From this the necessary parameters for the
add_data_flow method of the RAFCON core are determined. Also the parent state is derived from the ports.
:param from_port: Port from which the data flow starts
:param to_port: Port to which the data flow goes to
:return: True if a data flow was added, False if an error occurred
"""
from rafcon.gui.mygaphas.items.ports import InputPortView, OutputPortView, ScopedVariablePortView
from rafcon.gui.models.container_state import ContainerStateModel
from_state_v = from_port.parent
to_state_v = to_port.parent
from_state_m = from_state_v.model
to_state_m = to_state_v.model
from_state_id = from_state_m.state.state_id
to_state_id = to_state_m.state.state_id
from_port_id = from_port.port_id
to_port_id = to_port.port_id
if not isinstance(from_port, (InputPortView, OutputPortView, ScopedVariablePortView)) or \
not isinstance(from_port, (InputPortView, OutputPortView, ScopedVariablePortView)):
logger.error("Data flows only exist between data ports (input, output, scope). Given: {0} and {1}".format(type(
from_port), type(to_port)))
return False
responsible_parent_m = None
# from parent to child
if isinstance(from_state_m, ContainerStateModel) and \
check_if_dict_contains_object_reference_in_values(to_state_m.state, from_state_m.state.states):
responsible_parent_m = from_state_m
# from child to parent
elif isinstance(to_state_m, ContainerStateModel) and \
check_if_dict_contains_object_reference_in_values(from_state_m.state, to_state_m.state.states):
responsible_parent_m = to_state_m
# from parent to parent
elif isinstance(from_state_m, ContainerStateModel) and from_state_m.state is to_state_m.state:
responsible_parent_m = from_state_m # == to_state_m
# from child to child
elif (not from_state_m.state.is_root_state) and (not to_state_m.state.is_root_state) \
and from_state_m.state is not to_state_m.state \
and from_state_m.parent.state.state_id and to_state_m.parent.state.state_id:
responsible_parent_m = from_state_m.parent
if not isinstance(responsible_parent_m, ContainerStateModel):
logger.error("Data flows only exist in container states (e.g. hierarchy states)")
return False
try:
responsible_parent_m.state.add_data_flow(from_state_id, from_port_id, to_state_id, to_port_id)
return True
except (ValueError, AttributeError, TypeError) as e:
logger.error("Data flow couldn't be added: {0}".format(e))
return False | [
"def",
"add_data_flow_to_state",
"(",
"from_port",
",",
"to_port",
")",
":",
"from",
"rafcon",
".",
"gui",
".",
"mygaphas",
".",
"items",
".",
"ports",
"import",
"InputPortView",
",",
"OutputPortView",
",",
"ScopedVariablePortView",
"from",
"rafcon",
".",
"gui",
".",
"models",
".",
"container_state",
"import",
"ContainerStateModel",
"from_state_v",
"=",
"from_port",
".",
"parent",
"to_state_v",
"=",
"to_port",
".",
"parent",
"from_state_m",
"=",
"from_state_v",
".",
"model",
"to_state_m",
"=",
"to_state_v",
".",
"model",
"from_state_id",
"=",
"from_state_m",
".",
"state",
".",
"state_id",
"to_state_id",
"=",
"to_state_m",
".",
"state",
".",
"state_id",
"from_port_id",
"=",
"from_port",
".",
"port_id",
"to_port_id",
"=",
"to_port",
".",
"port_id",
"if",
"not",
"isinstance",
"(",
"from_port",
",",
"(",
"InputPortView",
",",
"OutputPortView",
",",
"ScopedVariablePortView",
")",
")",
"or",
"not",
"isinstance",
"(",
"from_port",
",",
"(",
"InputPortView",
",",
"OutputPortView",
",",
"ScopedVariablePortView",
")",
")",
":",
"logger",
".",
"error",
"(",
"\"Data flows only exist between data ports (input, output, scope). Given: {0} and {1}\"",
".",
"format",
"(",
"type",
"(",
"from_port",
")",
",",
"type",
"(",
"to_port",
")",
")",
")",
"return",
"False",
"responsible_parent_m",
"=",
"None",
"# from parent to child",
"if",
"isinstance",
"(",
"from_state_m",
",",
"ContainerStateModel",
")",
"and",
"check_if_dict_contains_object_reference_in_values",
"(",
"to_state_m",
".",
"state",
",",
"from_state_m",
".",
"state",
".",
"states",
")",
":",
"responsible_parent_m",
"=",
"from_state_m",
"# from child to parent",
"elif",
"isinstance",
"(",
"to_state_m",
",",
"ContainerStateModel",
")",
"and",
"check_if_dict_contains_object_reference_in_values",
"(",
"from_state_m",
".",
"state",
",",
"to_state_m",
".",
"state",
".",
"states",
")",
":",
"responsible_parent_m",
"=",
"to_state_m",
"# from parent to parent",
"elif",
"isinstance",
"(",
"from_state_m",
",",
"ContainerStateModel",
")",
"and",
"from_state_m",
".",
"state",
"is",
"to_state_m",
".",
"state",
":",
"responsible_parent_m",
"=",
"from_state_m",
"# == to_state_m",
"# from child to child",
"elif",
"(",
"not",
"from_state_m",
".",
"state",
".",
"is_root_state",
")",
"and",
"(",
"not",
"to_state_m",
".",
"state",
".",
"is_root_state",
")",
"and",
"from_state_m",
".",
"state",
"is",
"not",
"to_state_m",
".",
"state",
"and",
"from_state_m",
".",
"parent",
".",
"state",
".",
"state_id",
"and",
"to_state_m",
".",
"parent",
".",
"state",
".",
"state_id",
":",
"responsible_parent_m",
"=",
"from_state_m",
".",
"parent",
"if",
"not",
"isinstance",
"(",
"responsible_parent_m",
",",
"ContainerStateModel",
")",
":",
"logger",
".",
"error",
"(",
"\"Data flows only exist in container states (e.g. hierarchy states)\"",
")",
"return",
"False",
"try",
":",
"responsible_parent_m",
".",
"state",
".",
"add_data_flow",
"(",
"from_state_id",
",",
"from_port_id",
",",
"to_state_id",
",",
"to_port_id",
")",
"return",
"True",
"except",
"(",
"ValueError",
",",
"AttributeError",
",",
"TypeError",
")",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"Data flow couldn't be added: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"return",
"False"
] | Interface method between Gaphas and RAFCON core for adding data flows
The method checks the types of the given ports and their relation. From this the necessary parameters for the
add_data_flow method of the RAFCON core are determined. Also the parent state is derived from the ports.
:param from_port: Port from which the data flow starts
:param to_port: Port to which the data flow goes to
:return: True if a data flow was added, False if an error occurred | [
"Interface",
"method",
"between",
"Gaphas",
"and",
"RAFCON",
"core",
"for",
"adding",
"data",
"flows"
] | python | train |
pantsbuild/pants | src/python/pants/backend/jvm/subsystems/zinc.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/subsystems/zinc.py#L329-L369 | def compile_compiler_bridge(self, context):
"""Compile the compiler bridge to be used by zinc, using our scala bootstrapper.
It will compile and cache the jar, and materialize it if not already there.
:param context: The context of the task trying to compile the bridge.
This is mostly needed to use its scheduler to create digests of the relevant jars.
:return: The absolute path to the compiled scala-compiler-bridge jar.
"""
bridge_jar_name = 'scala-compiler-bridge.jar'
bridge_jar = os.path.join(self._compiler_bridge_cache_dir, bridge_jar_name)
global_bridge_cache_dir = os.path.join(self._zinc_factory.get_options().pants_bootstrapdir, fast_relpath(self._compiler_bridge_cache_dir, self._workdir()))
globally_cached_bridge_jar = os.path.join(global_bridge_cache_dir, bridge_jar_name)
# Workaround to avoid recompiling the bridge for every integration test
# We check the bootstrapdir (.cache) for the bridge.
# If it exists, we make a copy to the buildroot.
#
# TODO Remove when action caches are implemented.
if os.path.exists(globally_cached_bridge_jar):
# Cache the bridge jar under buildroot, to allow snapshotting
safe_mkdir(self._relative_to_buildroot(self._compiler_bridge_cache_dir))
safe_hardlink_or_copy(globally_cached_bridge_jar, bridge_jar)
if not os.path.exists(bridge_jar):
res = self._run_bootstrapper(bridge_jar, context)
context._scheduler.materialize_directories((
DirectoryToMaterialize(get_buildroot(), res.output_directory_digest),
))
# For the workaround above to work, we need to store a copy of the bridge in
# the bootstrapdir cache (.cache).
safe_mkdir(global_bridge_cache_dir)
safe_hardlink_or_copy(bridge_jar, globally_cached_bridge_jar)
return ClasspathEntry(bridge_jar, res.output_directory_digest)
else:
bridge_jar_snapshot = context._scheduler.capture_snapshots((PathGlobsAndRoot(
PathGlobs((self._relative_to_buildroot(bridge_jar),)),
text_type(get_buildroot())
),))[0]
bridge_jar_digest = bridge_jar_snapshot.directory_digest
return ClasspathEntry(bridge_jar, bridge_jar_digest) | [
"def",
"compile_compiler_bridge",
"(",
"self",
",",
"context",
")",
":",
"bridge_jar_name",
"=",
"'scala-compiler-bridge.jar'",
"bridge_jar",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_compiler_bridge_cache_dir",
",",
"bridge_jar_name",
")",
"global_bridge_cache_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_zinc_factory",
".",
"get_options",
"(",
")",
".",
"pants_bootstrapdir",
",",
"fast_relpath",
"(",
"self",
".",
"_compiler_bridge_cache_dir",
",",
"self",
".",
"_workdir",
"(",
")",
")",
")",
"globally_cached_bridge_jar",
"=",
"os",
".",
"path",
".",
"join",
"(",
"global_bridge_cache_dir",
",",
"bridge_jar_name",
")",
"# Workaround to avoid recompiling the bridge for every integration test",
"# We check the bootstrapdir (.cache) for the bridge.",
"# If it exists, we make a copy to the buildroot.",
"#",
"# TODO Remove when action caches are implemented.",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"globally_cached_bridge_jar",
")",
":",
"# Cache the bridge jar under buildroot, to allow snapshotting",
"safe_mkdir",
"(",
"self",
".",
"_relative_to_buildroot",
"(",
"self",
".",
"_compiler_bridge_cache_dir",
")",
")",
"safe_hardlink_or_copy",
"(",
"globally_cached_bridge_jar",
",",
"bridge_jar",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"bridge_jar",
")",
":",
"res",
"=",
"self",
".",
"_run_bootstrapper",
"(",
"bridge_jar",
",",
"context",
")",
"context",
".",
"_scheduler",
".",
"materialize_directories",
"(",
"(",
"DirectoryToMaterialize",
"(",
"get_buildroot",
"(",
")",
",",
"res",
".",
"output_directory_digest",
")",
",",
")",
")",
"# For the workaround above to work, we need to store a copy of the bridge in",
"# the bootstrapdir cache (.cache).",
"safe_mkdir",
"(",
"global_bridge_cache_dir",
")",
"safe_hardlink_or_copy",
"(",
"bridge_jar",
",",
"globally_cached_bridge_jar",
")",
"return",
"ClasspathEntry",
"(",
"bridge_jar",
",",
"res",
".",
"output_directory_digest",
")",
"else",
":",
"bridge_jar_snapshot",
"=",
"context",
".",
"_scheduler",
".",
"capture_snapshots",
"(",
"(",
"PathGlobsAndRoot",
"(",
"PathGlobs",
"(",
"(",
"self",
".",
"_relative_to_buildroot",
"(",
"bridge_jar",
")",
",",
")",
")",
",",
"text_type",
"(",
"get_buildroot",
"(",
")",
")",
")",
",",
")",
")",
"[",
"0",
"]",
"bridge_jar_digest",
"=",
"bridge_jar_snapshot",
".",
"directory_digest",
"return",
"ClasspathEntry",
"(",
"bridge_jar",
",",
"bridge_jar_digest",
")"
] | Compile the compiler bridge to be used by zinc, using our scala bootstrapper.
It will compile and cache the jar, and materialize it if not already there.
:param context: The context of the task trying to compile the bridge.
This is mostly needed to use its scheduler to create digests of the relevant jars.
:return: The absolute path to the compiled scala-compiler-bridge jar. | [
"Compile",
"the",
"compiler",
"bridge",
"to",
"be",
"used",
"by",
"zinc",
"using",
"our",
"scala",
"bootstrapper",
".",
"It",
"will",
"compile",
"and",
"cache",
"the",
"jar",
"and",
"materialize",
"it",
"if",
"not",
"already",
"there",
"."
] | python | train |
cloudant/python-cloudant | src/cloudant/database.py | https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L996-L1046 | def update_handler_result(self, ddoc_id, handler_name, doc_id=None, data=None, **params):
"""
Creates or updates a document from the specified database based on the
update handler function provided. Update handlers are used, for
example, to provide server-side modification timestamps, and document
updates to individual fields without the latest revision. You can
provide query parameters needed by the update handler function using
the ``params`` argument.
Create a document with a generated ID:
.. code-block:: python
# Assuming that 'update001' update handler exists as part of the
# 'ddoc001' design document in the remote database...
# Execute 'update001' to create a new document
resp = db.update_handler_result('ddoc001', 'update001', data={'name': 'John',
'message': 'hello'})
Create or update a document with the specified ID:
.. code-block:: python
# Assuming that 'update001' update handler exists as part of the
# 'ddoc001' design document in the remote database...
# Execute 'update001' to update document 'doc001' in the database
resp = db.update_handler_result('ddoc001', 'update001', 'doc001',
data={'month': 'July'})
For more details, see the `update handlers documentation
<https://console.bluemix.net/docs/services/Cloudant/api/design_documents.html#update-handlers>`_.
:param str ddoc_id: Design document id used to get result.
:param str handler_name: Name used in part to identify the
update handler function.
:param str doc_id: Optional document id used to specify the
document to be handled.
:returns: Result of update handler function in text format
"""
ddoc = DesignDocument(self, ddoc_id)
if doc_id:
resp = self.r_session.put(
'/'.join([ddoc.document_url, '_update', handler_name, doc_id]),
params=params, data=data)
else:
resp = self.r_session.post(
'/'.join([ddoc.document_url, '_update', handler_name]),
params=params, data=data)
resp.raise_for_status()
return resp.text | [
"def",
"update_handler_result",
"(",
"self",
",",
"ddoc_id",
",",
"handler_name",
",",
"doc_id",
"=",
"None",
",",
"data",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"ddoc",
"=",
"DesignDocument",
"(",
"self",
",",
"ddoc_id",
")",
"if",
"doc_id",
":",
"resp",
"=",
"self",
".",
"r_session",
".",
"put",
"(",
"'/'",
".",
"join",
"(",
"[",
"ddoc",
".",
"document_url",
",",
"'_update'",
",",
"handler_name",
",",
"doc_id",
"]",
")",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
")",
"else",
":",
"resp",
"=",
"self",
".",
"r_session",
".",
"post",
"(",
"'/'",
".",
"join",
"(",
"[",
"ddoc",
".",
"document_url",
",",
"'_update'",
",",
"handler_name",
"]",
")",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"return",
"resp",
".",
"text"
] | Creates or updates a document from the specified database based on the
update handler function provided. Update handlers are used, for
example, to provide server-side modification timestamps, and document
updates to individual fields without the latest revision. You can
provide query parameters needed by the update handler function using
the ``params`` argument.
Create a document with a generated ID:
.. code-block:: python
# Assuming that 'update001' update handler exists as part of the
# 'ddoc001' design document in the remote database...
# Execute 'update001' to create a new document
resp = db.update_handler_result('ddoc001', 'update001', data={'name': 'John',
'message': 'hello'})
Create or update a document with the specified ID:
.. code-block:: python
# Assuming that 'update001' update handler exists as part of the
# 'ddoc001' design document in the remote database...
# Execute 'update001' to update document 'doc001' in the database
resp = db.update_handler_result('ddoc001', 'update001', 'doc001',
data={'month': 'July'})
For more details, see the `update handlers documentation
<https://console.bluemix.net/docs/services/Cloudant/api/design_documents.html#update-handlers>`_.
:param str ddoc_id: Design document id used to get result.
:param str handler_name: Name used in part to identify the
update handler function.
:param str doc_id: Optional document id used to specify the
document to be handled.
:returns: Result of update handler function in text format | [
"Creates",
"or",
"updates",
"a",
"document",
"from",
"the",
"specified",
"database",
"based",
"on",
"the",
"update",
"handler",
"function",
"provided",
".",
"Update",
"handlers",
"are",
"used",
"for",
"example",
"to",
"provide",
"server",
"-",
"side",
"modification",
"timestamps",
"and",
"document",
"updates",
"to",
"individual",
"fields",
"without",
"the",
"latest",
"revision",
".",
"You",
"can",
"provide",
"query",
"parameters",
"needed",
"by",
"the",
"update",
"handler",
"function",
"using",
"the",
"params",
"argument",
"."
] | python | train |
Riminder/python-riminder-api | riminder/profile.py | https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/profile.py#L426-L440 | def add(self, source_id, profile_data, training_metadata=[], profile_reference=None, timestamp_reception=None):
"""Use the api to add a new profile using profile_data."""
data = {
"source_id": _validate_source_id(source_id),
"profile_json": _validate_dict(profile_data, "profile_data"),
"training_metadata": _validate_training_metadata(training_metadata),
"profile_reference": profile_reference
}
# some enrichment for profile_json
if timestamp_reception is not None:
data['timestamp_reception'] = _validate_timestamp(timestamp_reception, 'timestamp_reception')
response = self.client.post("profile/json", data=data)
return response.json() | [
"def",
"add",
"(",
"self",
",",
"source_id",
",",
"profile_data",
",",
"training_metadata",
"=",
"[",
"]",
",",
"profile_reference",
"=",
"None",
",",
"timestamp_reception",
"=",
"None",
")",
":",
"data",
"=",
"{",
"\"source_id\"",
":",
"_validate_source_id",
"(",
"source_id",
")",
",",
"\"profile_json\"",
":",
"_validate_dict",
"(",
"profile_data",
",",
"\"profile_data\"",
")",
",",
"\"training_metadata\"",
":",
"_validate_training_metadata",
"(",
"training_metadata",
")",
",",
"\"profile_reference\"",
":",
"profile_reference",
"}",
"# some enrichement for profile_json",
"if",
"timestamp_reception",
"is",
"not",
"None",
":",
"data",
"[",
"'timestamp_reception'",
"]",
"=",
"_validate_timestamp",
"(",
"timestamp_reception",
",",
"'timestamp_reception'",
")",
"response",
"=",
"self",
".",
"client",
".",
"post",
"(",
"\"profile/json\"",
",",
"data",
"=",
"data",
")",
"return",
"response",
".",
"json",
"(",
")"
] | Use the api to add a new profile using profile_data. | [
"Use",
"the",
"api",
"to",
"add",
"a",
"new",
"profile",
"using",
"profile_data",
"."
] | python | train |
nprapps/mapturner | mapturner/__init__.py | https://github.com/nprapps/mapturner/blob/fc9747c9d1584af2053bff3df229a460ef2a5f62/mapturner/__init__.py#L193-L228 | def process_ogr2ogr(self, name, layer, input_path):
"""
Process a layer using ogr2ogr.
"""
output_path = os.path.join(TEMP_DIRECTORY, '%s.json' % name)
if os.path.exists(output_path):
os.remove(output_path)
ogr2ogr_cmd = [
'ogr2ogr',
'-f', 'GeoJSON',
'-clipsrc', self.config['bbox']
]
if 'where' in layer:
ogr2ogr_cmd.extend([
'-where', '"%s"' % layer['where']
])
ogr2ogr_cmd.extend([
output_path,
input_path
])
sys.stdout.write('* Running ogr2ogr\n')
if self.args.verbose:
sys.stdout.write(' %s\n' % ' '.join(ogr2ogr_cmd))
r = envoy.run(' '.join(ogr2ogr_cmd))
if r.status_code != 0:
sys.stderr.write(r.std_err)
return output_path | [
"def",
"process_ogr2ogr",
"(",
"self",
",",
"name",
",",
"layer",
",",
"input_path",
")",
":",
"output_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"TEMP_DIRECTORY",
",",
"'%s.json'",
"%",
"name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_path",
")",
":",
"os",
".",
"remove",
"(",
"output_path",
")",
"ogr2ogr_cmd",
"=",
"[",
"'ogr2ogr'",
",",
"'-f'",
",",
"'GeoJSON'",
",",
"'-clipsrc'",
",",
"self",
".",
"config",
"[",
"'bbox'",
"]",
"]",
"if",
"'where'",
"in",
"layer",
":",
"ogr2ogr_cmd",
".",
"extend",
"(",
"[",
"'-where'",
",",
"'\"%s\"'",
"%",
"layer",
"[",
"'where'",
"]",
"]",
")",
"ogr2ogr_cmd",
".",
"extend",
"(",
"[",
"output_path",
",",
"input_path",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'* Running ogr2ogr\\n'",
")",
"if",
"self",
".",
"args",
".",
"verbose",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"' %s\\n'",
"%",
"' '",
".",
"join",
"(",
"ogr2ogr_cmd",
")",
")",
"r",
"=",
"envoy",
".",
"run",
"(",
"' '",
".",
"join",
"(",
"ogr2ogr_cmd",
")",
")",
"if",
"r",
".",
"status_code",
"!=",
"0",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"r",
".",
"std_err",
")",
"return",
"output_path"
] | Process a layer using ogr2ogr. | [
"Process",
"a",
"layer",
"using",
"ogr2ogr",
"."
] | python | train |
Azure/azure-event-hubs-python | azure/eventprocessorhost/lease.py | https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/lease.py#L20-L31 | def with_partition_id(self, partition_id):
"""
Init with partition Id.
:param partition_id: ID of a given partition.
:type partition_id: str
"""
self.partition_id = partition_id
self.owner = None
self.token = None
self.epoch = 0
self.event_processor_context = None | [
"def",
"with_partition_id",
"(",
"self",
",",
"partition_id",
")",
":",
"self",
".",
"partition_id",
"=",
"partition_id",
"self",
".",
"owner",
"=",
"None",
"self",
".",
"token",
"=",
"None",
"self",
".",
"epoch",
"=",
"0",
"self",
".",
"event_processor_context",
"=",
"None"
] | Init with partition Id.
:param partition_id: ID of a given partition.
:type partition_id: str | [
"Init",
"with",
"partition",
"Id",
"."
] | python | train |
jwkvam/bowtie | bowtie/_app.py | https://github.com/jwkvam/bowtie/blob/c494850671ac805bf186fbf2bdb07d2a34ae876d/bowtie/_app.py#L1084-L1087 | def node_version():
"""Get node version."""
version = check_output(('node', '--version'))
return tuple(int(x) for x in version.strip()[1:].split(b'.')) | [
"def",
"node_version",
"(",
")",
":",
"version",
"=",
"check_output",
"(",
"(",
"'node'",
",",
"'--version'",
")",
")",
"return",
"tuple",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"version",
".",
"strip",
"(",
")",
"[",
"1",
":",
"]",
".",
"split",
"(",
"b'.'",
")",
")"
] | Get node version. | [
"Get",
"node",
"version",
"."
] | python | train |
aleontiev/dj | dj/commands/lint.py | https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/commands/lint.py#L13-L19 | def lint(args):
"""Run lint checks using flake8."""
application = get_current_application()
if not args:
args = [application.name, 'tests']
args = ['flake8'] + list(args)
run.main(args, standalone_mode=False) | [
"def",
"lint",
"(",
"args",
")",
":",
"application",
"=",
"get_current_application",
"(",
")",
"if",
"not",
"args",
":",
"args",
"=",
"[",
"application",
".",
"name",
",",
"'tests'",
"]",
"args",
"=",
"[",
"'flake8'",
"]",
"+",
"list",
"(",
"args",
")",
"run",
".",
"main",
"(",
"args",
",",
"standalone_mode",
"=",
"False",
")"
] | Run lint checks using flake8. | [
"Run",
"lint",
"checks",
"using",
"flake8",
"."
] | python | train |
cloud-custodian/cloud-custodian | tools/ops/mugc.py | https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/ops/mugc.py#L113-L129 | def resources_gc_prefix(options, policy_config, policy_collection):
"""Garbage collect old custodian policies based on prefix.
We attempt to introspect to find the event sources for a policy
but without the old configuration this is implicit.
"""
# Classify policies by region
policy_regions = {}
for p in policy_collection:
if p.execution_mode == 'poll':
continue
policy_regions.setdefault(p.options.region, []).append(p)
regions = get_gc_regions(options.regions)
for r in regions:
region_gc(options, r, policy_config, policy_regions.get(r, [])) | [
"def",
"resources_gc_prefix",
"(",
"options",
",",
"policy_config",
",",
"policy_collection",
")",
":",
"# Classify policies by region",
"policy_regions",
"=",
"{",
"}",
"for",
"p",
"in",
"policy_collection",
":",
"if",
"p",
".",
"execution_mode",
"==",
"'poll'",
":",
"continue",
"policy_regions",
".",
"setdefault",
"(",
"p",
".",
"options",
".",
"region",
",",
"[",
"]",
")",
".",
"append",
"(",
"p",
")",
"regions",
"=",
"get_gc_regions",
"(",
"options",
".",
"regions",
")",
"for",
"r",
"in",
"regions",
":",
"region_gc",
"(",
"options",
",",
"r",
",",
"policy_config",
",",
"policy_regions",
".",
"get",
"(",
"r",
",",
"[",
"]",
")",
")"
] | Garbage collect old custodian policies based on prefix.
We attempt to introspect to find the event sources for a policy
but without the old configuration this is implicit. | [
"Garbage",
"collect",
"old",
"custodian",
"policies",
"based",
"on",
"prefix",
"."
] | python | train |
srittau/python-asserts | asserts/__init__.py | https://github.com/srittau/python-asserts/blob/1d5c797031c68ee27552d1c94e7f918c3d3d0453/asserts/__init__.py#L80-L96 | def assert_boolean_true(expr, msg_fmt="{msg}"):
"""Fail the test unless the expression is the constant True.
>>> assert_boolean_true(True)
>>> assert_boolean_true("Hello World!")
Traceback (most recent call last):
...
AssertionError: 'Hello World!' is not True
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression
"""
if expr is not True:
msg = "{!r} is not True".format(expr)
fail(msg_fmt.format(msg=msg, expr=expr)) | [
"def",
"assert_boolean_true",
"(",
"expr",
",",
"msg_fmt",
"=",
"\"{msg}\"",
")",
":",
"if",
"expr",
"is",
"not",
"True",
":",
"msg",
"=",
"\"{!r} is not True\"",
".",
"format",
"(",
"expr",
")",
"fail",
"(",
"msg_fmt",
".",
"format",
"(",
"msg",
"=",
"msg",
",",
"expr",
"=",
"expr",
")",
")"
] | Fail the test unless the expression is the constant True.
>>> assert_boolean_true(True)
>>> assert_boolean_true("Hello World!")
Traceback (most recent call last):
...
AssertionError: 'Hello World!' is not True
The following msg_fmt arguments are supported:
* msg - the default error message
* expr - tested expression | [
"Fail",
"the",
"test",
"unless",
"the",
"expression",
"is",
"the",
"constant",
"True",
"."
] | python | train |
ArduPilot/MAVProxy | MAVProxy/modules/lib/MacOS/backend_wx.py | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_wx.py#L1140-L1146 | def _onLeftButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1, dblclick=True, guiEvent=evt) | [
"def",
"_onLeftButtonDClick",
"(",
"self",
",",
"evt",
")",
":",
"x",
"=",
"evt",
".",
"GetX",
"(",
")",
"y",
"=",
"self",
".",
"figure",
".",
"bbox",
".",
"height",
"-",
"evt",
".",
"GetY",
"(",
")",
"evt",
".",
"Skip",
"(",
")",
"self",
".",
"CaptureMouse",
"(",
")",
"FigureCanvasBase",
".",
"button_press_event",
"(",
"self",
",",
"x",
",",
"y",
",",
"1",
",",
"dblclick",
"=",
"True",
",",
"guiEvent",
"=",
"evt",
")"
] | Start measuring on an axis. | [
"Start",
"measuring",
"on",
"an",
"axis",
"."
] | python | train |
mikedh/trimesh | trimesh/collision.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/collision.py#L225-L243 | def set_transform(self, name, transform):
"""
Set the transform for one of the manager's objects.
This replaces the prior transform.
Parameters
----------
name : str
An identifier for the object already in the manager
transform : (4,4) float
A new homogeneous transform matrix for the object
"""
if name in self._objs:
o = self._objs[name]['obj']
o.setRotation(transform[:3, :3])
o.setTranslation(transform[:3, 3])
self._manager.update(o)
else:
raise ValueError('{} not in collision manager!'.format(name)) | [
"def",
"set_transform",
"(",
"self",
",",
"name",
",",
"transform",
")",
":",
"if",
"name",
"in",
"self",
".",
"_objs",
":",
"o",
"=",
"self",
".",
"_objs",
"[",
"name",
"]",
"[",
"'obj'",
"]",
"o",
".",
"setRotation",
"(",
"transform",
"[",
":",
"3",
",",
":",
"3",
"]",
")",
"o",
".",
"setTranslation",
"(",
"transform",
"[",
":",
"3",
",",
"3",
"]",
")",
"self",
".",
"_manager",
".",
"update",
"(",
"o",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'{} not in collision manager!'",
".",
"format",
"(",
"name",
")",
")"
] | Set the transform for one of the manager's objects.
This replaces the prior transform.
Parameters
----------
name : str
An identifier for the object already in the manager
transform : (4,4) float
A new homogeneous transform matrix for the object | [
"Set",
"the",
"transform",
"for",
"one",
"of",
"the",
"manager",
"s",
"objects",
".",
"This",
"replaces",
"the",
"prior",
"transform",
"."
] | python | train |
kgori/treeCl | treeCl/distance_matrix.py | https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/distance_matrix.py#L324-L336 | def _embedding_spectral(matrix, dimensions=3, unit_length=True,
affinity_matrix=None, sigma=1):
"""
Private method to calculate Spectral embedding
:param dimensions: (int)
:return: coordinate matrix (np.array)
"""
if affinity_matrix is None:
aff = rbf(matrix, sigma=sigma)
else:
aff = affinity_matrix
coords = sklearn.manifold.spectral_embedding(aff, dimensions)
return normalise_rows(coords) if unit_length else coords | [
"def",
"_embedding_spectral",
"(",
"matrix",
",",
"dimensions",
"=",
"3",
",",
"unit_length",
"=",
"True",
",",
"affinity_matrix",
"=",
"None",
",",
"sigma",
"=",
"1",
")",
":",
"if",
"affinity_matrix",
"is",
"None",
":",
"aff",
"=",
"rbf",
"(",
"matrix",
",",
"sigma",
"=",
"sigma",
")",
"else",
":",
"aff",
"=",
"affinity_matrix",
"coords",
"=",
"sklearn",
".",
"manifold",
".",
"spectral_embedding",
"(",
"aff",
",",
"dimensions",
")",
"return",
"normalise_rows",
"(",
"coords",
")",
"if",
"unit_length",
"else",
"coords"
] | Private method to calculate Spectral embedding
:param dimensions: (int)
:return: coordinate matrix (np.array) | [
"Private",
"method",
"to",
"calculate",
"Spectral",
"embedding",
":",
"param",
"dimensions",
":",
"(",
"int",
")",
":",
"return",
":",
"coordinate",
"matrix",
"(",
"np",
".",
"array",
")"
] | python | train |
MisterWil/skybellpy | skybellpy/__init__.py | https://github.com/MisterWil/skybellpy/blob/ac966d9f590cda7654f6de7eecc94e2103459eef/skybellpy/__init__.py#L123-L146 | def get_devices(self, refresh=False):
"""Get all devices from Abode."""
if refresh or self._devices is None:
if self._devices is None:
self._devices = {}
_LOGGER.info("Updating all devices...")
response = self.send_request("get", CONST.DEVICES_URL)
response_object = json.loads(response.text)
_LOGGER.debug("Get Devices Response: %s", response.text)
for device_json in response_object:
# Attempt to reuse an existing device
device = self._devices.get(device_json['id'])
# No existing device, create a new one
if device:
device.update(device_json)
else:
device = SkybellDevice(device_json, self)
self._devices[device.device_id] = device
return list(self._devices.values()) | [
"def",
"get_devices",
"(",
"self",
",",
"refresh",
"=",
"False",
")",
":",
"if",
"refresh",
"or",
"self",
".",
"_devices",
"is",
"None",
":",
"if",
"self",
".",
"_devices",
"is",
"None",
":",
"self",
".",
"_devices",
"=",
"{",
"}",
"_LOGGER",
".",
"info",
"(",
"\"Updating all devices...\"",
")",
"response",
"=",
"self",
".",
"send_request",
"(",
"\"get\"",
",",
"CONST",
".",
"DEVICES_URL",
")",
"response_object",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Get Devices Response: %s\"",
",",
"response",
".",
"text",
")",
"for",
"device_json",
"in",
"response_object",
":",
"# Attempt to reuse an existing device",
"device",
"=",
"self",
".",
"_devices",
".",
"get",
"(",
"device_json",
"[",
"'id'",
"]",
")",
"# No existing device, create a new one",
"if",
"device",
":",
"device",
".",
"update",
"(",
"device_json",
")",
"else",
":",
"device",
"=",
"SkybellDevice",
"(",
"device_json",
",",
"self",
")",
"self",
".",
"_devices",
"[",
"device",
".",
"device_id",
"]",
"=",
"device",
"return",
"list",
"(",
"self",
".",
"_devices",
".",
"values",
"(",
")",
")"
] | Get all devices from Abode. | [
"Get",
"all",
"devices",
"from",
"Abode",
"."
] | python | train |
zinic/pynsive | pynsive/reflection.py | https://github.com/zinic/pynsive/blob/15bc8b35a91be5817979eb327427b6235b1b411e/pynsive/reflection.py#L168-L194 | def rlist_modules(mname):
"""
Attempts to list the submodules under a module recursively. This function
works for modules located in the default path as well as extended paths
via the sys.meta_path hooks.
This function carries the expectation that the hidden module variable
'__path__' has been set correctly.
:param mname: the module name to descend into
"""
module = import_module(mname)
if not module:
raise ImportError('Unable to load module {}'.format(mname))
found = list()
if _should_use_module_path(module):
mpath = module.__path__[0]
else:
mpaths = sys.path
mpath = _scan_paths_for(mname, mpaths)
if mpath:
for pmname in _search_for_modules(mpath, recursive=True):
found_mod = MODULE_PATH_SEP.join((mname, pmname))
found.append(found_mod)
return found | [
"def",
"rlist_modules",
"(",
"mname",
")",
":",
"module",
"=",
"import_module",
"(",
"mname",
")",
"if",
"not",
"module",
":",
"raise",
"ImportError",
"(",
"'Unable to load module {}'",
".",
"format",
"(",
"mname",
")",
")",
"found",
"=",
"list",
"(",
")",
"if",
"_should_use_module_path",
"(",
"module",
")",
":",
"mpath",
"=",
"module",
".",
"__path__",
"[",
"0",
"]",
"else",
":",
"mpaths",
"=",
"sys",
".",
"path",
"mpath",
"=",
"_scan_paths_for",
"(",
"mname",
",",
"mpaths",
")",
"if",
"mpath",
":",
"for",
"pmname",
"in",
"_search_for_modules",
"(",
"mpath",
",",
"recursive",
"=",
"True",
")",
":",
"found_mod",
"=",
"MODULE_PATH_SEP",
".",
"join",
"(",
"(",
"mname",
",",
"pmname",
")",
")",
"found",
".",
"append",
"(",
"found_mod",
")",
"return",
"found"
] | Attempts to list the submodules under a module recursively. This function
works for modules located in the default path as well as extended paths
via the sys.meta_path hooks.
This function carries the expectation that the hidden module variable
'__path__' has been set correctly.
:param mname: the module name to descend into | [
"Attempts",
"to",
"the",
"submodules",
"under",
"a",
"module",
"recursively",
".",
"This",
"function",
"works",
"for",
"modules",
"located",
"in",
"the",
"default",
"path",
"as",
"well",
"as",
"extended",
"paths",
"via",
"the",
"sys",
".",
"meta_path",
"hooks",
"."
] | python | test |
Chilipp/psyplot | psyplot/__main__.py | https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/__main__.py#L24-L46 | def main(args=None):
"""Main function for usage of psyplot from the command line
This function creates a parser that parses command lines to the
:func:`make_plot` functions or (if the ``psyplot_gui`` module is
present, to the :func:`psyplot_gui.start_app` function)
Returns
-------
psyplot.parser.FuncArgParser
The parser that has been used from the command line"""
try:
from psyplot_gui import get_parser as _get_parser
except ImportError:
logger.debug('Failed to import gui', exc_info=True)
parser = get_parser(create=False)
parser.update_arg('output', required=True)
parser.create_arguments()
parser.parse2func(args)
else:
parser = _get_parser(create=False)
parser.create_arguments()
parser.parse_known2func(args) | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"try",
":",
"from",
"psyplot_gui",
"import",
"get_parser",
"as",
"_get_parser",
"except",
"ImportError",
":",
"logger",
".",
"debug",
"(",
"'Failed to import gui'",
",",
"exc_info",
"=",
"True",
")",
"parser",
"=",
"get_parser",
"(",
"create",
"=",
"False",
")",
"parser",
".",
"update_arg",
"(",
"'output'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"create_arguments",
"(",
")",
"parser",
".",
"parse2func",
"(",
"args",
")",
"else",
":",
"parser",
"=",
"_get_parser",
"(",
"create",
"=",
"False",
")",
"parser",
".",
"create_arguments",
"(",
")",
"parser",
".",
"parse_known2func",
"(",
"args",
")"
] | Main function for usage of psyplot from the command line
This function creates a parser that parses command lines to the
:func:`make_plot` functions or (if the ``psyplot_gui`` module is
present, to the :func:`psyplot_gui.start_app` function)
Returns
-------
psyplot.parser.FuncArgParser
The parser that has been used from the command line | [
"Main",
"function",
"for",
"usage",
"of",
"psyplot",
"from",
"the",
"command",
"line"
] | python | train |
KujiraProject/Flask-PAM | flask_pam/auth.py | https://github.com/KujiraProject/Flask-PAM/blob/d84f90ffd706c0f491af3539cd438e13771ada7e/flask_pam/auth.py#L142-L152 | def get_groups(self, username):
"""Returns list of groups in which user is.
:param username: name of Linux user
"""
groups = []
for group in grp.getgrall():
if username in group.gr_mem:
groups.append(group.gr_name)
return groups | [
"def",
"get_groups",
"(",
"self",
",",
"username",
")",
":",
"groups",
"=",
"[",
"]",
"for",
"group",
"in",
"grp",
".",
"getgrall",
"(",
")",
":",
"if",
"username",
"in",
"group",
".",
"gr_mem",
":",
"groups",
".",
"append",
"(",
"group",
".",
"gr_name",
")",
"return",
"groups"
] | Returns list of groups in which user is.
:param username: name of Linux user | [
"Returns",
"list",
"of",
"groups",
"in",
"which",
"user",
"is",
"."
] | python | train |
slhck/ffmpeg-normalize | ffmpeg_normalize/_ffmpeg_normalize.py | https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_ffmpeg_normalize.py#L141-L161 | def add_media_file(self, input_file, output_file):
"""
Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file
"""
if not os.path.exists(input_file):
raise FFmpegNormalizeError("file " + input_file + " does not exist")
ext = os.path.splitext(output_file)[1][1:]
if (self.audio_codec is None or 'pcm' in self.audio_codec) and ext in PCM_INCOMPATIBLE_EXTS:
raise FFmpegNormalizeError(
"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.".format(ext)
)
mf = MediaFile(self, input_file, output_file)
self.media_files.append(mf)
self.file_count += 1 | [
"def",
"add_media_file",
"(",
"self",
",",
"input_file",
",",
"output_file",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"input_file",
")",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"file \"",
"+",
"input_file",
"+",
"\" does not exist\"",
")",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"output_file",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"if",
"(",
"self",
".",
"audio_codec",
"is",
"None",
"or",
"'pcm'",
"in",
"self",
".",
"audio_codec",
")",
"and",
"ext",
"in",
"PCM_INCOMPATIBLE_EXTS",
":",
"raise",
"FFmpegNormalizeError",
"(",
"\"Output extension {} does not support PCM audio. Please choose a suitable audio codec with the -c:a option.\"",
".",
"format",
"(",
"ext",
")",
")",
"mf",
"=",
"MediaFile",
"(",
"self",
",",
"input_file",
",",
"output_file",
")",
"self",
".",
"media_files",
".",
"append",
"(",
"mf",
")",
"self",
".",
"file_count",
"+=",
"1"
] | Add a media file to normalize
Arguments:
input_file {str} -- Path to input file
output_file {str} -- Path to output file | [
"Add",
"a",
"media",
"file",
"to",
"normalize"
] | python | train |
python273/telegraph | telegraph/api.py | https://github.com/python273/telegraph/blob/6d45cd6bbae4fdbd85b48ce32626f3c66e9e5ddc/telegraph/api.py#L122-L139 | def get_page(self, path, return_content=True, return_html=True):
""" Get a Telegraph page
:param path: Path to the Telegraph page (in the format Title-12-31,
i.e. everything that comes after https://telegra.ph/)
:param return_content: If true, content field will be returned
:param return_html: If true, returns HTML instead of Nodes list
"""
response = self._telegraph.method('getPage', path=path, values={
'return_content': return_content
})
if return_content and return_html:
response['content'] = nodes_to_html(response['content'])
return response | [
"def",
"get_page",
"(",
"self",
",",
"path",
",",
"return_content",
"=",
"True",
",",
"return_html",
"=",
"True",
")",
":",
"response",
"=",
"self",
".",
"_telegraph",
".",
"method",
"(",
"'getPage'",
",",
"path",
"=",
"path",
",",
"values",
"=",
"{",
"'return_content'",
":",
"return_content",
"}",
")",
"if",
"return_content",
"and",
"return_html",
":",
"response",
"[",
"'content'",
"]",
"=",
"nodes_to_html",
"(",
"response",
"[",
"'content'",
"]",
")",
"return",
"response"
] | Get a Telegraph page
:param path: Path to the Telegraph page (in the format Title-12-31,
i.e. everything that comes after https://telegra.ph/)
:param return_content: If true, content field will be returned
:param return_html: If true, returns HTML instead of Nodes list | [
"Get",
"a",
"Telegraph",
"page"
] | python | train |
pyroscope/pyrocore | src/pyrocore/util/load_config.py | https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/util/load_config.py#L213-L246 | def load(self, optional_cfg_files=None):
""" Actually load the configuation from either the default location or the given directory.
"""
optional_cfg_files = optional_cfg_files or []
# Guard against coding errors
if self._loaded:
raise RuntimeError("INTERNAL ERROR: Attempt to load configuration twice!")
try:
# Load configuration
namespace = {}
self._set_defaults(namespace, optional_cfg_files)
self._load_ini(namespace, os.path.join(self.config_dir, self.CONFIG_INI))
for cfg_file in optional_cfg_files:
if not os.path.isabs(cfg_file):
cfg_file = os.path.join(self.config_dir, cfg_file)
if os.path.exists(cfg_file):
self._load_ini(namespace, cfg_file)
self._validate_namespace(namespace)
self._load_py(namespace, namespace["config_script"])
self._validate_namespace(namespace)
for callback in namespace["config_validator_callbacks"]:
callback()
except ConfigParser.ParsingError as exc:
raise error.UserError(exc)
# Ready to go...
self._loaded = True | [
"def",
"load",
"(",
"self",
",",
"optional_cfg_files",
"=",
"None",
")",
":",
"optional_cfg_files",
"=",
"optional_cfg_files",
"or",
"[",
"]",
"# Guard against coding errors",
"if",
"self",
".",
"_loaded",
":",
"raise",
"RuntimeError",
"(",
"\"INTERNAL ERROR: Attempt to load configuration twice!\"",
")",
"try",
":",
"# Load configuration",
"namespace",
"=",
"{",
"}",
"self",
".",
"_set_defaults",
"(",
"namespace",
",",
"optional_cfg_files",
")",
"self",
".",
"_load_ini",
"(",
"namespace",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config_dir",
",",
"self",
".",
"CONFIG_INI",
")",
")",
"for",
"cfg_file",
"in",
"optional_cfg_files",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"cfg_file",
")",
":",
"cfg_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config_dir",
",",
"cfg_file",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cfg_file",
")",
":",
"self",
".",
"_load_ini",
"(",
"namespace",
",",
"cfg_file",
")",
"self",
".",
"_validate_namespace",
"(",
"namespace",
")",
"self",
".",
"_load_py",
"(",
"namespace",
",",
"namespace",
"[",
"\"config_script\"",
"]",
")",
"self",
".",
"_validate_namespace",
"(",
"namespace",
")",
"for",
"callback",
"in",
"namespace",
"[",
"\"config_validator_callbacks\"",
"]",
":",
"callback",
"(",
")",
"except",
"ConfigParser",
".",
"ParsingError",
"as",
"exc",
":",
"raise",
"error",
".",
"UserError",
"(",
"exc",
")",
"# Ready to go...",
"self",
".",
"_loaded",
"=",
"True"
] | Actually load the configuration from either the default location or the given directory. | [
"Actually",
"load",
"the",
"configuation",
"from",
"either",
"the",
"default",
"location",
"or",
"the",
"given",
"directory",
"."
] | python | train |
mdiener/grace | grace/py27/slimit/parser.py | https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L1003-L1008 | def p_iteration_statement_4(self, p):
"""
iteration_statement \
: FOR LPAREN left_hand_side_expr IN expr RPAREN statement
"""
p[0] = ast.ForIn(item=p[3], iterable=p[5], statement=p[7]) | [
"def",
"p_iteration_statement_4",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"ForIn",
"(",
"item",
"=",
"p",
"[",
"3",
"]",
",",
"iterable",
"=",
"p",
"[",
"5",
"]",
",",
"statement",
"=",
"p",
"[",
"7",
"]",
")"
] | iteration_statement \
: FOR LPAREN left_hand_side_expr IN expr RPAREN statement | [
"iteration_statement",
"\\",
":",
"FOR",
"LPAREN",
"left_hand_side_expr",
"IN",
"expr",
"RPAREN",
"statement"
] | python | train |
petebachant/PXL | pxl/timeseries.py | https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/timeseries.py#L175-L196 | def calc_multi_exp_unc(sys_unc, n, mean, std, dof, confidence=0.95):
"""Calculate expanded uncertainty using values from multiple runs.
Note that this function assumes the statistic is a mean value, therefore
the combined standard deviation is divided by `sqrt(N)`.
Parameters
----------
sys_unc : numpy array of systematic uncertainties
n : numpy array of numbers of samples per set
std : numpy array of sample standard deviations
dof : numpy array of degrees of freedom
confidence : Confidence interval for t-statistic
"""
sys_unc = sys_unc.mean()
std_combined = combine_std(n, mean, std)
std_combined /= np.sqrt(n.sum())
std_unc_combined = np.sqrt(std_combined**2 + sys_unc**2)
dof = dof.sum()
t_combined = scipy.stats.t.interval(alpha=confidence, df=dof)[-1]
exp_unc_combined = t_combined*std_unc_combined
return exp_unc_combined | [
"def",
"calc_multi_exp_unc",
"(",
"sys_unc",
",",
"n",
",",
"mean",
",",
"std",
",",
"dof",
",",
"confidence",
"=",
"0.95",
")",
":",
"sys_unc",
"=",
"sys_unc",
".",
"mean",
"(",
")",
"std_combined",
"=",
"combine_std",
"(",
"n",
",",
"mean",
",",
"std",
")",
"std_combined",
"/=",
"np",
".",
"sqrt",
"(",
"n",
".",
"sum",
"(",
")",
")",
"std_unc_combined",
"=",
"np",
".",
"sqrt",
"(",
"std_combined",
"**",
"2",
"+",
"sys_unc",
"**",
"2",
")",
"dof",
"=",
"dof",
".",
"sum",
"(",
")",
"t_combined",
"=",
"scipy",
".",
"stats",
".",
"t",
".",
"interval",
"(",
"alpha",
"=",
"confidence",
",",
"df",
"=",
"dof",
")",
"[",
"-",
"1",
"]",
"exp_unc_combined",
"=",
"t_combined",
"*",
"std_unc_combined",
"return",
"exp_unc_combined"
] | Calculate expanded uncertainty using values from multiple runs.
Note that this function assumes the statistic is a mean value, therefore
the combined standard deviation is divided by `sqrt(N)`.
Parameters
----------
sys_unc : numpy array of systematic uncertainties
n : numpy array of numbers of samples per set
std : numpy array of sample standard deviations
dof : numpy array of degrees of freedom
confidence : Confidence interval for t-statistic | [
"Calculate",
"expanded",
"uncertainty",
"using",
"values",
"from",
"multiple",
"runs",
".",
"Note",
"that",
"this",
"function",
"assumes",
"the",
"statistic",
"is",
"a",
"mean",
"value",
"therefore",
"the",
"combined",
"standard",
"deviation",
"is",
"divided",
"by",
"sqrt",
"(",
"N",
")",
".",
"Parameters",
"----------",
"sys_unc",
":",
"numpy",
"array",
"of",
"systematic",
"uncertainties",
"n",
":",
"numpy",
"array",
"of",
"numbers",
"of",
"samples",
"per",
"set",
"std",
":",
"numpy",
"array",
"of",
"sample",
"standard",
"deviations",
"dof",
":",
"numpy",
"array",
"of",
"degrees",
"of",
"freedom",
"confidence",
":",
"Confidence",
"interval",
"for",
"t",
"-",
"statistic"
] | python | train |
minhhoit/yacms | yacms/blog/admin.py | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/admin.py#L54-L61 | def has_module_permission(self, request):
"""
Hide from the admin menu unless explicitly set in ``ADMIN_MENU_ORDER``.
"""
for (name, items) in settings.ADMIN_MENU_ORDER:
if "blog.BlogCategory" in items:
return True
return False | [
"def",
"has_module_permission",
"(",
"self",
",",
"request",
")",
":",
"for",
"(",
"name",
",",
"items",
")",
"in",
"settings",
".",
"ADMIN_MENU_ORDER",
":",
"if",
"\"blog.BlogCategory\"",
"in",
"items",
":",
"return",
"True",
"return",
"False"
] | Hide from the admin menu unless explicitly set in ``ADMIN_MENU_ORDER``. | [
"Hide",
"from",
"the",
"admin",
"menu",
"unless",
"explicitly",
"set",
"in",
"ADMIN_MENU_ORDER",
"."
] | python | train |
limpyd/redis-limpyd-jobs | limpyd_jobs/workers.py | https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L376-L443 | def _main_loop(self):
"""
Run jobs until must_stop returns True
"""
fetch_priorities_delay = timedelta(seconds=self.fetch_priorities_delay)
fetch_delayed_delay = timedelta(seconds=self.fetch_delayed_delay)
while not self.must_stop():
self.set_status('waiting')
if self.last_update_keys + fetch_priorities_delay < datetime.utcnow():
self.update_keys()
if self.last_requeue_delayed + fetch_delayed_delay < datetime.utcnow():
self.requeue_delayed_jobs()
try:
queue_and_job = self.wait_for_job()
if queue_and_job is None:
# timeout for blpop
continue
queue, job = queue_and_job
except Exception as e:
self.log('Unable to get job: %s\n%s'
% (str(e), traceback.format_exc()), level='error')
else:
self.num_loops += 1
try:
identifier = 'pk:%s' % job.pk.get()
except Exception as e:
identifier = '??'
try:
self.set_status('running')
identifier, status = job.hmget('identifier', 'status')
# some cache, don't count on it on subclasses
job._cached_identifier = identifier
job._cached_status = status
queue._cached_name = queue.name.hget()
if status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif status != STATUSES.WAITING:
self.job_skipped(job, queue)
else:
try:
self.job_started(job, queue)
job_result = self.callback(job, queue)
except Exception as e:
trace = None
if self.save_tracebacks:
trace = traceback.format_exc()
self.job_error(job, queue, e, trace)
else:
job._cached_status = job.status.hget()
if job._cached_status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif job._cached_status == STATUSES.CANCELED:
self.job_skipped(job, queue)
else:
self.job_success(job, queue, job_result)
except Exception as e:
self.log('[%s] unexpected error: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error')
try:
queue.errors.rpush(job.ident)
except Exception as e:
self.log('[%s] unable to add the error in the queue: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error') | [
"def",
"_main_loop",
"(",
"self",
")",
":",
"fetch_priorities_delay",
"=",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"fetch_priorities_delay",
")",
"fetch_delayed_delay",
"=",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"fetch_delayed_delay",
")",
"while",
"not",
"self",
".",
"must_stop",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"'waiting'",
")",
"if",
"self",
".",
"last_update_keys",
"+",
"fetch_priorities_delay",
"<",
"datetime",
".",
"utcnow",
"(",
")",
":",
"self",
".",
"update_keys",
"(",
")",
"if",
"self",
".",
"last_requeue_delayed",
"+",
"fetch_delayed_delay",
"<",
"datetime",
".",
"utcnow",
"(",
")",
":",
"self",
".",
"requeue_delayed_jobs",
"(",
")",
"try",
":",
"queue_and_job",
"=",
"self",
".",
"wait_for_job",
"(",
")",
"if",
"queue_and_job",
"is",
"None",
":",
"# timeout for blpop",
"continue",
"queue",
",",
"job",
"=",
"queue_and_job",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"'Unable to get job: %s\\n%s'",
"%",
"(",
"str",
"(",
"e",
")",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"level",
"=",
"'error'",
")",
"else",
":",
"self",
".",
"num_loops",
"+=",
"1",
"try",
":",
"identifier",
"=",
"'pk:%s'",
"%",
"job",
".",
"pk",
".",
"get",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"identifier",
"=",
"'??'",
"try",
":",
"self",
".",
"set_status",
"(",
"'running'",
")",
"identifier",
",",
"status",
"=",
"job",
".",
"hmget",
"(",
"'identifier'",
",",
"'status'",
")",
"# some cache, don't count on it on subclasses",
"job",
".",
"_cached_identifier",
"=",
"identifier",
"job",
".",
"_cached_status",
"=",
"status",
"queue",
".",
"_cached_name",
"=",
"queue",
".",
"name",
".",
"hget",
"(",
")",
"if",
"status",
"==",
"STATUSES",
".",
"DELAYED",
":",
"self",
".",
"job_delayed",
"(",
"job",
",",
"queue",
")",
"elif",
"status",
"!=",
"STATUSES",
".",
"WAITING",
":",
"self",
".",
"job_skipped",
"(",
"job",
",",
"queue",
")",
"else",
":",
"try",
":",
"self",
".",
"job_started",
"(",
"job",
",",
"queue",
")",
"job_result",
"=",
"self",
".",
"callback",
"(",
"job",
",",
"queue",
")",
"except",
"Exception",
"as",
"e",
":",
"trace",
"=",
"None",
"if",
"self",
".",
"save_tracebacks",
":",
"trace",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"job_error",
"(",
"job",
",",
"queue",
",",
"e",
",",
"trace",
")",
"else",
":",
"job",
".",
"_cached_status",
"=",
"job",
".",
"status",
".",
"hget",
"(",
")",
"if",
"job",
".",
"_cached_status",
"==",
"STATUSES",
".",
"DELAYED",
":",
"self",
".",
"job_delayed",
"(",
"job",
",",
"queue",
")",
"elif",
"job",
".",
"_cached_status",
"==",
"STATUSES",
".",
"CANCELED",
":",
"self",
".",
"job_skipped",
"(",
"job",
",",
"queue",
")",
"else",
":",
"self",
".",
"job_success",
"(",
"job",
",",
"queue",
",",
"job_result",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"'[%s] unexpected error: %s\\n%s'",
"%",
"(",
"identifier",
",",
"str",
"(",
"e",
")",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"level",
"=",
"'error'",
")",
"try",
":",
"queue",
".",
"errors",
".",
"rpush",
"(",
"job",
".",
"ident",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
"(",
"'[%s] unable to add the error in the queue: %s\\n%s'",
"%",
"(",
"identifier",
",",
"str",
"(",
"e",
")",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"level",
"=",
"'error'",
")"
] | Run jobs until must_stop returns True | [
"Run",
"jobs",
"until",
"must_stop",
"returns",
"True"
] | python | train |
quantopian/alphalens | alphalens/performance.py | https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/performance.py#L128-L204 | def factor_weights(factor_data,
demeaned=True,
group_adjust=False,
equal_weight=False):
"""
Computes asset weights by factor values and dividing by the sum of their
absolute value (achieving gross leverage of 1). Positive factor values will
result in positive weights and negative values in negative weights.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
demeaned : bool
Should this computation happen on a long short portfolio? if True,
weights are computed by demeaning factor values and dividing by the sum
of their absolute value (achieving gross leverage of 1). The sum of
positive weights will be the same as the negative weights (absolute
value), suitable for a dollar neutral long-short portfolio
group_adjust : bool
Should this computation happen on a group neutral portfolio? If True,
compute group neutral weights: each group will weight the same and
if 'demeaned' is enabled the factor values demeaning will occur on the
group level.
equal_weight : bool, optional
if True the assets will be equal-weighted instead of factor-weighted
If demeaned is True then the factor universe will be split in two
equal sized groups, top assets with positive weights and bottom assets
with negative weights
Returns
-------
returns : pd.Series
Assets weighted by factor value.
"""
def to_weights(group, _demeaned, _equal_weight):
if _equal_weight:
group = group.copy()
if _demeaned:
# top assets positive weights, bottom ones negative
group = group - group.median()
negative_mask = group < 0
group[negative_mask] = -1.0
positive_mask = group > 0
group[positive_mask] = 1.0
if _demeaned:
# positive weights must equal negative weights
if negative_mask.any():
group[negative_mask] /= negative_mask.sum()
if positive_mask.any():
group[positive_mask] /= positive_mask.sum()
elif _demeaned:
group = group - group.mean()
return group / group.abs().sum()
grouper = [factor_data.index.get_level_values('date')]
if group_adjust:
grouper.append('group')
weights = factor_data.groupby(grouper)['factor'] \
.apply(to_weights, demeaned, equal_weight)
if group_adjust:
weights = weights.groupby(level='date').apply(to_weights, False, False)
return weights | [
"def",
"factor_weights",
"(",
"factor_data",
",",
"demeaned",
"=",
"True",
",",
"group_adjust",
"=",
"False",
",",
"equal_weight",
"=",
"False",
")",
":",
"def",
"to_weights",
"(",
"group",
",",
"_demeaned",
",",
"_equal_weight",
")",
":",
"if",
"_equal_weight",
":",
"group",
"=",
"group",
".",
"copy",
"(",
")",
"if",
"_demeaned",
":",
"# top assets positive weights, bottom ones negative",
"group",
"=",
"group",
"-",
"group",
".",
"median",
"(",
")",
"negative_mask",
"=",
"group",
"<",
"0",
"group",
"[",
"negative_mask",
"]",
"=",
"-",
"1.0",
"positive_mask",
"=",
"group",
">",
"0",
"group",
"[",
"positive_mask",
"]",
"=",
"1.0",
"if",
"_demeaned",
":",
"# positive weights must equal negative weights",
"if",
"negative_mask",
".",
"any",
"(",
")",
":",
"group",
"[",
"negative_mask",
"]",
"/=",
"negative_mask",
".",
"sum",
"(",
")",
"if",
"positive_mask",
".",
"any",
"(",
")",
":",
"group",
"[",
"positive_mask",
"]",
"/=",
"positive_mask",
".",
"sum",
"(",
")",
"elif",
"_demeaned",
":",
"group",
"=",
"group",
"-",
"group",
".",
"mean",
"(",
")",
"return",
"group",
"/",
"group",
".",
"abs",
"(",
")",
".",
"sum",
"(",
")",
"grouper",
"=",
"[",
"factor_data",
".",
"index",
".",
"get_level_values",
"(",
"'date'",
")",
"]",
"if",
"group_adjust",
":",
"grouper",
".",
"append",
"(",
"'group'",
")",
"weights",
"=",
"factor_data",
".",
"groupby",
"(",
"grouper",
")",
"[",
"'factor'",
"]",
".",
"apply",
"(",
"to_weights",
",",
"demeaned",
",",
"equal_weight",
")",
"if",
"group_adjust",
":",
"weights",
"=",
"weights",
".",
"groupby",
"(",
"level",
"=",
"'date'",
")",
".",
"apply",
"(",
"to_weights",
",",
"False",
",",
"False",
")",
"return",
"weights"
] | Computes asset weights by factor values and dividing by the sum of their
absolute value (achieving gross leverage of 1). Positive factor values will
result in positive weights and negative values in negative weights.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
demeaned : bool
Should this computation happen on a long short portfolio? if True,
weights are computed by demeaning factor values and dividing by the sum
of their absolute value (achieving gross leverage of 1). The sum of
positive weights will be the same as the negative weights (absolute
value), suitable for a dollar neutral long-short portfolio
group_adjust : bool
Should this computation happen on a group neutral portfolio? If True,
compute group neutral weights: each group will weight the same and
if 'demeaned' is enabled the factor values demeaning will occur on the
group level.
equal_weight : bool, optional
if True the assets will be equal-weighted instead of factor-weighted
If demeaned is True then the factor universe will be split in two
equal sized groups, top assets with positive weights and bottom assets
with negative weights
Returns
-------
returns : pd.Series
Assets weighted by factor value. | [
"Computes",
"asset",
"weights",
"by",
"factor",
"values",
"and",
"dividing",
"by",
"the",
"sum",
"of",
"their",
"absolute",
"value",
"(",
"achieving",
"gross",
"leverage",
"of",
"1",
")",
".",
"Positive",
"factor",
"values",
"will",
"results",
"in",
"positive",
"weights",
"and",
"negative",
"values",
"in",
"negative",
"weights",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/passa/internals/utils.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/internals/utils.py#L20-L59 | def get_pinned_version(ireq):
"""Get the pinned version of an InstallRequirement.
An InstallRequirement is considered pinned if:
- Is not editable
- It has exactly one specifier
- That specifier is "=="
- The version does not contain a wildcard
Examples:
django==1.8 # pinned
django>1.8 # NOT pinned
django~=1.8 # NOT pinned
django==1.* # NOT pinned
Raises `TypeError` if the input is not a valid InstallRequirement, or
`ValueError` if the InstallRequirement is not pinned.
"""
try:
specifier = ireq.specifier
except AttributeError:
raise TypeError("Expected InstallRequirement, not {}".format(
type(ireq).__name__,
))
if ireq.editable:
raise ValueError("InstallRequirement is editable")
if not specifier:
raise ValueError("InstallRequirement has no version specification")
if len(specifier._specs) != 1:
raise ValueError("InstallRequirement has multiple specifications")
op, version = next(iter(specifier._specs))._spec
if op not in ('==', '===') or version.endswith('.*'):
raise ValueError("InstallRequirement not pinned (is {0!r})".format(
op + version,
))
return version | [
"def",
"get_pinned_version",
"(",
"ireq",
")",
":",
"try",
":",
"specifier",
"=",
"ireq",
".",
"specifier",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"\"Expected InstallRequirement, not {}\"",
".",
"format",
"(",
"type",
"(",
"ireq",
")",
".",
"__name__",
",",
")",
")",
"if",
"ireq",
".",
"editable",
":",
"raise",
"ValueError",
"(",
"\"InstallRequirement is editable\"",
")",
"if",
"not",
"specifier",
":",
"raise",
"ValueError",
"(",
"\"InstallRequirement has no version specification\"",
")",
"if",
"len",
"(",
"specifier",
".",
"_specs",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"InstallRequirement has multiple specifications\"",
")",
"op",
",",
"version",
"=",
"next",
"(",
"iter",
"(",
"specifier",
".",
"_specs",
")",
")",
".",
"_spec",
"if",
"op",
"not",
"in",
"(",
"'=='",
",",
"'==='",
")",
"or",
"version",
".",
"endswith",
"(",
"'.*'",
")",
":",
"raise",
"ValueError",
"(",
"\"InstallRequirement not pinned (is {0!r})\"",
".",
"format",
"(",
"op",
"+",
"version",
",",
")",
")",
"return",
"version"
] | Get the pinned version of an InstallRequirement.
An InstallRequirement is considered pinned if:
- Is not editable
- It has exactly one specifier
- That specifier is "=="
- The version does not contain a wildcard
Examples:
django==1.8 # pinned
django>1.8 # NOT pinned
django~=1.8 # NOT pinned
django==1.* # NOT pinned
Raises `TypeError` if the input is not a valid InstallRequirement, or
`ValueError` if the InstallRequirement is not pinned. | [
"Get",
"the",
"pinned",
"version",
"of",
"an",
"InstallRequirement",
"."
] | python | train |
anntzer/mplcursors | lib/mplcursors/_pick_info.py | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L255-L270 | def _untransform(orig_xy, screen_xy, ax):
"""
Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.)
"""
tr_xy = ax.transData.transform(orig_xy)
return (
orig_xy
if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all()
else ax.transData.inverted().transform(screen_xy)) | [
"def",
"_untransform",
"(",
"orig_xy",
",",
"screen_xy",
",",
"ax",
")",
":",
"tr_xy",
"=",
"ax",
".",
"transData",
".",
"transform",
"(",
"orig_xy",
")",
"return",
"(",
"orig_xy",
"if",
"(",
"(",
"tr_xy",
"==",
"screen_xy",
")",
"|",
"np",
".",
"isnan",
"(",
"tr_xy",
")",
"&",
"np",
".",
"isnan",
"(",
"screen_xy",
")",
")",
".",
"all",
"(",
")",
"else",
"ax",
".",
"transData",
".",
"inverted",
"(",
")",
".",
"transform",
"(",
"screen_xy",
")",
")"
] | Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.) | [
"Return",
"data",
"coordinates",
"to",
"place",
"an",
"annotation",
"at",
"screen",
"coordinates",
"*",
"screen_xy",
"*",
"in",
"axes",
"*",
"ax",
"*",
"."
] | python | train |
alvinwan/TexSoup | TexSoup/reader.py | https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/reader.py#L158-L165 | def tokenize_argument(text):
"""Process both optional and required arguments.
:param Buffer text: iterator over line, with current position
"""
for delim in ARG_TOKENS:
if text.startswith(delim):
return text.forward(len(delim)) | [
"def",
"tokenize_argument",
"(",
"text",
")",
":",
"for",
"delim",
"in",
"ARG_TOKENS",
":",
"if",
"text",
".",
"startswith",
"(",
"delim",
")",
":",
"return",
"text",
".",
"forward",
"(",
"len",
"(",
"delim",
")",
")"
] | Process both optional and required arguments.
:param Buffer text: iterator over line, with current position | [
"Process",
"both",
"optional",
"and",
"required",
"arguments",
"."
] | python | train |
bear/parsedatetime | parsedatetime/__init__.py | https://github.com/bear/parsedatetime/blob/830775dc5e36395622b41f12317f5e10c303d3a2/parsedatetime/__init__.py#L1155-L1179 | def _evalWeekday(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseWeekday()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is a weekday
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
wkdy = self.ptc.WeekdayOffsets[s]
if wkdy > wd:
qty = self._CalculateDOWDelta(wd, wkdy, 2,
self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
else:
qty = self._CalculateDOWDelta(wd, wkdy, 2,
self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
target = start + datetime.timedelta(days=qty)
return target.timetuple() | [
"def",
"_evalWeekday",
"(",
"self",
",",
"datetimeString",
",",
"sourceTime",
")",
":",
"s",
"=",
"datetimeString",
".",
"strip",
"(",
")",
"sourceTime",
"=",
"self",
".",
"_evalDT",
"(",
"datetimeString",
",",
"sourceTime",
")",
"# Given string is a weekday",
"yr",
",",
"mth",
",",
"dy",
",",
"hr",
",",
"mn",
",",
"sec",
",",
"wd",
",",
"yd",
",",
"isdst",
"=",
"sourceTime",
"start",
"=",
"datetime",
".",
"datetime",
"(",
"yr",
",",
"mth",
",",
"dy",
",",
"hr",
",",
"mn",
",",
"sec",
")",
"wkdy",
"=",
"self",
".",
"ptc",
".",
"WeekdayOffsets",
"[",
"s",
"]",
"if",
"wkdy",
">",
"wd",
":",
"qty",
"=",
"self",
".",
"_CalculateDOWDelta",
"(",
"wd",
",",
"wkdy",
",",
"2",
",",
"self",
".",
"ptc",
".",
"DOWParseStyle",
",",
"self",
".",
"ptc",
".",
"CurrentDOWParseStyle",
")",
"else",
":",
"qty",
"=",
"self",
".",
"_CalculateDOWDelta",
"(",
"wd",
",",
"wkdy",
",",
"2",
",",
"self",
".",
"ptc",
".",
"DOWParseStyle",
",",
"self",
".",
"ptc",
".",
"CurrentDOWParseStyle",
")",
"self",
".",
"currentContext",
".",
"updateAccuracy",
"(",
"pdtContext",
".",
"ACU_DAY",
")",
"target",
"=",
"start",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"qty",
")",
"return",
"target",
".",
"timetuple",
"(",
")"
] | Evaluate text passed by L{_partialParseWeekday()} | [
"Evaluate",
"text",
"passed",
"by",
"L",
"{",
"_partialParseWeekday",
"()",
"}"
] | python | train |
AtteqCom/zsl | src/zsl/utils/string_helper.py | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/string_helper.py#L85-L104 | def addslashes(s, escaped_chars=None):
"""Add slashes for given characters. Default is for ``\`` and ``'``.
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'"
"""
if escaped_chars is None:
escaped_chars = ["\\", "'", ]
# l = ["\\", '"', "'", "\0", ]
for i in escaped_chars:
if i in s:
s = s.replace(i, '\\' + i)
return s | [
"def",
"addslashes",
"(",
"s",
",",
"escaped_chars",
"=",
"None",
")",
":",
"if",
"escaped_chars",
"is",
"None",
":",
"escaped_chars",
"=",
"[",
"\"\\\\\"",
",",
"\"'\"",
",",
"]",
"# l = [\"\\\\\", '\"', \"'\", \"\\0\", ]",
"for",
"i",
"in",
"escaped_chars",
":",
"if",
"i",
"in",
"s",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"i",
",",
"'\\\\'",
"+",
"i",
")",
"return",
"s"
] | Add slashes for given characters. Default is for ``\`` and ``'``.
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'" | [
"Add",
"slashes",
"for",
"given",
"characters",
".",
"Default",
"is",
"for",
"\\",
"and",
"."
] | python | train |
wummel/linkchecker | linkcheck/plugins/parseword.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/plugins/parseword.py#L93-L96 | def open_wordfile (app, filename):
"""Open given Word file with application object."""
return app.Documents.Open(filename, ReadOnly=True,
AddToRecentFiles=False, Visible=False, NoEncodingDialog=True) | [
"def",
"open_wordfile",
"(",
"app",
",",
"filename",
")",
":",
"return",
"app",
".",
"Documents",
".",
"Open",
"(",
"filename",
",",
"ReadOnly",
"=",
"True",
",",
"AddToRecentFiles",
"=",
"False",
",",
"Visible",
"=",
"False",
",",
"NoEncodingDialog",
"=",
"True",
")"
] | Open given Word file with application object. | [
"Open",
"given",
"Word",
"file",
"with",
"application",
"object",
"."
] | python | train |
prechelt/typecheck-decorator | typecheck/framework.py | https://github.com/prechelt/typecheck-decorator/blob/4aa5a7f17235c70b5b787c9e80bb1f24d3f15933/typecheck/framework.py#L96-L121 | def is_compatible(self, typevar, its_type):
"""
Checks whether its_type conforms to typevar.
If the typevar is not yet bound, it will be bound to its_type.
The covariance/contravariance checking described in the respective section
of PEP484 applies to declared types, but here we have actual types;
therefore, (1) subtypes are always compatible, (2) we may have to
rebind the type variable to supertypes of the current binding several
times until the required most general binding is found.
"""
result = True
binding = self.binding_of(typevar) # may or may not exist
if binding is None:
self.bind(typevar, its_type) # initial binding, OK
elif issubclass(binding, its_type):
self.bind(typevar, its_type) # rebind to supertype, OK
elif not issubclass(its_type, binding): # accept like TypeChecker
return False
binding = self.binding_of(typevar) # will now exist
if (typevar.__bound__ and
not issubclass(binding, typevar.__bound__)):
return False # bound violation
if (len(typevar.__constraints__) > 0 and
not issubclass(binding, tg.Union[typevar.__constraints__])):
return False # constraint violation
return True | [
"def",
"is_compatible",
"(",
"self",
",",
"typevar",
",",
"its_type",
")",
":",
"result",
"=",
"True",
"binding",
"=",
"self",
".",
"binding_of",
"(",
"typevar",
")",
"# may or may not exist",
"if",
"binding",
"is",
"None",
":",
"self",
".",
"bind",
"(",
"typevar",
",",
"its_type",
")",
"# initial binding, OK",
"elif",
"issubclass",
"(",
"binding",
",",
"its_type",
")",
":",
"self",
".",
"bind",
"(",
"typevar",
",",
"its_type",
")",
"# rebind to supertype, OK",
"elif",
"not",
"issubclass",
"(",
"its_type",
",",
"binding",
")",
":",
"# accept like TypeChecker",
"return",
"False",
"binding",
"=",
"self",
".",
"binding_of",
"(",
"typevar",
")",
"# will now exist",
"if",
"(",
"typevar",
".",
"__bound__",
"and",
"not",
"issubclass",
"(",
"binding",
",",
"typevar",
".",
"__bound__",
")",
")",
":",
"return",
"False",
"# bound violation",
"if",
"(",
"len",
"(",
"typevar",
".",
"__constraints__",
")",
">",
"0",
"and",
"not",
"issubclass",
"(",
"binding",
",",
"tg",
".",
"Union",
"[",
"typevar",
".",
"__constraints__",
"]",
")",
")",
":",
"return",
"False",
"# constraint violation",
"return",
"True"
] | Checks whether its_type conforms to typevar.
If the typevar is not yet bound, it will be bound to its_type.
The covariance/contravariance checking described in the respective section
of PEP484 applies to declared types, but here we have actual types;
therefore, (1) subtypes are always compatible, (2) we may have to
rebind the type variable to supertypes of the current binding several
times until the required most general binding is found. | [
"Checks",
"whether",
"its_type",
"conforms",
"to",
"typevar",
".",
"If",
"the",
"typevar",
"is",
"not",
"yet",
"bound",
"it",
"will",
"be",
"bound",
"to",
"its_type",
".",
"The",
"covariance",
"/",
"contravariance",
"checking",
"described",
"in",
"the",
"respective",
"section",
"of",
"PEP484",
"applies",
"to",
"declared",
"types",
"but",
"here",
"we",
"have",
"actual",
"types",
";",
"therefore",
"(",
"1",
")",
"subtypes",
"are",
"always",
"compatible",
"(",
"2",
")",
"we",
"may",
"have",
"to",
"rebind",
"the",
"type",
"variable",
"to",
"supertypes",
"of",
"the",
"current",
"binding",
"several",
"times",
"until",
"the",
"required",
"most",
"general",
"binding",
"is",
"found",
"."
] | python | train |
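A standalone sketch of the widening behaviour described above, not the library class itself; the ``Namespace`` name is invented and the bound/constraint checks are omitted.

import typing as tg

T = tg.TypeVar('T')

class Namespace(object):
    # Toy model: a TypeVar binding starts at the first observed type
    # and widens to supertypes as further values are seen.
    def __init__(self):
        self._bindings = {}
    def binding_of(self, typevar):
        return self._bindings.get(typevar)
    def bind(self, typevar, its_type):
        self._bindings[typevar] = its_type
    def is_compatible(self, typevar, its_type):
        binding = self.binding_of(typevar)
        if binding is None or issubclass(binding, its_type):
            self.bind(typevar, its_type)   # initial bind, or rebind to the supertype
        elif not issubclass(its_type, binding):
            return False                   # unrelated type: incompatible
        return True

ns = Namespace()
assert ns.is_compatible(T, bool)        # binds T -> bool
assert ns.is_compatible(T, int)         # widens T -> int (bool is a subclass of int)
assert not ns.is_compatible(T, str)     # str is unrelated to int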
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/rotmat.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/rotmat.py#L313-L323 | def from_two_vectors(self, vec1, vec2):
'''get a rotation matrix from two vectors.
This returns a rotation matrix which when applied to vec1
will produce a vector pointing in the same direction as vec2'''
angle = vec1.angle(vec2)
cross = vec1 % vec2
if cross.length() == 0:
# the two vectors are colinear
return self.from_euler(0,0,angle)
cross.normalize()
return self.from_axis_angle(cross, angle) | [
"def",
"from_two_vectors",
"(",
"self",
",",
"vec1",
",",
"vec2",
")",
":",
"angle",
"=",
"vec1",
".",
"angle",
"(",
"vec2",
")",
"cross",
"=",
"vec1",
"%",
"vec2",
"if",
"cross",
".",
"length",
"(",
")",
"==",
"0",
":",
"# the two vectors are colinear",
"return",
"self",
".",
"from_euler",
"(",
"0",
",",
"0",
",",
"angle",
")",
"cross",
".",
"normalize",
"(",
")",
"return",
"self",
".",
"from_axis_angle",
"(",
"cross",
",",
"angle",
")"
] | get a rotation matrix from two vectors.
This returns a rotation matrix which when applied to vec1
will produce a vector pointing in the same direction as vec2 | [
"get",
"a",
"rotation",
"matrix",
"from",
"two",
"vectors",
".",
"This",
"returns",
"a",
"rotation",
"matrix",
"which",
"when",
"applied",
"to",
"vec1",
"will",
"produce",
"a",
"vector",
"pointing",
"in",
"the",
"same",
"direction",
"as",
"vec2"
] | python | train |
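A minimal usage sketch, assuming the standalone ``pymavlink`` package exposes the same ``rotmat`` module that is vendored here, and that ``Matrix3.__mul__`` accepts a ``Vector3``.

from pymavlink.rotmat import Matrix3, Vector3

v1 = Vector3(1, 0, 0)
v2 = Vector3(0, 1, 0)
m = Matrix3()
m.from_two_vectors(v1, v2)  # rotation taking the direction of v1 onto v2
print(m * v1)               # expected to point along (0, 1, 0)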
theislab/anndata | anndata/readwrite/write.py | https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/readwrite/write.py#L20-L67 | def write_csvs(dirname: PathLike, adata: AnnData, skip_data: bool = True, sep: str = ','):
"""See :meth:`~anndata.AnnData.write_csvs`.
"""
dirname = Path(dirname)
if dirname.suffix == '.csv':
dirname = dirname.with_suffix('')
logger.info("writing '.csv' files to %s", dirname)
if not dirname.is_dir():
dirname.mkdir(parents=True, exist_ok=True)
dir_uns = dirname / 'uns'
if not dir_uns.is_dir():
dir_uns.mkdir(parents=True, exist_ok=True)
d = dict(
obs=adata._obs,
var=adata._var,
obsm=adata._obsm.to_df(),
varm=adata._varm.to_df(),
)
if not skip_data:
d['X'] = pd.DataFrame(
adata._X.toarray() if issparse(adata._X) else adata._X)
d_write = {**d, **adata._uns}
not_yet_raised_sparse_warning = True
for key, value in d_write.items():
if issparse(value):
if not_yet_raised_sparse_warning:
warnings.warn('Omitting to write sparse annotation.')
not_yet_raised_sparse_warning = False
continue
filename = dirname
if key not in {'X', 'var', 'obs', 'obsm', 'varm'}:
filename = dir_uns
filename /= '{}.csv'.format(key)
df = value
if not isinstance(value, pd.DataFrame):
value = np.array(value)
if np.ndim(value) == 0:
value = value[None]
try:
df = pd.DataFrame(value)
except Exception as e:
warnings.warn('Omitting to write {!r}.'.format(key), type(e))
continue
df.to_csv(
filename, sep=sep,
header=key in {'obs', 'var', 'obsm', 'varm'},
index=key in {'obs', 'var'},
) | [
"def",
"write_csvs",
"(",
"dirname",
":",
"PathLike",
",",
"adata",
":",
"AnnData",
",",
"skip_data",
":",
"bool",
"=",
"True",
",",
"sep",
":",
"str",
"=",
"','",
")",
":",
"dirname",
"=",
"Path",
"(",
"dirname",
")",
"if",
"dirname",
".",
"suffix",
"==",
"'.csv'",
":",
"dirname",
"=",
"dirname",
".",
"with_suffix",
"(",
"''",
")",
"logger",
".",
"info",
"(",
"\"writing '.csv' files to %s\"",
",",
"dirname",
")",
"if",
"not",
"dirname",
".",
"is_dir",
"(",
")",
":",
"dirname",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"dir_uns",
"=",
"dirname",
"/",
"'uns'",
"if",
"not",
"dir_uns",
".",
"is_dir",
"(",
")",
":",
"dir_uns",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"d",
"=",
"dict",
"(",
"obs",
"=",
"adata",
".",
"_obs",
",",
"var",
"=",
"adata",
".",
"_var",
",",
"obsm",
"=",
"adata",
".",
"_obsm",
".",
"to_df",
"(",
")",
",",
"varm",
"=",
"adata",
".",
"_varm",
".",
"to_df",
"(",
")",
",",
")",
"if",
"not",
"skip_data",
":",
"d",
"[",
"'X'",
"]",
"=",
"pd",
".",
"DataFrame",
"(",
"adata",
".",
"_X",
".",
"toarray",
"(",
")",
"if",
"issparse",
"(",
"adata",
".",
"_X",
")",
"else",
"adata",
".",
"_X",
")",
"d_write",
"=",
"{",
"*",
"*",
"d",
",",
"*",
"*",
"adata",
".",
"_uns",
"}",
"not_yet_raised_sparse_warning",
"=",
"True",
"for",
"key",
",",
"value",
"in",
"d_write",
".",
"items",
"(",
")",
":",
"if",
"issparse",
"(",
"value",
")",
":",
"if",
"not_yet_raised_sparse_warning",
":",
"warnings",
".",
"warn",
"(",
"'Omitting to write sparse annotation.'",
")",
"not_yet_raised_sparse_warning",
"=",
"False",
"continue",
"filename",
"=",
"dirname",
"if",
"key",
"not",
"in",
"{",
"'X'",
",",
"'var'",
",",
"'obs'",
",",
"'obsm'",
",",
"'varm'",
"}",
":",
"filename",
"=",
"dir_uns",
"filename",
"/=",
"'{}.csv'",
".",
"format",
"(",
"key",
")",
"df",
"=",
"value",
"if",
"not",
"isinstance",
"(",
"value",
",",
"pd",
".",
"DataFrame",
")",
":",
"value",
"=",
"np",
".",
"array",
"(",
"value",
")",
"if",
"np",
".",
"ndim",
"(",
"value",
")",
"==",
"0",
":",
"value",
"=",
"value",
"[",
"None",
"]",
"try",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"value",
")",
"except",
"Exception",
"as",
"e",
":",
"warnings",
".",
"warn",
"(",
"'Omitting to write {!r}.'",
".",
"format",
"(",
"key",
")",
",",
"type",
"(",
"e",
")",
")",
"continue",
"df",
".",
"to_csv",
"(",
"filename",
",",
"sep",
"=",
"sep",
",",
"header",
"=",
"key",
"in",
"{",
"'obs'",
",",
"'var'",
",",
"'obsm'",
",",
"'varm'",
"}",
",",
"index",
"=",
"key",
"in",
"{",
"'obs'",
",",
"'var'",
"}",
",",
")"
] | See :meth:`~anndata.AnnData.write_csvs`. | [
"See",
":",
"meth",
":",
"~anndata",
".",
"AnnData",
".",
"write_csvs",
"."
] | python | train |
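A minimal sketch of the corresponding ``AnnData.write_csvs`` method referenced in the docstring; the output directory name is arbitrary.

import numpy as np
import anndata as ad

adata = ad.AnnData(np.ones((3, 2), dtype=np.float32))
# writes X.csv, obs.csv, var.csv, obsm.csv, varm.csv and an uns/ folder
adata.write_csvs('out_dir', skip_data=False)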
Esri/ArcREST | src/arcrest/enrichment/_geoenrichment.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/enrichment/_geoenrichment.py#L445-L546 | def getVariables(self,
sourceCountry,
optionalCountryDataset=None,
searchText=None):
r"""
The GeoEnrichment GetVariables helper method allows you to search
the data collections for variables that contain specific keywords.
To see the comprehensive set of global Esri Demographics data that
are available, use the interactive data browser:
http://resources.arcgis.com/en/help/arcgis-rest-api/02r3/02r300000266000000.htm#GUID-2D66F7F8-83A9-4EAA-B5E2-F09D629939CE
Inputs:
sourceCountry - specify the source country for the search. Use this
parameter to limit the search and query of standard geographic
features to one country. This parameter supports both the
two-digit and three-digit country codes illustrated in the
coverage table.
Examples
Example 1 - Set source country to the United States:
sourceCountry=US
Example 2 - Set source country to the Canada:
sourceCountry=CA
Additional notes
Currently, the service is available for Canada, the
United States and a number of European countries. Other
countries will be added in the near future.
The list of available countries and their associated
IDS are listed in the coverage section.
optionalCountryDataset - Optional parameter to specify a specific
dataset within a defined country. This parameter will not be used
in the Beta release. In the future, some countries may have two or
more datasets that may have different vintages and standard
geography areas. For example, in the United States, there may be
an optional dataset with historic census data from previous years.
Examples
optionalCountryDataset=USA_ESRI_2013
Additional notes
Most countries only have a single dataset.
The United States has multiple datasets.
searchText - Optional parameter to specify the text to query and
search the data collections for the country and datasets
specified. You can use this parameter to query and find specific
keywords that are contained in a data collection.
Default value
(null or empty)
Examples
            Example 1 - Return all the data collections and variables that contain the word furniture:
searchText=furniture
Search terms
A query is broken up into terms and operators. There are two types of terms: Single Terms and Phrases.
A Single Term is a single word such as "Income" or "Households".
A Phrase is a group of words surrounded by double quotes such as "Household Income".
Multiple terms can be combined together with Boolean operators to form a more complex query (see below).
Fields
Geography search supports fielded data. When performing a search, you can either specify a field or use search through all fields.
You can search any field by typing the field name followed by a colon ":" then the term you are looking for.
For example, to search for "Income" in the Alias field:
Alias:Income
The search supports single and multiple character wildcard searches within single terms (not within phrase queries).
To perform a single character wildcard search, use the "?" symbol.
To perform a multiple character wildcard search, use the "*" symbol.
The single character wildcard search looks for terms that match that with the single character replaced. For example, to search for "San" or "Sen" you can use the search:
Fuzzy searches
Fuzzy searches are based on the Levenshtein Distance or Edit Distance algorithm. To perform a fuzzy search, you can explicitly set a fuzzy search by using the tilde symbol "~" at the end of a Single Term.
For example, a term similar in spelling to "Hous" uses the fuzzy search:
Hous~
An additional (optional) numeric parameter can be specified after the tilde symbol ("~") to set the similarity tolerance. The value is between 0 and 1; with a value closer to 1, only terms with a higher similarity will be matched.
For example, if you only want to match terms with a similarity of 0.0 or higher, you can set the fuzzy search as follows:
hous~0.8
The default that is used if the optional similarity number is not provided is 0.5.
Boolean operators
Boolean operators allow terms to be combined through logic operators. The search supports AND, "+", OR, NOT and "-" as Boolean operators. Boolean operators must be ALL CAPS.
In searchText , the AND operator is the default conjunction operator. This means that if there is no Boolean operator between two or more terms, the AND operator is used. The AND operator matches items where both terms exist anywhere in the list of standard geography features. The symbol "&" can be used in place of the word AND.
            The OR operator links two terms and finds a matching variable if either of the terms exist. This is equivalent to a union when using sets. The symbol "||" can be used in place of the word OR.
To search for features that contain either "Income" or "Wealth" use the following query:
Income OR Wealth
The "+" or required operator requires that the term after the "+" symbol exist somewhere in the attributes of a variable.
To search for features that must contain "Income" and may contain "Household" use the following query:
+Income OR Household
Escaping Special Characters
Search supports escaping special characters that are part of the query syntax. The available special characters are as follows:
+ - && || ! ( ) { } [ ] ^ " ~ * ? : \
To escape these characters, use the \ before the character.
"""
url = self._base_url + self._url_getVariables
params = {
"f" : "json",
"sourceCountry" : sourceCountry
}
if not searchText is None:
params["searchText"] = searchText
if not optionalCountryDataset is None:
params['optionalCountryDataset'] = optionalCountryDataset
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | [
"def",
"getVariables",
"(",
"self",
",",
"sourceCountry",
",",
"optionalCountryDataset",
"=",
"None",
",",
"searchText",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"_base_url",
"+",
"self",
".",
"_url_getVariables",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
",",
"\"sourceCountry\"",
":",
"sourceCountry",
"}",
"if",
"not",
"searchText",
"is",
"None",
":",
"params",
"[",
"\"searchText\"",
"]",
"=",
"searchText",
"if",
"not",
"optionalCountryDataset",
"is",
"None",
":",
"params",
"[",
"'optionalCountryDataset'",
"]",
"=",
"optionalCountryDataset",
"return",
"self",
".",
"_post",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] | r"""
The GeoEnrichment GetVariables helper method allows you to search
the data collections for variables that contain specific keywords.
To see the comprehensive set of global Esri Demographics data that
are available, use the interactive data browser:
http://resources.arcgis.com/en/help/arcgis-rest-api/02r3/02r300000266000000.htm#GUID-2D66F7F8-83A9-4EAA-B5E2-F09D629939CE
Inputs:
sourceCountry - specify the source country for the search. Use this
parameter to limit the search and query of standard geographic
features to one country. This parameter supports both the
two-digit and three-digit country codes illustrated in the
coverage table.
Examples
Example 1 - Set source country to the United States:
sourceCountry=US
Example 2 - Set source country to the Canada:
sourceCountry=CA
Additional notes
Currently, the service is available for Canada, the
United States and a number of European countries. Other
countries will be added in the near future.
The list of available countries and their associated
IDS are listed in the coverage section.
optionalCountryDataset - Optional parameter to specify a specific
dataset within a defined country. This parameter will not be used
in the Beta release. In the future, some countries may have two or
more datasets that may have different vintages and standard
geography areas. For example, in the United States, there may be
an optional dataset with historic census data from previous years.
Examples
optionalCountryDataset=USA_ESRI_2013
Additional notes
Most countries only have a single dataset.
The United States has multiple datasets.
searchText - Optional parameter to specify the text to query and
search the data collections for the country and datasets
specified. You can use this parameter to query and find specific
keywords that are contained in a data collection.
Default value
(null or empty)
Examples
            Example 1 - Return all the data collections and variables that contain the word furniture:
searchText=furniture
Search terms
A query is broken up into terms and operators. There are two types of terms: Single Terms and Phrases.
A Single Term is a single word such as "Income" or "Households".
A Phrase is a group of words surrounded by double quotes such as "Household Income".
Multiple terms can be combined together with Boolean operators to form a more complex query (see below).
Fields
Geography search supports fielded data. When performing a search, you can either specify a field or use search through all fields.
You can search any field by typing the field name followed by a colon ":" then the term you are looking for.
For example, to search for "Income" in the Alias field:
Alias:Income
The search supports single and multiple character wildcard searches within single terms (not within phrase queries).
To perform a single character wildcard search, use the "?" symbol.
To perform a multiple character wildcard search, use the "*" symbol.
The single character wildcard search looks for terms that match that with the single character replaced. For example, to search for "San" or "Sen" you can use the search:
Fuzzy searches
Fuzzy searches are based on the Levenshtein Distance or Edit Distance algorithm. To perform a fuzzy search, you can explicitly set a fuzzy search by using the tilde symbol "~" at the end of a Single Term.
For example, a term similar in spelling to "Hous" uses the fuzzy search:
Hous~
An additional (optional) numeric parameter can be specified after the tilde symbol ("~") to set the similarity tolerance. The value is between 0 and 1; with a value closer to 1, only terms with a higher similarity will be matched.
For example, if you only want to match terms with a similarity of 0.0 or higher, you can set the fuzzy search as follows:
hous~0.8
The default that is used if the optional similarity number is not provided is 0.5.
Boolean operators
Boolean operators allow terms to be combined through logic operators. The search supports AND, "+", OR, NOT and "-" as Boolean operators. Boolean operators must be ALL CAPS.
In searchText , the AND operator is the default conjunction operator. This means that if there is no Boolean operator between two or more terms, the AND operator is used. The AND operator matches items where both terms exist anywhere in the list of standard geography features. The symbol "&" can be used in place of the word AND.
            The OR operator links two terms and finds a matching variable if either of the terms exist. This is equivalent to a union when using sets. The symbol "||" can be used in place of the word OR.
To search for features that contain either "Income" or "Wealth" use the following query:
Income OR Wealth
The "+" or required operator requires that the term after the "+" symbol exist somewhere in the attributes of a variable.
To search for features that must contain "Income" and may contain "Household" use the following query:
+Income OR Household
Escaping Special Characters
Search supports escaping special characters that are part of the query syntax. The available special characters are as follows:
+ - && || ! ( ) { } [ ] ^ " ~ * ? : \
To escape these characters, use the \ before the character. | [
"r",
"The",
"GeoEnrichment",
"GetVariables",
"helper",
"method",
"allows",
"you",
"to",
"search",
"the",
"data",
"collections",
"for",
"variables",
"that",
"contain",
"specific",
"keywords",
"."
] | python | train |
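An illustrative call only; how the GeoEnrichment object (``ge`` below) is constructed — base URL, security handler, proxy — is an assumption that depends on the surrounding ArcREST setup.

result = ge.getVariables(sourceCountry='US',
                         searchText='income OR wealth')
print(result)  # JSON response listing matching data collections and variables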
jobovy/galpy | galpy/potential/SoftenedNeedleBarPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/SoftenedNeedleBarPotential.py#L112-L129 | def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2016-11-02 - Written - Bovy (UofT)
"""
self._compute_xyzforces(R,z,phi,t)
return numpy.cos(phi)*self._cached_Fx+numpy.sin(phi)*self._cached_Fy | [
"def",
"_Rforce",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"self",
".",
"_compute_xyzforces",
"(",
"R",
",",
"z",
",",
"phi",
",",
"t",
")",
"return",
"numpy",
".",
"cos",
"(",
"phi",
")",
"*",
"self",
".",
"_cached_Fx",
"+",
"numpy",
".",
"sin",
"(",
"phi",
")",
"*",
"self",
".",
"_cached_Fy"
] | NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2016-11-02 - Written - Bovy (UofT) | [
"NAME",
":",
"_Rforce",
"PURPOSE",
":",
"evaluate",
"the",
"radial",
"force",
"for",
"this",
"potential",
"INPUT",
":",
"R",
"-",
"Galactocentric",
"cylindrical",
"radius",
"z",
"-",
"vertical",
"height",
"phi",
"-",
"azimuth",
"t",
"-",
"time",
"OUTPUT",
":",
"the",
"radial",
"force",
"HISTORY",
":",
"2016",
"-",
"11",
"-",
"02",
"-",
"Written",
"-",
"Bovy",
"(",
"UofT",
")"
] | python | train |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/context.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/context.py#L323-L326 | def _flush_ndb_puts(self, items, options):
"""Flush all NDB puts to datastore."""
assert ndb is not None
ndb.put_multi(items, config=self._create_config(options)) | [
"def",
"_flush_ndb_puts",
"(",
"self",
",",
"items",
",",
"options",
")",
":",
"assert",
"ndb",
"is",
"not",
"None",
"ndb",
".",
"put_multi",
"(",
"items",
",",
"config",
"=",
"self",
".",
"_create_config",
"(",
"options",
")",
")"
] | Flush all NDB puts to datastore. | [
"Flush",
"all",
"NDB",
"puts",
"to",
"datastore",
"."
] | python | train |
seequent/properties | properties/basic.py | https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/basic.py#L376-L383 | def sphinx_class(self):
"""Property class name formatted for Sphinx doc linking"""
classdoc = ':class:`{cls} <{pref}.{cls}>`'
if self.__module__.split('.')[0] == 'properties':
pref = 'properties'
else:
pref = text_type(self.__module__)
return classdoc.format(cls=self.__class__.__name__, pref=pref) | [
"def",
"sphinx_class",
"(",
"self",
")",
":",
"classdoc",
"=",
"':class:`{cls} <{pref}.{cls}>`'",
"if",
"self",
".",
"__module__",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"==",
"'properties'",
":",
"pref",
"=",
"'properties'",
"else",
":",
"pref",
"=",
"text_type",
"(",
"self",
".",
"__module__",
")",
"return",
"classdoc",
".",
"format",
"(",
"cls",
"=",
"self",
".",
"__class__",
".",
"__name__",
",",
"pref",
"=",
"pref",
")"
] | Property class name formatted for Sphinx doc linking | [
"Property",
"class",
"name",
"formatted",
"for",
"Sphinx",
"doc",
"linking"
] | python | train |
rq/Flask-RQ2 | src/flask_rq2/functions.py | https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/functions.py#L65-L139 | def queue(self, *args, **kwargs):
"""
A function to queue a RQ job, e.g.::
@rq.job(timeout=60)
def add(x, y):
return x + y
add.queue(1, 2, timeout=30)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
queue of of job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param job_id: A custom ID for the new job. Defaults to an
:mod:`UUID <uuid>`.
:type job_id: str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
depends_on = kwargs.pop('depends_on', self._depends_on)
job_id = kwargs.pop('job_id', None)
at_front = kwargs.pop('at_front', self._at_front)
meta = kwargs.pop('meta', self._meta)
description = kwargs.pop('description', self._description)
return self.rq.get_queue(queue_name).enqueue_call(
self.wrapped,
args=args,
kwargs=kwargs,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
job_id=job_id,
at_front=at_front,
meta=meta,
description=description,
) | [
"def",
"queue",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"queue_name",
"=",
"kwargs",
".",
"pop",
"(",
"'queue'",
",",
"self",
".",
"queue_name",
")",
"timeout",
"=",
"kwargs",
".",
"pop",
"(",
"'timeout'",
",",
"self",
".",
"timeout",
")",
"result_ttl",
"=",
"kwargs",
".",
"pop",
"(",
"'result_ttl'",
",",
"self",
".",
"result_ttl",
")",
"ttl",
"=",
"kwargs",
".",
"pop",
"(",
"'ttl'",
",",
"self",
".",
"ttl",
")",
"depends_on",
"=",
"kwargs",
".",
"pop",
"(",
"'depends_on'",
",",
"self",
".",
"_depends_on",
")",
"job_id",
"=",
"kwargs",
".",
"pop",
"(",
"'job_id'",
",",
"None",
")",
"at_front",
"=",
"kwargs",
".",
"pop",
"(",
"'at_front'",
",",
"self",
".",
"_at_front",
")",
"meta",
"=",
"kwargs",
".",
"pop",
"(",
"'meta'",
",",
"self",
".",
"_meta",
")",
"description",
"=",
"kwargs",
".",
"pop",
"(",
"'description'",
",",
"self",
".",
"_description",
")",
"return",
"self",
".",
"rq",
".",
"get_queue",
"(",
"queue_name",
")",
".",
"enqueue_call",
"(",
"self",
".",
"wrapped",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
",",
"timeout",
"=",
"timeout",
",",
"result_ttl",
"=",
"result_ttl",
",",
"ttl",
"=",
"ttl",
",",
"depends_on",
"=",
"depends_on",
",",
"job_id",
"=",
"job_id",
",",
"at_front",
"=",
"at_front",
",",
"meta",
"=",
"meta",
",",
"description",
"=",
"description",
",",
")"
] | A function to queue a RQ job, e.g.::
@rq.job(timeout=60)
def add(x, y):
return x + y
add.queue(1, 2, timeout=30)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param queue: Name of the queue to queue in, defaults to
            queue of the job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param job_id: A custom ID for the new job. Defaults to an
:mod:`UUID <uuid>`.
:type job_id: str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob | [
"A",
"function",
"to",
"queue",
"a",
"RQ",
"job",
"e",
".",
"g",
".",
"::"
] | python | train |
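A usage sketch assembled from the docstring above; ``'low'`` is a hypothetical queue name.

from flask_rq2 import RQ

rq = RQ()  # in a real app: RQ(app) or rq.init_app(app)

@rq.job(timeout=60)
def add(x, y):
    return x + y

job = add.queue(1, 2, timeout=30, queue='low', at_front=True)
print(job.id)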
jkitzes/macroeco | macroeco/empirical/_empirical.py | https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/empirical/_empirical.py#L310-L373 | def _subset_meta(full_meta, subset, incremented=False):
"""
Return metadata reflecting all conditions in subset
Parameters
----------
full_meta : ConfigParser obj
Metadata object
subset : str
String describing subset of data to use for analysis
incremented : bool
If True, the metadata has already been incremented
Returns
-------
Configparser object or dict
Updated version of full_meta accounting for subset string
"""
if not subset:
return full_meta, False
meta = {} # Make deepcopy of entire meta (all section dicts in meta dict)
for key, val in full_meta.iteritems():
meta[key] = copy.deepcopy(dict(val))
conditions = subset.replace(' ','').split(';')
inc = False
for condition in conditions:
condition_list = re.split('[<>=]', condition)
col = condition_list[0]
val = condition_list[-1]
try:
col_step = meta[col]['step']
except: # If there's no metadata for this col, do nothing
continue
operator = re.sub('[^<>=]', '', condition)
if operator == '==':
meta[col]['min'] = val
meta[col]['max'] = val
elif operator == '>=':
meta[col]['min'] = val
elif operator == '>':
if incremented:
meta[col]['min'] = val
else:
meta[col]['min'] = str(eval(val) + eval(col_step))
inc = True
elif operator == '<=':
meta[col]['max'] = val
elif operator == '<':
if incremented:
meta[col]['max'] = val
else:
meta[col]['max'] = str(eval(val) - eval(col_step))
inc = True
else:
raise ValueError, "Subset %s not valid" % condition
return meta, inc | [
"def",
"_subset_meta",
"(",
"full_meta",
",",
"subset",
",",
"incremented",
"=",
"False",
")",
":",
"if",
"not",
"subset",
":",
"return",
"full_meta",
",",
"False",
"meta",
"=",
"{",
"}",
"# Make deepcopy of entire meta (all section dicts in meta dict)",
"for",
"key",
",",
"val",
"in",
"full_meta",
".",
"iteritems",
"(",
")",
":",
"meta",
"[",
"key",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"dict",
"(",
"val",
")",
")",
"conditions",
"=",
"subset",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"split",
"(",
"';'",
")",
"inc",
"=",
"False",
"for",
"condition",
"in",
"conditions",
":",
"condition_list",
"=",
"re",
".",
"split",
"(",
"'[<>=]'",
",",
"condition",
")",
"col",
"=",
"condition_list",
"[",
"0",
"]",
"val",
"=",
"condition_list",
"[",
"-",
"1",
"]",
"try",
":",
"col_step",
"=",
"meta",
"[",
"col",
"]",
"[",
"'step'",
"]",
"except",
":",
"# If there's no metadata for this col, do nothing",
"continue",
"operator",
"=",
"re",
".",
"sub",
"(",
"'[^<>=]'",
",",
"''",
",",
"condition",
")",
"if",
"operator",
"==",
"'=='",
":",
"meta",
"[",
"col",
"]",
"[",
"'min'",
"]",
"=",
"val",
"meta",
"[",
"col",
"]",
"[",
"'max'",
"]",
"=",
"val",
"elif",
"operator",
"==",
"'>='",
":",
"meta",
"[",
"col",
"]",
"[",
"'min'",
"]",
"=",
"val",
"elif",
"operator",
"==",
"'>'",
":",
"if",
"incremented",
":",
"meta",
"[",
"col",
"]",
"[",
"'min'",
"]",
"=",
"val",
"else",
":",
"meta",
"[",
"col",
"]",
"[",
"'min'",
"]",
"=",
"str",
"(",
"eval",
"(",
"val",
")",
"+",
"eval",
"(",
"col_step",
")",
")",
"inc",
"=",
"True",
"elif",
"operator",
"==",
"'<='",
":",
"meta",
"[",
"col",
"]",
"[",
"'max'",
"]",
"=",
"val",
"elif",
"operator",
"==",
"'<'",
":",
"if",
"incremented",
":",
"meta",
"[",
"col",
"]",
"[",
"'max'",
"]",
"=",
"val",
"else",
":",
"meta",
"[",
"col",
"]",
"[",
"'max'",
"]",
"=",
"str",
"(",
"eval",
"(",
"val",
")",
"-",
"eval",
"(",
"col_step",
")",
")",
"inc",
"=",
"True",
"else",
":",
"raise",
"ValueError",
",",
"\"Subset %s not valid\"",
"%",
"condition",
"return",
"meta",
",",
"inc"
] | Return metadata reflecting all conditions in subset
Parameters
----------
full_meta : ConfigParser obj
Metadata object
subset : str
String describing subset of data to use for analysis
incremented : bool
If True, the metadata has already been incremented
Returns
-------
Configparser object or dict
Updated version of full_meta accounting for subset string | [
"Return",
"metadata",
"reflecting",
"all",
"conditions",
"in",
"subset"
] | python | train |
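A sketch of the subset-string semantics under Python 2, which this module targets; the plain dict below stands in for the ConfigParser sections normally passed in.

full_meta = {'x': {'min': '0', 'max': '10', 'step': '1'}}
meta, inc = _subset_meta(full_meta, 'x>2; x<=8')
# meta['x'] becomes {'min': '3', 'max': '8', 'step': '1'}: the '>' bound is
# incremented by one step (so inc is True), while '<=' is applied as-is.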
bitprophet/botox | botox/aws.py | https://github.com/bitprophet/botox/blob/02c887a28bd2638273548cc7d1e6d6f1d4d38bf9/botox/aws.py#L217-L231 | def get_subnet_id(self, name):
"""
Return subnet ID for given ``name``, if it exists.
E.g. with a subnet mapping of ``{'abc123': 'ops', '67fd56': 'prod'}``,
``get_subnet_id('ops')`` would return ``'abc123'``. If the map has
non-unique values, the first matching key will be returned.
If no match is found, the given ``name`` is returned as-is. This works
well for e.g. normalizing names-or-IDs to just IDs.
"""
for subnet_id, subnet_name in self.config['subnets'].iteritems():
if subnet_name == name:
return subnet_id
return name | [
"def",
"get_subnet_id",
"(",
"self",
",",
"name",
")",
":",
"for",
"subnet_id",
",",
"subnet_name",
"in",
"self",
".",
"config",
"[",
"'subnets'",
"]",
".",
"iteritems",
"(",
")",
":",
"if",
"subnet_name",
"==",
"name",
":",
"return",
"subnet_id",
"return",
"name"
] | Return subnet ID for given ``name``, if it exists.
E.g. with a subnet mapping of ``{'abc123': 'ops', '67fd56': 'prod'}``,
``get_subnet_id('ops')`` would return ``'abc123'``. If the map has
non-unique values, the first matching key will be returned.
If no match is found, the given ``name`` is returned as-is. This works
well for e.g. normalizing names-or-IDs to just IDs. | [
"Return",
"subnet",
"ID",
"for",
"given",
"name",
"if",
"it",
"exists",
"."
] | python | train |
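A small sketch assuming ``aws`` is a configured instance of this class holding the subnet map from the docstring.

aws.config['subnets'] = {'abc123': 'ops', '67fd56': 'prod'}
aws.get_subnet_id('ops')        # -> 'abc123'
aws.get_subnet_id('subnet-99')  # -> 'subnet-99' (no match, returned as-is)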
aequitas/python-rflink | rflink/protocol.py | https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L282-L291 | def handle_event(self, event):
"""Handle incoming packet from rflink gateway."""
if event.get('command'):
if event['command'] == 'on':
cmd = 'off'
else:
cmd = 'on'
task = self.send_command_ack(event['id'], cmd)
self.loop.create_task(task) | [
"def",
"handle_event",
"(",
"self",
",",
"event",
")",
":",
"if",
"event",
".",
"get",
"(",
"'command'",
")",
":",
"if",
"event",
"[",
"'command'",
"]",
"==",
"'on'",
":",
"cmd",
"=",
"'off'",
"else",
":",
"cmd",
"=",
"'on'",
"task",
"=",
"self",
".",
"send_command_ack",
"(",
"event",
"[",
"'id'",
"]",
",",
"cmd",
")",
"self",
".",
"loop",
".",
"create_task",
"(",
"task",
")"
] | Handle incoming packet from rflink gateway. | [
"Handle",
"incoming",
"packet",
"from",
"rflink",
"gateway",
"."
] | python | train |
seleniumbase/SeleniumBase | seleniumbase/fixtures/base_case.py | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L3079-L3200 | def setUp(self, masterqa_mode=False):
"""
Be careful if a subclass of BaseCase overrides setUp()
You'll need to add the following line to the subclass setUp() method:
super(SubClassOfBaseCase, self).setUp()
"""
self.masterqa_mode = masterqa_mode
self.is_pytest = None
try:
# This raises an exception if the test is not coming from pytest
self.is_pytest = sb_config.is_pytest
except Exception:
# Not using pytest (probably nosetests)
self.is_pytest = False
if self.is_pytest:
# pytest-specific code
test_id = "%s.%s.%s" % (self.__class__.__module__,
self.__class__.__name__,
self._testMethodName)
self.browser = sb_config.browser
self.data = sb_config.data
self.demo_mode = sb_config.demo_mode
self.demo_sleep = sb_config.demo_sleep
self.highlights = sb_config.highlights
self.environment = sb_config.environment
self.env = self.environment # Add a shortened version
self.with_selenium = sb_config.with_selenium # Should be True
self.headless = sb_config.headless
self.headless_active = False
self.log_path = sb_config.log_path
self.with_testing_base = sb_config.with_testing_base
self.with_basic_test_info = sb_config.with_basic_test_info
self.with_screen_shots = sb_config.with_screen_shots
self.with_page_source = sb_config.with_page_source
self.with_db_reporting = sb_config.with_db_reporting
self.with_s3_logging = sb_config.with_s3_logging
self.servername = sb_config.servername
self.port = sb_config.port
self.proxy_string = sb_config.proxy_string
self.user_agent = sb_config.user_agent
self.cap_file = sb_config.cap_file
self.database_env = sb_config.database_env
self.message_duration = sb_config.message_duration
self.js_checking_on = sb_config.js_checking_on
self.ad_block_on = sb_config.ad_block_on
self.verify_delay = sb_config.verify_delay
self.disable_csp = sb_config.disable_csp
self.save_screenshot_after_test = sb_config.save_screenshot
self.visual_baseline = sb_config.visual_baseline
self.timeout_multiplier = sb_config.timeout_multiplier
self.pytest_html_report = sb_config.pytest_html_report
self.report_on = False
if self.pytest_html_report:
self.report_on = True
self.use_grid = False
if self.servername != "localhost":
# Use Selenium Grid (Use --server=127.0.0.1 for localhost Grid)
self.use_grid = True
if self.with_db_reporting:
from seleniumbase.core.application_manager import (
ApplicationManager)
from seleniumbase.core.testcase_manager import (
ExecutionQueryPayload)
import getpass
self.execution_guid = str(uuid.uuid4())
self.testcase_guid = None
self.execution_start_time = 0
self.case_start_time = 0
self.application = None
self.testcase_manager = None
self.error_handled = False
self.testcase_manager = TestcaseManager(self.database_env)
#
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int(time.time() * 1000)
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload)
#
data_payload = TestcaseDataPayload()
self.testcase_guid = str(uuid.uuid4())
data_payload.guid = self.testcase_guid
data_payload.execution_guid = self.execution_guid
if self.with_selenium:
data_payload.browser = self.browser
else:
data_payload.browser = "N/A"
data_payload.test_address = test_id
application = ApplicationManager.generate_application_string(
self._testMethodName)
data_payload.env = application.split('.')[0]
data_payload.start_time = application.split('.')[1]
data_payload.state = constants.State.NOTRUN
self.testcase_manager.insert_testcase_data(data_payload)
self.case_start_time = int(time.time() * 1000)
if self.headless:
try:
from pyvirtualdisplay import Display
self.display = Display(visible=0, size=(1440, 1080))
self.display.start()
self.headless_active = True
except Exception:
# pyvirtualdisplay might not be necessary anymore because
# Chrome and Firefox now have built-in headless displays
pass
# Launch WebDriver for both Pytest and Nosetests
if not hasattr(self, "browser"):
raise Exception("""SeleniumBase plugins did not load! """
"""Please reinstall using:\n"""
""" >>> "python setup.py install" <<< """)
self.driver = self.get_new_driver(browser=self.browser,
headless=self.headless,
servername=self.servername,
port=self.port,
proxy=self.proxy_string,
agent=self.user_agent,
switch_to=True,
cap_file=self.cap_file,
disable_csp=self.disable_csp)
self._default_driver = self.driver | [
"def",
"setUp",
"(",
"self",
",",
"masterqa_mode",
"=",
"False",
")",
":",
"self",
".",
"masterqa_mode",
"=",
"masterqa_mode",
"self",
".",
"is_pytest",
"=",
"None",
"try",
":",
"# This raises an exception if the test is not coming from pytest",
"self",
".",
"is_pytest",
"=",
"sb_config",
".",
"is_pytest",
"except",
"Exception",
":",
"# Not using pytest (probably nosetests)",
"self",
".",
"is_pytest",
"=",
"False",
"if",
"self",
".",
"is_pytest",
":",
"# pytest-specific code",
"test_id",
"=",
"\"%s.%s.%s\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__module__",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"_testMethodName",
")",
"self",
".",
"browser",
"=",
"sb_config",
".",
"browser",
"self",
".",
"data",
"=",
"sb_config",
".",
"data",
"self",
".",
"demo_mode",
"=",
"sb_config",
".",
"demo_mode",
"self",
".",
"demo_sleep",
"=",
"sb_config",
".",
"demo_sleep",
"self",
".",
"highlights",
"=",
"sb_config",
".",
"highlights",
"self",
".",
"environment",
"=",
"sb_config",
".",
"environment",
"self",
".",
"env",
"=",
"self",
".",
"environment",
"# Add a shortened version",
"self",
".",
"with_selenium",
"=",
"sb_config",
".",
"with_selenium",
"# Should be True",
"self",
".",
"headless",
"=",
"sb_config",
".",
"headless",
"self",
".",
"headless_active",
"=",
"False",
"self",
".",
"log_path",
"=",
"sb_config",
".",
"log_path",
"self",
".",
"with_testing_base",
"=",
"sb_config",
".",
"with_testing_base",
"self",
".",
"with_basic_test_info",
"=",
"sb_config",
".",
"with_basic_test_info",
"self",
".",
"with_screen_shots",
"=",
"sb_config",
".",
"with_screen_shots",
"self",
".",
"with_page_source",
"=",
"sb_config",
".",
"with_page_source",
"self",
".",
"with_db_reporting",
"=",
"sb_config",
".",
"with_db_reporting",
"self",
".",
"with_s3_logging",
"=",
"sb_config",
".",
"with_s3_logging",
"self",
".",
"servername",
"=",
"sb_config",
".",
"servername",
"self",
".",
"port",
"=",
"sb_config",
".",
"port",
"self",
".",
"proxy_string",
"=",
"sb_config",
".",
"proxy_string",
"self",
".",
"user_agent",
"=",
"sb_config",
".",
"user_agent",
"self",
".",
"cap_file",
"=",
"sb_config",
".",
"cap_file",
"self",
".",
"database_env",
"=",
"sb_config",
".",
"database_env",
"self",
".",
"message_duration",
"=",
"sb_config",
".",
"message_duration",
"self",
".",
"js_checking_on",
"=",
"sb_config",
".",
"js_checking_on",
"self",
".",
"ad_block_on",
"=",
"sb_config",
".",
"ad_block_on",
"self",
".",
"verify_delay",
"=",
"sb_config",
".",
"verify_delay",
"self",
".",
"disable_csp",
"=",
"sb_config",
".",
"disable_csp",
"self",
".",
"save_screenshot_after_test",
"=",
"sb_config",
".",
"save_screenshot",
"self",
".",
"visual_baseline",
"=",
"sb_config",
".",
"visual_baseline",
"self",
".",
"timeout_multiplier",
"=",
"sb_config",
".",
"timeout_multiplier",
"self",
".",
"pytest_html_report",
"=",
"sb_config",
".",
"pytest_html_report",
"self",
".",
"report_on",
"=",
"False",
"if",
"self",
".",
"pytest_html_report",
":",
"self",
".",
"report_on",
"=",
"True",
"self",
".",
"use_grid",
"=",
"False",
"if",
"self",
".",
"servername",
"!=",
"\"localhost\"",
":",
"# Use Selenium Grid (Use --server=127.0.0.1 for localhost Grid)",
"self",
".",
"use_grid",
"=",
"True",
"if",
"self",
".",
"with_db_reporting",
":",
"from",
"seleniumbase",
".",
"core",
".",
"application_manager",
"import",
"(",
"ApplicationManager",
")",
"from",
"seleniumbase",
".",
"core",
".",
"testcase_manager",
"import",
"(",
"ExecutionQueryPayload",
")",
"import",
"getpass",
"self",
".",
"execution_guid",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"self",
".",
"testcase_guid",
"=",
"None",
"self",
".",
"execution_start_time",
"=",
"0",
"self",
".",
"case_start_time",
"=",
"0",
"self",
".",
"application",
"=",
"None",
"self",
".",
"testcase_manager",
"=",
"None",
"self",
".",
"error_handled",
"=",
"False",
"self",
".",
"testcase_manager",
"=",
"TestcaseManager",
"(",
"self",
".",
"database_env",
")",
"#",
"exec_payload",
"=",
"ExecutionQueryPayload",
"(",
")",
"exec_payload",
".",
"execution_start_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
"self",
".",
"execution_start_time",
"=",
"exec_payload",
".",
"execution_start_time",
"exec_payload",
".",
"guid",
"=",
"self",
".",
"execution_guid",
"exec_payload",
".",
"username",
"=",
"getpass",
".",
"getuser",
"(",
")",
"self",
".",
"testcase_manager",
".",
"insert_execution_data",
"(",
"exec_payload",
")",
"#",
"data_payload",
"=",
"TestcaseDataPayload",
"(",
")",
"self",
".",
"testcase_guid",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"data_payload",
".",
"guid",
"=",
"self",
".",
"testcase_guid",
"data_payload",
".",
"execution_guid",
"=",
"self",
".",
"execution_guid",
"if",
"self",
".",
"with_selenium",
":",
"data_payload",
".",
"browser",
"=",
"self",
".",
"browser",
"else",
":",
"data_payload",
".",
"browser",
"=",
"\"N/A\"",
"data_payload",
".",
"test_address",
"=",
"test_id",
"application",
"=",
"ApplicationManager",
".",
"generate_application_string",
"(",
"self",
".",
"_testMethodName",
")",
"data_payload",
".",
"env",
"=",
"application",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"data_payload",
".",
"start_time",
"=",
"application",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"data_payload",
".",
"state",
"=",
"constants",
".",
"State",
".",
"NOTRUN",
"self",
".",
"testcase_manager",
".",
"insert_testcase_data",
"(",
"data_payload",
")",
"self",
".",
"case_start_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
"if",
"self",
".",
"headless",
":",
"try",
":",
"from",
"pyvirtualdisplay",
"import",
"Display",
"self",
".",
"display",
"=",
"Display",
"(",
"visible",
"=",
"0",
",",
"size",
"=",
"(",
"1440",
",",
"1080",
")",
")",
"self",
".",
"display",
".",
"start",
"(",
")",
"self",
".",
"headless_active",
"=",
"True",
"except",
"Exception",
":",
"# pyvirtualdisplay might not be necessary anymore because",
"# Chrome and Firefox now have built-in headless displays",
"pass",
"# Launch WebDriver for both Pytest and Nosetests",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"browser\"",
")",
":",
"raise",
"Exception",
"(",
"\"\"\"SeleniumBase plugins did not load! \"\"\"",
"\"\"\"Please reinstall using:\\n\"\"\"",
"\"\"\" >>> \"python setup.py install\" <<< \"\"\"",
")",
"self",
".",
"driver",
"=",
"self",
".",
"get_new_driver",
"(",
"browser",
"=",
"self",
".",
"browser",
",",
"headless",
"=",
"self",
".",
"headless",
",",
"servername",
"=",
"self",
".",
"servername",
",",
"port",
"=",
"self",
".",
"port",
",",
"proxy",
"=",
"self",
".",
"proxy_string",
",",
"agent",
"=",
"self",
".",
"user_agent",
",",
"switch_to",
"=",
"True",
",",
"cap_file",
"=",
"self",
".",
"cap_file",
",",
"disable_csp",
"=",
"self",
".",
"disable_csp",
")",
"self",
".",
"_default_driver",
"=",
"self",
".",
"driver"
] | Be careful if a subclass of BaseCase overrides setUp()
You'll need to add the following line to the subclass setUp() method:
super(SubClassOfBaseCase, self).setUp() | [
"Be",
"careful",
"if",
"a",
"subclass",
"of",
"BaseCase",
"overrides",
"setUp",
"()",
"You",
"ll",
"need",
"to",
"add",
"the",
"following",
"line",
"to",
"the",
"subclass",
"setUp",
"()",
"method",
":",
"super",
"(",
"SubClassOfBaseCase",
"self",
")",
".",
"setUp",
"()"
] | python | train |
DLR-RM/RAFCON | source/rafcon/core/global_variable_manager.py | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/global_variable_manager.py#L310-L319 | def global_variable_dictionary(self):
"""Property for the _global_variable_dictionary field"""
dict_copy = {}
for key, value in self.__global_variable_dictionary.items():
if key in self.__variable_references and self.__variable_references[key]:
dict_copy[key] = value
else:
dict_copy[key] = copy.deepcopy(value)
return dict_copy | [
"def",
"global_variable_dictionary",
"(",
"self",
")",
":",
"dict_copy",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"__global_variable_dictionary",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"self",
".",
"__variable_references",
"and",
"self",
".",
"__variable_references",
"[",
"key",
"]",
":",
"dict_copy",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"dict_copy",
"[",
"key",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"value",
")",
"return",
"dict_copy"
] | Property for the _global_variable_dictionary field | [
"Property",
"for",
"the",
"_global_variable_dictionary",
"field"
] | python | train |
saltstack/salt | salt/modules/vsphere.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L8127-L8194 | def _apply_serial_port(serial_device_spec, key, operation='add'):
'''
Returns a vim.vm.device.VirtualSerialPort representing a serial port
component
serial_device_spec
Serial device properties
key
Unique key of the device
operation
Add or edit the given device
.. code-block:: bash
serial_ports:
adapter: 'Serial port 1'
backing:
type: uri
uri: 'telnet://something:port'
direction: <client|server>
filename: 'service_uri'
connectable:
allow_guest_control: True
start_connected: True
yield: False
'''
log.trace('Creating serial port adapter=%s type=%s connectable=%s yield=%s',
serial_device_spec['adapter'], serial_device_spec['type'],
serial_device_spec['connectable'], serial_device_spec['yield'])
device_spec = vim.vm.device.VirtualDeviceSpec()
device_spec.device = vim.vm.device.VirtualSerialPort()
if operation == 'add':
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
elif operation == 'edit':
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
connect_info = vim.vm.device.VirtualDevice.ConnectInfo()
type_backing = None
if serial_device_spec['type'] == 'network':
type_backing = vim.vm.device.VirtualSerialPort.URIBackingInfo()
if 'uri' not in serial_device_spec['backing'].keys():
raise ValueError('vSPC proxy URI not specified in config')
if 'uri' not in serial_device_spec['backing'].keys():
raise ValueError('vSPC Direction not specified in config')
if 'filename' not in serial_device_spec['backing'].keys():
raise ValueError('vSPC Filename not specified in config')
type_backing.proxyURI = serial_device_spec['backing']['uri']
type_backing.direction = serial_device_spec['backing']['direction']
type_backing.serviceURI = serial_device_spec['backing']['filename']
if serial_device_spec['type'] == 'pipe':
type_backing = vim.vm.device.VirtualSerialPort.PipeBackingInfo()
if serial_device_spec['type'] == 'file':
type_backing = vim.vm.device.VirtualSerialPort.FileBackingInfo()
if serial_device_spec['type'] == 'device':
type_backing = vim.vm.device.VirtualSerialPort.DeviceBackingInfo()
connect_info.allowGuestControl = \
serial_device_spec['connectable']['allow_guest_control']
connect_info.startConnected = \
serial_device_spec['connectable']['start_connected']
device_spec.device.backing = type_backing
device_spec.device.connectable = connect_info
device_spec.device.unitNumber = 1
device_spec.device.key = key
device_spec.device.yieldOnPoll = serial_device_spec['yield']
return device_spec | [
"def",
"_apply_serial_port",
"(",
"serial_device_spec",
",",
"key",
",",
"operation",
"=",
"'add'",
")",
":",
"log",
".",
"trace",
"(",
"'Creating serial port adapter=%s type=%s connectable=%s yield=%s'",
",",
"serial_device_spec",
"[",
"'adapter'",
"]",
",",
"serial_device_spec",
"[",
"'type'",
"]",
",",
"serial_device_spec",
"[",
"'connectable'",
"]",
",",
"serial_device_spec",
"[",
"'yield'",
"]",
")",
"device_spec",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualDeviceSpec",
"(",
")",
"device_spec",
".",
"device",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualSerialPort",
"(",
")",
"if",
"operation",
"==",
"'add'",
":",
"device_spec",
".",
"operation",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualDeviceSpec",
".",
"Operation",
".",
"add",
"elif",
"operation",
"==",
"'edit'",
":",
"device_spec",
".",
"operation",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualDeviceSpec",
".",
"Operation",
".",
"edit",
"connect_info",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualDevice",
".",
"ConnectInfo",
"(",
")",
"type_backing",
"=",
"None",
"if",
"serial_device_spec",
"[",
"'type'",
"]",
"==",
"'network'",
":",
"type_backing",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualSerialPort",
".",
"URIBackingInfo",
"(",
")",
"if",
"'uri'",
"not",
"in",
"serial_device_spec",
"[",
"'backing'",
"]",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'vSPC proxy URI not specified in config'",
")",
"if",
"'uri'",
"not",
"in",
"serial_device_spec",
"[",
"'backing'",
"]",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'vSPC Direction not specified in config'",
")",
"if",
"'filename'",
"not",
"in",
"serial_device_spec",
"[",
"'backing'",
"]",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'vSPC Filename not specified in config'",
")",
"type_backing",
".",
"proxyURI",
"=",
"serial_device_spec",
"[",
"'backing'",
"]",
"[",
"'uri'",
"]",
"type_backing",
".",
"direction",
"=",
"serial_device_spec",
"[",
"'backing'",
"]",
"[",
"'direction'",
"]",
"type_backing",
".",
"serviceURI",
"=",
"serial_device_spec",
"[",
"'backing'",
"]",
"[",
"'filename'",
"]",
"if",
"serial_device_spec",
"[",
"'type'",
"]",
"==",
"'pipe'",
":",
"type_backing",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualSerialPort",
".",
"PipeBackingInfo",
"(",
")",
"if",
"serial_device_spec",
"[",
"'type'",
"]",
"==",
"'file'",
":",
"type_backing",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualSerialPort",
".",
"FileBackingInfo",
"(",
")",
"if",
"serial_device_spec",
"[",
"'type'",
"]",
"==",
"'device'",
":",
"type_backing",
"=",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualSerialPort",
".",
"DeviceBackingInfo",
"(",
")",
"connect_info",
".",
"allowGuestControl",
"=",
"serial_device_spec",
"[",
"'connectable'",
"]",
"[",
"'allow_guest_control'",
"]",
"connect_info",
".",
"startConnected",
"=",
"serial_device_spec",
"[",
"'connectable'",
"]",
"[",
"'start_connected'",
"]",
"device_spec",
".",
"device",
".",
"backing",
"=",
"type_backing",
"device_spec",
".",
"device",
".",
"connectable",
"=",
"connect_info",
"device_spec",
".",
"device",
".",
"unitNumber",
"=",
"1",
"device_spec",
".",
"device",
".",
"key",
"=",
"key",
"device_spec",
".",
"device",
".",
"yieldOnPoll",
"=",
"serial_device_spec",
"[",
"'yield'",
"]",
"return",
"device_spec"
] | Returns a vim.vm.device.VirtualSerialPort representing a serial port
component
serial_device_spec
Serial device properties
key
Unique key of the device
operation
Add or edit the given device
.. code-block:: bash
serial_ports:
adapter: 'Serial port 1'
backing:
type: uri
uri: 'telnet://something:port'
direction: <client|server>
filename: 'service_uri'
connectable:
allow_guest_control: True
start_connected: True
yield: False | [
"Returns",
"a",
"vim",
".",
"vm",
".",
"device",
".",
"VirtualSerialPort",
"representing",
"a",
"serial",
"port",
"component"
] | python | train |
yola/yoconfigurator | yoconfigurator/smush.py | https://github.com/yola/yoconfigurator/blob/dfb60fa1e30ae7cfec2526bb101fc205f5952639/yoconfigurator/smush.py#L72-L84 | def smush_config(sources, initial=None):
"""Merge the configuration sources and return the resulting DotDict."""
if initial is None:
initial = {}
config = DotDict(initial)
for fn in sources:
log.debug('Merging %s', fn)
mod = get_config_module(fn)
config = mod.update(config)
log.debug('Current config:\n%s', json.dumps(config, indent=4,
cls=LenientJSONEncoder))
return config | [
"def",
"smush_config",
"(",
"sources",
",",
"initial",
"=",
"None",
")",
":",
"if",
"initial",
"is",
"None",
":",
"initial",
"=",
"{",
"}",
"config",
"=",
"DotDict",
"(",
"initial",
")",
"for",
"fn",
"in",
"sources",
":",
"log",
".",
"debug",
"(",
"'Merging %s'",
",",
"fn",
")",
"mod",
"=",
"get_config_module",
"(",
"fn",
")",
"config",
"=",
"mod",
".",
"update",
"(",
"config",
")",
"log",
".",
"debug",
"(",
"'Current config:\\n%s'",
",",
"json",
".",
"dumps",
"(",
"config",
",",
"indent",
"=",
"4",
",",
"cls",
"=",
"LenientJSONEncoder",
")",
")",
"return",
"config"
] | Merge the configuration sources and return the resulting DotDict. | [
"Merge",
"the",
"configuration",
"sources",
"and",
"return",
"the",
"resulting",
"DotDict",
"."
] | python | valid |
EventTeam/beliefs | src/beliefs/cells/numeric.py | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/numeric.py#L63-L72 | def is_contradictory(self, other):
"""
Whether other and self can coexist
"""
other = IntervalCell.coerce(other)
assert other.low <= other.high, "Low must be <= high"
if max(other.low, self.low) <= min(other.high, self.high):
return False
else:
return True | [
"def",
"is_contradictory",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"IntervalCell",
".",
"coerce",
"(",
"other",
")",
"assert",
"other",
".",
"low",
"<=",
"other",
".",
"high",
",",
"\"Low must be <= high\"",
"if",
"max",
"(",
"other",
".",
"low",
",",
"self",
".",
"low",
")",
"<=",
"min",
"(",
"other",
".",
"high",
",",
"self",
".",
"high",
")",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | Whether other and self can coexist | [
"Whether",
"other",
"and",
"self",
"can",
"coexist"
] | python | train |
Cue/scales | src/greplin/scales/graphite.py | https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/graphite.py#L55-L67 | def _forbidden(self, path, value):
"""Is a stat forbidden? Goes through the rules to find one that
applies. Chronologically newer rules are higher-precedence than
older ones. If no rule applies, the stat is forbidden by default."""
if path[0] == '/':
path = path[1:]
for rule in reversed(self.rules):
if isinstance(rule[1], six.string_types):
if fnmatch(path, rule[1]):
return not rule[0]
elif rule[1](path, value):
return not rule[0]
return True | [
"def",
"_forbidden",
"(",
"self",
",",
"path",
",",
"value",
")",
":",
"if",
"path",
"[",
"0",
"]",
"==",
"'/'",
":",
"path",
"=",
"path",
"[",
"1",
":",
"]",
"for",
"rule",
"in",
"reversed",
"(",
"self",
".",
"rules",
")",
":",
"if",
"isinstance",
"(",
"rule",
"[",
"1",
"]",
",",
"six",
".",
"string_types",
")",
":",
"if",
"fnmatch",
"(",
"path",
",",
"rule",
"[",
"1",
"]",
")",
":",
"return",
"not",
"rule",
"[",
"0",
"]",
"elif",
"rule",
"[",
"1",
"]",
"(",
"path",
",",
"value",
")",
":",
"return",
"not",
"rule",
"[",
"0",
"]",
"return",
"True"
] | Is a stat forbidden? Goes through the rules to find one that
applies. Chronologically newer rules are higher-precedence than
older ones. If no rule applies, the stat is forbidden by default. | [
"Is",
"a",
"stat",
"forbidden?",
"Goes",
"through",
"the",
"rules",
"to",
"find",
"one",
"that",
"applies",
".",
"Chronologically",
"newer",
"rules",
"are",
"higher",
"-",
"precedence",
"than",
"older",
"ones",
".",
"If",
"no",
"rule",
"applies",
"the",
"stat",
"is",
"forbidden",
"by",
"default",
"."
] | python | train |
shapiromatron/bmds | bmds/models/base.py | https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/models/base.py#L316-L323 | def write_dfile(self):
"""
Write the generated d_file to a temporary file.
"""
f_in = self.tempfiles.get_tempfile(prefix="bmds-", suffix=".(d)")
with open(f_in, "w") as f:
f.write(self.as_dfile())
return f_in | [
"def",
"write_dfile",
"(",
"self",
")",
":",
"f_in",
"=",
"self",
".",
"tempfiles",
".",
"get_tempfile",
"(",
"prefix",
"=",
"\"bmds-\"",
",",
"suffix",
"=",
"\".(d)\"",
")",
"with",
"open",
"(",
"f_in",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"as_dfile",
"(",
")",
")",
"return",
"f_in"
] | Write the generated d_file to a temporary file. | [
"Write",
"the",
"generated",
"d_file",
"to",
"a",
"temporary",
"file",
"."
] | python | train |
osrg/ryu | ryu/lib/ovs/vsctl.py | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/vsctl.py#L2271-L2279 | def _add(self, ctx, table_name, record_id, column_values):
"""
:type column_values: list of (column, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, value in column_values:
ctx.add_column(ovsrec_row, column, value)
ctx.invalidate_cache() | [
"def",
"_add",
"(",
"self",
",",
"ctx",
",",
"table_name",
",",
"record_id",
",",
"column_values",
")",
":",
"vsctl_table",
"=",
"self",
".",
"_get_table",
"(",
"table_name",
")",
"ovsrec_row",
"=",
"ctx",
".",
"must_get_row",
"(",
"vsctl_table",
",",
"record_id",
")",
"for",
"column",
",",
"value",
"in",
"column_values",
":",
"ctx",
".",
"add_column",
"(",
"ovsrec_row",
",",
"column",
",",
"value",
")",
"ctx",
".",
"invalidate_cache",
"(",
")"
] | :type column_values: list of (column, value_json) | [
":",
"type",
"column_values",
":",
"list",
"of",
"(",
"column",
"value_json",
")"
] | python | train |
peopledoc/populous | populous/cli.py | https://github.com/peopledoc/populous/blob/50ea445ee973c82a36e0853ae7e2817961143b05/populous/cli.py#L79-L97 | def generators():
"""
List all the available generators.
"""
from populous import generators
base = generators.Generator
for name in dir(generators):
generator = getattr(generators, name)
if isinstance(generator, type) and issubclass(generator, base):
name = generator.__name__
doc = (generator.__doc__ or '').strip()
if doc:
click.echo("{} - {}".format(name, doc))
else:
click.echo(name) | [
"def",
"generators",
"(",
")",
":",
"from",
"populous",
"import",
"generators",
"base",
"=",
"generators",
".",
"Generator",
"for",
"name",
"in",
"dir",
"(",
"generators",
")",
":",
"generator",
"=",
"getattr",
"(",
"generators",
",",
"name",
")",
"if",
"isinstance",
"(",
"generator",
",",
"type",
")",
"and",
"issubclass",
"(",
"generator",
",",
"base",
")",
":",
"name",
"=",
"generator",
".",
"__name__",
"doc",
"=",
"(",
"generator",
".",
"__doc__",
"or",
"''",
")",
".",
"strip",
"(",
")",
"if",
"doc",
":",
"click",
".",
"echo",
"(",
"\"{} - {}\"",
".",
"format",
"(",
"name",
",",
"doc",
")",
")",
"else",
":",
"click",
".",
"echo",
"(",
"name",
")"
] | List all the available generators. | [
"List",
"all",
"the",
"available",
"generators",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/requests/_internal_utils.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/_internal_utils.py#L30-L42 | def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False | [
"def",
"unicode_is_ascii",
"(",
"u_string",
")",
":",
"assert",
"isinstance",
"(",
"u_string",
",",
"str",
")",
"try",
":",
"u_string",
".",
"encode",
"(",
"'ascii'",
")",
"return",
"True",
"except",
"UnicodeEncodeError",
":",
"return",
"False"
] | Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool | [
"Determine",
"if",
"unicode",
"string",
"only",
"contains",
"ASCII",
"characters",
"."
] | python | train |
intake/intake | intake/catalog/base.py | https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/catalog/base.py#L674-L715 | def _load(self):
"""Fetch metadata from remote. Entries are fetched lazily."""
# This will not immediately fetch any sources (entries). It will lazily
# fetch sources from the server in paginated blocks when this Catalog
# is iterated over. It will fetch specific sources when they are
# accessed in this Catalog via __getitem__.
if self.page_size is None:
# Fetch all source info.
params = {}
else:
# Just fetch the metadata now; fetch source info later in pages.
params = {'page_offset': 0, 'page_size': 0}
http_args = self._get_http_args(params)
response = requests.get(self.info_url, **http_args)
try:
response.raise_for_status()
except requests.HTTPError as err:
six.raise_from(RemoteCatalogError(
"Failed to fetch metadata."), err)
info = msgpack.unpackb(response.content, **unpack_kwargs)
self.metadata = info['metadata']
# The intake server now always provides a length, but the server may be
# running an older version of intake.
self._len = info.get('length')
self._entries.reset()
# If we are paginating (page_size is not None) and the server we are
# working with is new enough to support pagination, info['sources']
# should be empty. If either of those things is not true,
# info['sources'] will contain all the entries and we should cache them
# now.
if info['sources']:
# Signal that we are not paginating, even if we were asked to.
self._page_size = None
self._entries._page_cache.update(
{source['name']: RemoteCatalogEntry(
url=self.url,
getenv=self.getenv,
getshell=self.getshell,
auth=self.auth,
http_args=self.http_args, **source)
for source in info['sources']}) | [
"def",
"_load",
"(",
"self",
")",
":",
"# This will not immediately fetch any sources (entries). It will lazily",
"# fetch sources from the server in paginated blocks when this Catalog",
"# is iterated over. It will fetch specific sources when they are",
"# accessed in this Catalog via __getitem__.",
"if",
"self",
".",
"page_size",
"is",
"None",
":",
"# Fetch all source info.",
"params",
"=",
"{",
"}",
"else",
":",
"# Just fetch the metadata now; fetch source info later in pages.",
"params",
"=",
"{",
"'page_offset'",
":",
"0",
",",
"'page_size'",
":",
"0",
"}",
"http_args",
"=",
"self",
".",
"_get_http_args",
"(",
"params",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"info_url",
",",
"*",
"*",
"http_args",
")",
"try",
":",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"err",
":",
"six",
".",
"raise_from",
"(",
"RemoteCatalogError",
"(",
"\"Failed to fetch metadata.\"",
")",
",",
"err",
")",
"info",
"=",
"msgpack",
".",
"unpackb",
"(",
"response",
".",
"content",
",",
"*",
"*",
"unpack_kwargs",
")",
"self",
".",
"metadata",
"=",
"info",
"[",
"'metadata'",
"]",
"# The intake server now always provides a length, but the server may be",
"# running an older version of intake.",
"self",
".",
"_len",
"=",
"info",
".",
"get",
"(",
"'length'",
")",
"self",
".",
"_entries",
".",
"reset",
"(",
")",
"# If we are paginating (page_size is not None) and the server we are",
"# working with is new enough to support pagination, info['sources']",
"# should be empty. If either of those things is not true,",
"# info['sources'] will contain all the entries and we should cache them",
"# now.",
"if",
"info",
"[",
"'sources'",
"]",
":",
"# Signal that we are not paginating, even if we were asked to.",
"self",
".",
"_page_size",
"=",
"None",
"self",
".",
"_entries",
".",
"_page_cache",
".",
"update",
"(",
"{",
"source",
"[",
"'name'",
"]",
":",
"RemoteCatalogEntry",
"(",
"url",
"=",
"self",
".",
"url",
",",
"getenv",
"=",
"self",
".",
"getenv",
",",
"getshell",
"=",
"self",
".",
"getshell",
",",
"auth",
"=",
"self",
".",
"auth",
",",
"http_args",
"=",
"self",
".",
"http_args",
",",
"*",
"*",
"source",
")",
"for",
"source",
"in",
"info",
"[",
"'sources'",
"]",
"}",
")"
] | Fetch metadata from remote. Entries are fetched lazily. | [
"Fetch",
"metadata",
"from",
"remote",
".",
"Entries",
"are",
"fetched",
"lazily",
"."
] | python | train |
getsentry/raven-python | raven/base.py | https://github.com/getsentry/raven-python/blob/d891c20f0f930153f508e9d698d9de42e910face/raven/base.py#L752-L781 | def send_encoded(self, message, auth_header=None, **kwargs):
"""
Given an already serialized message, signs the message and passes the
payload off to ``send_remote``.
"""
client_string = 'raven-python/%s' % (raven.VERSION,)
if not auth_header:
timestamp = time.time()
auth_header = get_auth_header(
protocol=self.protocol_version,
timestamp=timestamp,
client=client_string,
api_key=self.remote.public_key,
api_secret=self.remote.secret_key,
)
headers = {
'User-Agent': client_string,
'X-Sentry-Auth': auth_header,
'Content-Encoding': self.get_content_encoding(),
'Content-Type': 'application/octet-stream',
}
return self.send_remote(
url=self.remote.store_endpoint,
data=message,
headers=headers,
**kwargs
) | [
"def",
"send_encoded",
"(",
"self",
",",
"message",
",",
"auth_header",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"client_string",
"=",
"'raven-python/%s'",
"%",
"(",
"raven",
".",
"VERSION",
",",
")",
"if",
"not",
"auth_header",
":",
"timestamp",
"=",
"time",
".",
"time",
"(",
")",
"auth_header",
"=",
"get_auth_header",
"(",
"protocol",
"=",
"self",
".",
"protocol_version",
",",
"timestamp",
"=",
"timestamp",
",",
"client",
"=",
"client_string",
",",
"api_key",
"=",
"self",
".",
"remote",
".",
"public_key",
",",
"api_secret",
"=",
"self",
".",
"remote",
".",
"secret_key",
",",
")",
"headers",
"=",
"{",
"'User-Agent'",
":",
"client_string",
",",
"'X-Sentry-Auth'",
":",
"auth_header",
",",
"'Content-Encoding'",
":",
"self",
".",
"get_content_encoding",
"(",
")",
",",
"'Content-Type'",
":",
"'application/octet-stream'",
",",
"}",
"return",
"self",
".",
"send_remote",
"(",
"url",
"=",
"self",
".",
"remote",
".",
"store_endpoint",
",",
"data",
"=",
"message",
",",
"headers",
"=",
"headers",
",",
"*",
"*",
"kwargs",
")"
] | Given an already serialized message, signs the message and passes the
payload off to ``send_remote``. | [
"Given",
"an",
"already",
"serialized",
"message",
"signs",
"the",
"message",
"and",
"passes",
"the",
"payload",
"off",
"to",
"send_remote",
"."
] | python | train |
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L24723-L24745 | def put_event_multi_touch_string(self, count, contacts, scan_time):
""":py:func:`put_event_multi_touch`
in count of type int
:py:func:`put_event_multi_touch`
in contacts of type str
Contains information about all contacts:
"id1,x1,y1,inContact1,inRange1;...;idN,xN,yN,inContactN,inRangeN".
For example for two contacts: "0,10,20,1,1;1,30,40,1,1"
in scan_time of type int
:py:func:`put_event_multi_touch`
"""
if not isinstance(count, baseinteger):
raise TypeError("count can only be an instance of type baseinteger")
if not isinstance(contacts, basestring):
raise TypeError("contacts can only be an instance of type basestring")
if not isinstance(scan_time, baseinteger):
raise TypeError("scan_time can only be an instance of type baseinteger")
self._call("putEventMultiTouchString",
in_p=[count, contacts, scan_time]) | [
"def",
"put_event_multi_touch_string",
"(",
"self",
",",
"count",
",",
"contacts",
",",
"scan_time",
")",
":",
"if",
"not",
"isinstance",
"(",
"count",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"count can only be an instance of type baseinteger\"",
")",
"if",
"not",
"isinstance",
"(",
"contacts",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"contacts can only be an instance of type basestring\"",
")",
"if",
"not",
"isinstance",
"(",
"scan_time",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"scan_time can only be an instance of type baseinteger\"",
")",
"self",
".",
"_call",
"(",
"\"putEventMultiTouchString\"",
",",
"in_p",
"=",
"[",
"count",
",",
"contacts",
",",
"scan_time",
"]",
")"
] | :py:func:`put_event_multi_touch`
in count of type int
:py:func:`put_event_multi_touch`
in contacts of type str
Contains information about all contacts:
"id1,x1,y1,inContact1,inRange1;...;idN,xN,yN,inContactN,inRangeN".
For example for two contacts: "0,10,20,1,1;1,30,40,1,1"
in scan_time of type int
:py:func:`put_event_multi_touch` | [
":",
"py",
":",
"func",
":",
"put_event_multi_touch"
] | python | train |
bokeh/bokeh | bokeh/model.py | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/model.py#L838-L861 | def _visit_value_and_its_immediate_references(obj, visitor):
''' Recurse down Models, HasProps, and Python containers
The ordering in this function is to optimize performance. We check the
most common types (int, float, str) first so that we can quickly return in
the common case. We avoid isinstance and issubclass checks in a couple
places with `type` checks because isinstance checks can be slow.
'''
typ = type(obj)
if typ in _common_types: # short circuit on common base types
return
if typ is list or issubclass(typ, (list, tuple)): # check common containers
for item in obj:
_visit_value_and_its_immediate_references(item, visitor)
elif issubclass(typ, dict):
for key, value in iteritems(obj):
_visit_value_and_its_immediate_references(key, visitor)
_visit_value_and_its_immediate_references(value, visitor)
elif issubclass(typ, HasProps):
if issubclass(typ, Model):
visitor(obj)
else:
# this isn't a Model, so recurse into it
_visit_immediate_value_references(obj, visitor) | [
"def",
"_visit_value_and_its_immediate_references",
"(",
"obj",
",",
"visitor",
")",
":",
"typ",
"=",
"type",
"(",
"obj",
")",
"if",
"typ",
"in",
"_common_types",
":",
"# short circuit on common base types",
"return",
"if",
"typ",
"is",
"list",
"or",
"issubclass",
"(",
"typ",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# check common containers",
"for",
"item",
"in",
"obj",
":",
"_visit_value_and_its_immediate_references",
"(",
"item",
",",
"visitor",
")",
"elif",
"issubclass",
"(",
"typ",
",",
"dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"obj",
")",
":",
"_visit_value_and_its_immediate_references",
"(",
"key",
",",
"visitor",
")",
"_visit_value_and_its_immediate_references",
"(",
"value",
",",
"visitor",
")",
"elif",
"issubclass",
"(",
"typ",
",",
"HasProps",
")",
":",
"if",
"issubclass",
"(",
"typ",
",",
"Model",
")",
":",
"visitor",
"(",
"obj",
")",
"else",
":",
"# this isn't a Model, so recurse into it",
"_visit_immediate_value_references",
"(",
"obj",
",",
"visitor",
")"
] | Recurse down Models, HasProps, and Python containers
The ordering in this function is to optimize performance. We check the
most common types (int, float, str) first so that we can quickly return in
the common case. We avoid isinstance and issubclass checks in a couple
places with `type` checks because isinstance checks can be slow. | [
"Recurse",
"down",
"Models",
"HasProps",
"and",
"Python",
"containers"
] | python | train |
titusjan/argos | argos/inspector/pgplugins/pgctis.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/pgplugins/pgctis.py#L298-L312 | def setAutoRangeOff(self):
""" Turns off the auto range checkbox.
Calls _refreshNodeFromTarget, not _updateTargetFromNode, because setting auto range off
does not require a redraw of the target.
"""
# TODO: catch exceptions. How?
# /argos/hdf-eos/DeepBlue-SeaWiFS-1.0_L3_20100101_v002-20110527T191319Z.h5/aerosol_optical_thickness_stddev_ocean
if self.getRefreshBlocked():
logger.debug("setAutoRangeOff blocked for {}".format(self.nodeName))
return
if self.autoRangeCti:
self.autoRangeCti.data = False
self._forceRefreshAutoRange() | [
"def",
"setAutoRangeOff",
"(",
"self",
")",
":",
"# TODO: catch exceptions. How?",
"# /argos/hdf-eos/DeepBlue-SeaWiFS-1.0_L3_20100101_v002-20110527T191319Z.h5/aerosol_optical_thickness_stddev_ocean",
"if",
"self",
".",
"getRefreshBlocked",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"setAutoRangeOff blocked for {}\"",
".",
"format",
"(",
"self",
".",
"nodeName",
")",
")",
"return",
"if",
"self",
".",
"autoRangeCti",
":",
"self",
".",
"autoRangeCti",
".",
"data",
"=",
"False",
"self",
".",
"_forceRefreshAutoRange",
"(",
")"
] | Turns off the auto range checkbox.
Calls _refreshNodeFromTarget, not _updateTargetFromNode, because setting auto range off
does not require a redraw of the target. | [
"Turns",
"off",
"the",
"auto",
"range",
"checkbox",
".",
"Calls",
"_refreshNodeFromTarget",
"not",
"_updateTargetFromNode",
"because",
"setting",
"auto",
"range",
"off",
"does",
"not",
"require",
"a",
"redraw",
"of",
"the",
"target",
"."
] | python | train |
seleniumbase/SeleniumBase | seleniumbase/plugins/db_reporting_plugin.py | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/plugins/db_reporting_plugin.py#L49-L57 | def begin(self):
""" At the start of the run, we want to record the test
execution information in the database. """
exec_payload = ExecutionQueryPayload()
exec_payload.execution_start_time = int(time.time() * 1000)
self.execution_start_time = exec_payload.execution_start_time
exec_payload.guid = self.execution_guid
exec_payload.username = getpass.getuser()
self.testcase_manager.insert_execution_data(exec_payload) | [
"def",
"begin",
"(",
"self",
")",
":",
"exec_payload",
"=",
"ExecutionQueryPayload",
"(",
")",
"exec_payload",
".",
"execution_start_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
"self",
".",
"execution_start_time",
"=",
"exec_payload",
".",
"execution_start_time",
"exec_payload",
".",
"guid",
"=",
"self",
".",
"execution_guid",
"exec_payload",
".",
"username",
"=",
"getpass",
".",
"getuser",
"(",
")",
"self",
".",
"testcase_manager",
".",
"insert_execution_data",
"(",
"exec_payload",
")"
] | At the start of the run, we want to record the test
execution information in the database. | [
"At",
"the",
"start",
"of",
"the",
"run",
"we",
"want",
"to",
"record",
"the",
"test",
"execution",
"information",
"in",
"the",
"database",
"."
] | python | train |
rwl/pylon | pylon/io/dot.py | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/dot.py#L125-L133 | def write_branch_data(self, file, padding=" "):
""" Writes branch data in Graphviz DOT language.
"""
attrs = ['%s="%s"' % (k,v) for k,v in self.branch_attr.iteritems()]
attr_str = ", ".join(attrs)
for br in self.case.branches:
file.write("%s%s -> %s [%s];\n" % \
(padding, br.from_bus.name, br.to_bus.name, attr_str)) | [
"def",
"write_branch_data",
"(",
"self",
",",
"file",
",",
"padding",
"=",
"\" \"",
")",
":",
"attrs",
"=",
"[",
"'%s=\"%s\"'",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"branch_attr",
".",
"iteritems",
"(",
")",
"]",
"attr_str",
"=",
"\", \"",
".",
"join",
"(",
"attrs",
")",
"for",
"br",
"in",
"self",
".",
"case",
".",
"branches",
":",
"file",
".",
"write",
"(",
"\"%s%s -> %s [%s];\\n\"",
"%",
"(",
"padding",
",",
"br",
".",
"from_bus",
".",
"name",
",",
"br",
".",
"to_bus",
".",
"name",
",",
"attr_str",
")",
")"
] | Writes branch data in Graphviz DOT language. | [
"Writes",
"branch",
"data",
"in",
"Graphviz",
"DOT",
"language",
"."
] | python | train |
pazz/alot | alot/account.py | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/account.py#L316-L322 | def store_sent_mail(self, mail):
"""
stores mail (:class:`email.message.Message` or str) in send-store if
:attr:`sent_box` is set.
"""
if self.sent_box is not None:
return self.store_mail(self.sent_box, mail) | [
"def",
"store_sent_mail",
"(",
"self",
",",
"mail",
")",
":",
"if",
"self",
".",
"sent_box",
"is",
"not",
"None",
":",
"return",
"self",
".",
"store_mail",
"(",
"self",
".",
"sent_box",
",",
"mail",
")"
] | stores mail (:class:`email.message.Message` or str) in send-store if
:attr:`sent_box` is set. | [
"stores",
"mail",
"(",
":",
"class",
":",
"email",
".",
"message",
".",
"Message",
"or",
"str",
")",
"in",
"send",
"-",
"store",
"if",
":",
"attr",
":",
"sent_box",
"is",
"set",
"."
] | python | train |
psss/fmf | fmf/utils.py | https://github.com/psss/fmf/blob/419f2f195d92339b8f9e5d11c0bea0f303e5fd75/fmf/utils.py#L338-L355 | def _create_logger(name='fmf', level=None):
""" Create fmf logger """
# Create logger, handler and formatter
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setFormatter(Logging.ColoredFormatter())
logger.addHandler(handler)
# Save log levels in the logger itself (backward compatibility)
for level in Logging.LEVELS:
setattr(logger, level, getattr(logging, level))
# Additional logging constants and methods for cache and xmlrpc
logger.DATA = LOG_DATA
logger.CACHE = LOG_CACHE
logger.ALL = LOG_ALL
logger.cache = lambda message: logger.log(LOG_CACHE, message) # NOQA
logger.data = lambda message: logger.log(LOG_DATA, message) # NOQA
logger.all = lambda message: logger.log(LOG_ALL, message) # NOQA
return logger | [
"def",
"_create_logger",
"(",
"name",
"=",
"'fmf'",
",",
"level",
"=",
"None",
")",
":",
"# Create logger, handler and formatter",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handler",
".",
"setFormatter",
"(",
"Logging",
".",
"ColoredFormatter",
"(",
")",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"# Save log levels in the logger itself (backward compatibility)",
"for",
"level",
"in",
"Logging",
".",
"LEVELS",
":",
"setattr",
"(",
"logger",
",",
"level",
",",
"getattr",
"(",
"logging",
",",
"level",
")",
")",
"# Additional logging constants and methods for cache and xmlrpc",
"logger",
".",
"DATA",
"=",
"LOG_DATA",
"logger",
".",
"CACHE",
"=",
"LOG_CACHE",
"logger",
".",
"ALL",
"=",
"LOG_ALL",
"logger",
".",
"cache",
"=",
"lambda",
"message",
":",
"logger",
".",
"log",
"(",
"LOG_CACHE",
",",
"message",
")",
"# NOQA",
"logger",
".",
"data",
"=",
"lambda",
"message",
":",
"logger",
".",
"log",
"(",
"LOG_DATA",
",",
"message",
")",
"# NOQA",
"logger",
".",
"all",
"=",
"lambda",
"message",
":",
"logger",
".",
"log",
"(",
"LOG_ALL",
",",
"message",
")",
"# NOQA",
"return",
"logger"
] | Create fmf logger | [
"Create",
"fmf",
"logger"
] | python | train |
SheffieldML/GPy | GPy/util/netpbmfile.py | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/util/netpbmfile.py#L265-L272 | def _tofile(self, fh, pam=False):
"""Write Netbm file."""
fh.seek(0)
fh.write(self._header(pam))
data = self.asarray(copy=False)
if self.maxval == 1:
data = numpy.packbits(data, axis=-1)
data.tofile(fh) | [
"def",
"_tofile",
"(",
"self",
",",
"fh",
",",
"pam",
"=",
"False",
")",
":",
"fh",
".",
"seek",
"(",
"0",
")",
"fh",
".",
"write",
"(",
"self",
".",
"_header",
"(",
"pam",
")",
")",
"data",
"=",
"self",
".",
"asarray",
"(",
"copy",
"=",
"False",
")",
"if",
"self",
".",
"maxval",
"==",
"1",
":",
"data",
"=",
"numpy",
".",
"packbits",
"(",
"data",
",",
"axis",
"=",
"-",
"1",
")",
"data",
".",
"tofile",
"(",
"fh",
")"
] | Write Netpbm file. | [
"Write",
"Netbm",
"file",
"."
] | python | train |
CalebBell/ht | ht/hx.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/hx.py#L3254-L3362 | def NTU_from_P_E(P1, R1, Ntp, optimal=True):
r'''Returns the number of transfer units of a TEMA E type heat exchanger
with a specified (for side 1) thermal effectiveness `P1`, heat capacity
ratio `R1`, the number of tube passes `Ntp`, and for the two-pass case
whether or not the inlets are arranged optimally. The supported cases are
as follows:
* 1-1 TEMA E, shell fluid mixed
* 1-2 TEMA E, shell fluid mixed (this configuration is symmetric)
* 1-2 TEMA E, shell fluid split into two streams individually mixed
* 1-3 TEMA E, shell and tube fluids mixed, one parallel pass and two
counterflow passes (efficient)
* 1-3 TEMA E, shell and tube fluids mixed, two parallel passes and one
counterflow pass (inefficient)
* 1-N TEMA E, shell and tube fluids mixed, efficient counterflow
orientation, N an even number
Two of these cases have analytical solutions; the rest use numerical
solvers of varying quality.
The analytical solution to 1-1 TEMA E, shell fluid mixed (the same as pure
counterflow):
.. math::
NTU_1 = - \frac{1}{R_{1} - 1} \log{\left (\frac{P_{1} R_{1} - 1}{P_{1}
- 1} \right )}
1-2 TEMA E, shell fluid mixed:
.. math::
NTU_1 = \frac{2}{\sqrt{R_{1}^{2} + 1}} \log{\left (\sqrt{\frac{P_{1}
R_{1} - P_{1} \sqrt{R_{1}^{2} + 1} + P_{1} - 2}{P_{1} R_{1} + P_{1}
\sqrt{R_{1}^{2} + 1} + P_{1} - 2}} \right )}
Parameters
----------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
Ntp : int
Number of tube passes, 1, 2, 3, 4, or an even number [-]
optimal : bool, optional
Whether or not the arrangement is configured to give more of a
countercurrent and efficient (True) case or an inefficient parallel
case, [-]
Returns
-------
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Notes
-----
For odd numbers of tube passes greater than 3, an exception is raised.
For the 2 pass, unoptimal case, a bounded solver is used with NTU1 between
1E-11 and 100; the solution to any feasible P1 was found to lie in there.
For the 4 or a higher even number of pass case, the upper limit on NTU1
is 1000; this solver works pretty well, but as NTU1 reaches its limit the
change in P1 is so small a smaller but also correct solution is often
returned.
For both the optimal and unoptimal 3 tube pass case, a solution is only
returned if NTU1 is between 1E-11 and 10. These functions are extremely
mathematically frustrating, and as NTU1 rises above 10 catastrophic
cancellation quickly results in this expression finding a ZeroDivisionError.
The use of arbitrary prevision helps little - quickly 1000 digits are needed,
and then 1000000 digits, and so one. Using SymPy's rational number support
works better but is extremely slow for these complicated solutions.
Nevertheless, so long as a solution is between 1E-11 and 10, the solver is
quite robust.
Examples
--------
>>> NTU_from_P_E(P1=.58, R1=1/3., Ntp=2)
1.0381979240816719
'''
NTU_min = 1E-11
function = temperature_effectiveness_TEMA_E
if Ntp == 1:
return NTU_from_P_basic(P1, R1, subtype='counterflow')
elif Ntp == 2 and optimal:
# Nice analytical solution is available
# There are actually two roots but one of them is complex
x1 = R1*R1 + 1.
return 2.*log(((P1*R1 - P1*x1**0.5 + P1 - 2.)/(P1*R1 + P1*x1**0.5 + P1 - 2.))**0.5)*(x1)**-.5
elif Ntp == 2 and not optimal:
NTU_max = 1E2
# Can't find anywhere it needs to go above 70 to reach the maximum
elif Ntp == 3 and optimal:
# no pade could be found, just about the worst-conditioned problem
# I've ever found
# Higher starting values result in errors
NTU_max = 10
elif Ntp == 3 and not optimal:
# no pade could be found, just about the worst-conditioned problem
# I've ever found
NTU_max = 10
elif Ntp == 4 or Ntp %2 == 0:
NTU_max = 1E3
else:
raise Exception('For TEMA E shells with an odd number of tube passes more than 3, no solution is implemented.')
return _NTU_from_P_solver(P1, R1, NTU_min, NTU_max, function, Ntp=Ntp, optimal=optimal) | [
"def",
"NTU_from_P_E",
"(",
"P1",
",",
"R1",
",",
"Ntp",
",",
"optimal",
"=",
"True",
")",
":",
"NTU_min",
"=",
"1E-11",
"function",
"=",
"temperature_effectiveness_TEMA_E",
"if",
"Ntp",
"==",
"1",
":",
"return",
"NTU_from_P_basic",
"(",
"P1",
",",
"R1",
",",
"subtype",
"=",
"'counterflow'",
")",
"elif",
"Ntp",
"==",
"2",
"and",
"optimal",
":",
"# Nice analytical solution is available",
"# There are actualy two roots but one of them is complex",
"x1",
"=",
"R1",
"*",
"R1",
"+",
"1.",
"return",
"2.",
"*",
"log",
"(",
"(",
"(",
"P1",
"*",
"R1",
"-",
"P1",
"*",
"x1",
"**",
"0.5",
"+",
"P1",
"-",
"2.",
")",
"/",
"(",
"P1",
"*",
"R1",
"+",
"P1",
"*",
"x1",
"**",
"0.5",
"+",
"P1",
"-",
"2.",
")",
")",
"**",
"0.5",
")",
"*",
"(",
"x1",
")",
"**",
"-",
".5",
"elif",
"Ntp",
"==",
"2",
"and",
"not",
"optimal",
":",
"NTU_max",
"=",
"1E2",
"# Can't find anywhere it needs to go above 70 to reach the maximum",
"elif",
"Ntp",
"==",
"3",
"and",
"optimal",
":",
"# no pade could be found, just about the worst-conditioned problem",
"# I've ever found",
"# Higher starting values result in errors",
"NTU_max",
"=",
"10",
"elif",
"Ntp",
"==",
"3",
"and",
"not",
"optimal",
":",
"# no pade could be found, just about the worst-conditioned problem",
"# I've ever found",
"NTU_max",
"=",
"10",
"elif",
"Ntp",
"==",
"4",
"or",
"Ntp",
"%",
"2",
"==",
"0",
":",
"NTU_max",
"=",
"1E3",
"else",
":",
"raise",
"Exception",
"(",
"'For TEMA E shells with an odd number of tube passes more than 3, no solution is implemented.'",
")",
"return",
"_NTU_from_P_solver",
"(",
"P1",
",",
"R1",
",",
"NTU_min",
",",
"NTU_max",
",",
"function",
",",
"Ntp",
"=",
"Ntp",
",",
"optimal",
"=",
"optimal",
")"
] | r'''Returns the number of transfer units of a TEMA E type heat exchanger
with a specified (for side 1) thermal effectiveness `P1`, heat capacity
ratio `R1`, the number of tube passes `Ntp`, and for the two-pass case
whether or not the inlets are arranged optimally. The supported cases are
as follows:
* 1-1 TEMA E, shell fluid mixed
* 1-2 TEMA E, shell fluid mixed (this configuration is symmetric)
* 1-2 TEMA E, shell fluid split into two streams individually mixed
* 1-3 TEMA E, shell and tube fluids mixed, one parallel pass and two
counterflow passes (efficient)
* 1-3 TEMA E, shell and tube fluids mixed, two parallel passes and one
counterflow pass (inefficient)
* 1-N TEMA E, shell and tube fluids mixed, efficient counterflow
orientation, N an even number
Two of these cases have analytical solutions; the rest use numerical
solvers of varying quality.
The analytical solution to 1-1 TEMA E, shell fluid mixed (the same as pure
counterflow):
.. math::
NTU_1 = - \frac{1}{R_{1} - 1} \log{\left (\frac{P_{1} R_{1} - 1}{P_{1}
- 1} \right )}
1-2 TEMA E, shell fluid mixed:
.. math::
NTU_1 = \frac{2}{\sqrt{R_{1}^{2} + 1}} \log{\left (\sqrt{\frac{P_{1}
R_{1} - P_{1} \sqrt{R_{1}^{2} + 1} + P_{1} - 2}{P_{1} R_{1} + P_{1}
\sqrt{R_{1}^{2} + 1} + P_{1} - 2}} \right )}
Parameters
----------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
Ntp : int
Number of tube passes, 1, 2, 3, 4, or an even number [-]
optimal : bool, optional
Whether or not the arrangement is configured to give more of a
countercurrent and efficient (True) case or an inefficient parallel
case, [-]
Returns
-------
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Notes
-----
For odd numbers of tube passes greater than 3, an exception is raised.
For the 2 pass, unoptimal case, a bounded solver is used with NTU1 between
1E-11 and 100; the solution to any feasible P1 was found to lie in there.
For the 4 or a higher even number of pass case, the upper limit on NTU1
is 1000; this solver works pretty well, but as NTU1 reaches its limit the
change in P1 is so small a smaller but also correct solution is often
returned.
For both the optimal and unoptimal 3 tube pass case, a solution is only
returned if NTU1 is between 1E-11 and 10. These functions are extremely
mathematically frustrating, and as NTU1 rises above 10 catastrophic
cancellation quickly results in this expression finding a ZeroDivisionError.
The use of arbitrary precision helps little - quickly 1000 digits are needed,
and then 1000000 digits, and so on. Using SymPy's rational number support
works better but is extremely slow for these complicated solutions.
Nevertheless, so long as a solution is between 1E-11 and 10, the solver is
quite robust.
Examples
--------
>>> NTU_from_P_E(P1=.58, R1=1/3., Ntp=2)
1.0381979240816719 | [
"r",
"Returns",
"the",
"number",
"of",
"transfer",
"units",
"of",
"a",
"TEMA",
"E",
"type",
"heat",
"exchanger",
"with",
"a",
"specified",
"(",
"for",
"side",
"1",
")",
"thermal",
"effectiveness",
"P1",
"heat",
"capacity",
"ratio",
"R1",
"the",
"number",
"of",
"tube",
"passes",
"Ntp",
"and",
"for",
"the",
"two",
"-",
"pass",
"case",
"whether",
"or",
"not",
"the",
"inlets",
"are",
"arranged",
"optimally",
".",
"The",
"supported",
"cases",
"are",
"as",
"follows",
":",
"*",
"1",
"-",
"1",
"TEMA",
"E",
"shell",
"fluid",
"mixed",
"*",
"1",
"-",
"2",
"TEMA",
"E",
"shell",
"fluid",
"mixed",
"(",
"this",
"configuration",
"is",
"symmetric",
")",
"*",
"1",
"-",
"2",
"TEMA",
"E",
"shell",
"fluid",
"split",
"into",
"two",
"steams",
"individually",
"mixed",
"*",
"1",
"-",
"3",
"TEMA",
"E",
"shell",
"and",
"tube",
"fluids",
"mixed",
"one",
"parallel",
"pass",
"and",
"two",
"counterflow",
"passes",
"(",
"efficient",
")",
"*",
"1",
"-",
"3",
"TEMA",
"E",
"shell",
"and",
"tube",
"fluids",
"mixed",
"two",
"parallel",
"passes",
"and",
"one",
"counteflow",
"pass",
"(",
"inefficient",
")",
"*",
"1",
"-",
"N",
"TEMA",
"E",
"shall",
"and",
"tube",
"fluids",
"mixed",
"efficient",
"counterflow",
"orientation",
"N",
"an",
"even",
"number",
"Two",
"of",
"these",
"cases",
"have",
"analytical",
"solutions",
";",
"the",
"rest",
"use",
"numerical",
"solvers",
"of",
"varying",
"quality",
".",
"The",
"analytical",
"solution",
"to",
"1",
"-",
"1",
"TEMA",
"E",
"shell",
"fluid",
"mixed",
"(",
"the",
"same",
"as",
"pure",
"counterflow",
")",
":",
"..",
"math",
"::",
"NTU_1",
"=",
"-",
"\\",
"frac",
"{",
"1",
"}",
"{",
"R_",
"{",
"1",
"}",
"-",
"1",
"}",
"\\",
"log",
"{",
"\\",
"left",
"(",
"\\",
"frac",
"{",
"P_",
"{",
"1",
"}",
"R_",
"{",
"1",
"}",
"-",
"1",
"}",
"{",
"P_",
"{",
"1",
"}",
"-",
"1",
"}",
"\\",
"right",
")",
"}",
"1",
"-",
"2",
"TEMA",
"E",
"shell",
"fluid",
"mixed",
":",
"..",
"math",
"::",
"NTU_1",
"=",
"\\",
"frac",
"{",
"2",
"}",
"{",
"\\",
"sqrt",
"{",
"R_",
"{",
"1",
"}",
"^",
"{",
"2",
"}",
"+",
"1",
"}}",
"\\",
"log",
"{",
"\\",
"left",
"(",
"\\",
"sqrt",
"{",
"\\",
"frac",
"{",
"P_",
"{",
"1",
"}",
"R_",
"{",
"1",
"}",
"-",
"P_",
"{",
"1",
"}",
"\\",
"sqrt",
"{",
"R_",
"{",
"1",
"}",
"^",
"{",
"2",
"}",
"+",
"1",
"}",
"+",
"P_",
"{",
"1",
"}",
"-",
"2",
"}",
"{",
"P_",
"{",
"1",
"}",
"R_",
"{",
"1",
"}",
"+",
"P_",
"{",
"1",
"}",
"\\",
"sqrt",
"{",
"R_",
"{",
"1",
"}",
"^",
"{",
"2",
"}",
"+",
"1",
"}",
"+",
"P_",
"{",
"1",
"}",
"-",
"2",
"}}",
"\\",
"right",
")",
"}",
"Parameters",
"----------",
"P1",
":",
"float",
"Thermal",
"effectiveness",
"of",
"the",
"heat",
"exchanger",
"in",
"the",
"P",
"-",
"NTU",
"method",
"calculated",
"with",
"respect",
"to",
"stream",
"1",
"[",
"-",
"]",
"R1",
":",
"float",
"Heat",
"capacity",
"ratio",
"of",
"the",
"heat",
"exchanger",
"in",
"the",
"P",
"-",
"NTU",
"method",
"calculated",
"with",
"respect",
"to",
"stream",
"1",
"(",
"shell",
"side",
"=",
"1",
"tube",
"side",
"=",
"2",
")",
"[",
"-",
"]",
"Ntp",
":",
"int",
"Number",
"of",
"tube",
"passes",
"1",
"2",
"3",
"4",
"or",
"an",
"even",
"number",
"[",
"-",
"]",
"optimal",
":",
"bool",
"optional",
"Whether",
"or",
"not",
"the",
"arrangement",
"is",
"configured",
"to",
"give",
"more",
"of",
"a",
"countercurrent",
"and",
"efficient",
"(",
"True",
")",
"case",
"or",
"an",
"inefficient",
"parallel",
"case",
"[",
"-",
"]"
] | python | train |
nerdvegas/rez | src/rez/utils/backcompat.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/backcompat.py#L20-L37 | def convert_old_variant_handle(handle_dict):
"""Convert a variant handle from serialize_version < 4.0."""
old_variables = handle_dict.get("variables", {})
variables = dict(repository_type="filesystem")
for old_key, key in variant_key_conversions.iteritems():
value = old_variables.get(old_key)
#if value is not None:
variables[key] = value
path = handle_dict["path"]
filename = os.path.basename(path)
if os.path.splitext(filename)[0] == "package":
key = "filesystem.variant"
else:
key = "filesystem.variant.combined"
return dict(key=key, variables=variables) | [
"def",
"convert_old_variant_handle",
"(",
"handle_dict",
")",
":",
"old_variables",
"=",
"handle_dict",
".",
"get",
"(",
"\"variables\"",
",",
"{",
"}",
")",
"variables",
"=",
"dict",
"(",
"repository_type",
"=",
"\"filesystem\"",
")",
"for",
"old_key",
",",
"key",
"in",
"variant_key_conversions",
".",
"iteritems",
"(",
")",
":",
"value",
"=",
"old_variables",
".",
"get",
"(",
"old_key",
")",
"#if value is not None:",
"variables",
"[",
"key",
"]",
"=",
"value",
"path",
"=",
"handle_dict",
"[",
"\"path\"",
"]",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"==",
"\"package\"",
":",
"key",
"=",
"\"filesystem.variant\"",
"else",
":",
"key",
"=",
"\"filesystem.variant.combined\"",
"return",
"dict",
"(",
"key",
"=",
"key",
",",
"variables",
"=",
"variables",
")"
] | Convert a variant handle from serialize_version < 4.0. | [
"Convert",
"a",
"variant",
"handle",
"from",
"serialize_version",
"<",
"4",
".",
"0",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/explorer/widgets.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L197-L200 | def set_name_filters(self, name_filters):
"""Set name filters"""
self.name_filters = name_filters
self.fsmodel.setNameFilters(name_filters) | [
"def",
"set_name_filters",
"(",
"self",
",",
"name_filters",
")",
":",
"self",
".",
"name_filters",
"=",
"name_filters",
"self",
".",
"fsmodel",
".",
"setNameFilters",
"(",
"name_filters",
")"
] | Set name filters | [
"Set",
"name",
"filters"
] | python | train |
xenadevel/PyXenaManager | xenamanager/api/xena_cli.py | https://github.com/xenadevel/PyXenaManager/blob/384ca265f73044b8a8b471f5dd7a6103fc54f4df/xenamanager/api/xena_cli.py#L34-L43 | def add_chassis(self, chassis):
"""
:param chassis: chassis object
"""
self.chassis_list[chassis] = XenaSocket(self.logger, chassis.ip, chassis.port)
self.chassis_list[chassis].connect()
KeepAliveThread(self.chassis_list[chassis]).start()
self.send_command(chassis, 'c_logon', '"{}"'.format(chassis.password))
self.send_command(chassis, 'c_owner', '"{}"'.format(chassis.owner)) | [
"def",
"add_chassis",
"(",
"self",
",",
"chassis",
")",
":",
"self",
".",
"chassis_list",
"[",
"chassis",
"]",
"=",
"XenaSocket",
"(",
"self",
".",
"logger",
",",
"chassis",
".",
"ip",
",",
"chassis",
".",
"port",
")",
"self",
".",
"chassis_list",
"[",
"chassis",
"]",
".",
"connect",
"(",
")",
"KeepAliveThread",
"(",
"self",
".",
"chassis_list",
"[",
"chassis",
"]",
")",
".",
"start",
"(",
")",
"self",
".",
"send_command",
"(",
"chassis",
",",
"'c_logon'",
",",
"'\"{}\"'",
".",
"format",
"(",
"chassis",
".",
"password",
")",
")",
"self",
".",
"send_command",
"(",
"chassis",
",",
"'c_owner'",
",",
"'\"{}\"'",
".",
"format",
"(",
"chassis",
".",
"owner",
")",
")"
] | :param chassis: chassis object | [
":",
"param",
"ip",
":",
"chassis",
"object"
] | python | train |
usc-isi-i2/etk | etk/extractors/readability/readability.py | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/readability/readability.py#L146-L217 | def summary(self, html_partial=False):
"""Generate the summary of the html docuemnt
:param html_partial: return only the div of the document, don't wrap
in html and body tags.
"""
try:
ruthless = True
#Added recall priority flag
recallPriority = self.recallPriority
if recallPriority:
ruthless = False
self.TEXT_LENGTH_THRESHOLD = 2
self.RETRY_LENGTH = 25
while True:
self._html(True)
for i in self.tags(self.html, 'script', 'style'):
i.drop_tree()
for i in self.tags(self.html, 'body'):
i.set('id', 'readabilityBody')
if ruthless:
self.remove_unlikely_candidates()
self.transform_misused_divs_into_paragraphs()
candidates = self.score_paragraphs()
best_candidates = self.select_best_candidates(candidates)
if best_candidates and not recallPriority:
article = self.get_article_from_candidates(candidates,best_candidates,html_partial)
else:
if ruthless and not recallPriority:
log.debug("ruthless removal did not work. ")
ruthless = False
self.debug(
("ended up stripping too much - "
"going for a safer _parse"))
# try again
continue
else:
log.debug(
("Ruthless and lenient parsing did not work. "
"Returning raw html"))
article = self.html.find('body')
if article is None:
article = self.html
cleaned_article = self.sanitize(article, candidates)
# print(cleaned_article)
article_length = len(cleaned_article or '')
retry_length = self.options.get(
'retry_length',
self.RETRY_LENGTH)
of_acceptable_length = article_length >= retry_length
if ruthless and not of_acceptable_length:
ruthless = False
continue
else:
return cleaned_article
except Exception as e:
print("error: %s", e)
log.exception('error getting summary: ')
raise Exception(Unparseable(str(e)), None, sys.exc_info()[2]) | [
"def",
"summary",
"(",
"self",
",",
"html_partial",
"=",
"False",
")",
":",
"try",
":",
"ruthless",
"=",
"True",
"#Added recall priority flag",
"recallPriority",
"=",
"self",
".",
"recallPriority",
"if",
"recallPriority",
":",
"ruthless",
"=",
"False",
"self",
".",
"TEXT_LENGTH_THRESHOLD",
"=",
"2",
"self",
".",
"RETRY_LENGTH",
"=",
"25",
"while",
"True",
":",
"self",
".",
"_html",
"(",
"True",
")",
"for",
"i",
"in",
"self",
".",
"tags",
"(",
"self",
".",
"html",
",",
"'script'",
",",
"'style'",
")",
":",
"i",
".",
"drop_tree",
"(",
")",
"for",
"i",
"in",
"self",
".",
"tags",
"(",
"self",
".",
"html",
",",
"'body'",
")",
":",
"i",
".",
"set",
"(",
"'id'",
",",
"'readabilityBody'",
")",
"if",
"ruthless",
":",
"self",
".",
"remove_unlikely_candidates",
"(",
")",
"self",
".",
"transform_misused_divs_into_paragraphs",
"(",
")",
"candidates",
"=",
"self",
".",
"score_paragraphs",
"(",
")",
"best_candidates",
"=",
"self",
".",
"select_best_candidates",
"(",
"candidates",
")",
"if",
"best_candidates",
"and",
"not",
"recallPriority",
":",
"article",
"=",
"self",
".",
"get_article_from_candidates",
"(",
"candidates",
",",
"best_candidates",
",",
"html_partial",
")",
"else",
":",
"if",
"ruthless",
"and",
"not",
"recallPriority",
":",
"log",
".",
"debug",
"(",
"\"ruthless removal did not work. \"",
")",
"ruthless",
"=",
"False",
"self",
".",
"debug",
"(",
"(",
"\"ended up stripping too much - \"",
"\"going for a safer _parse\"",
")",
")",
"# try again",
"continue",
"else",
":",
"log",
".",
"debug",
"(",
"(",
"\"Ruthless and lenient parsing did not work. \"",
"\"Returning raw html\"",
")",
")",
"article",
"=",
"self",
".",
"html",
".",
"find",
"(",
"'body'",
")",
"if",
"article",
"is",
"None",
":",
"article",
"=",
"self",
".",
"html",
"cleaned_article",
"=",
"self",
".",
"sanitize",
"(",
"article",
",",
"candidates",
")",
"# print(cleaned_article)",
"article_length",
"=",
"len",
"(",
"cleaned_article",
"or",
"''",
")",
"retry_length",
"=",
"self",
".",
"options",
".",
"get",
"(",
"'retry_length'",
",",
"self",
".",
"RETRY_LENGTH",
")",
"of_acceptable_length",
"=",
"article_length",
">=",
"retry_length",
"if",
"ruthless",
"and",
"not",
"of_acceptable_length",
":",
"ruthless",
"=",
"False",
"continue",
"else",
":",
"return",
"cleaned_article",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"error: %s\"",
",",
"e",
")",
"log",
".",
"exception",
"(",
"'error getting summary: '",
")",
"raise",
"Exception",
"(",
"Unparseable",
"(",
"str",
"(",
"e",
")",
")",
",",
"None",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")"
] | Generate the summary of the html document
:param html_partial: return only the div of the document, don't wrap
in html and body tags. | [
"Generate",
"the",
"summary",
"of",
"the",
"html",
"docuemnt"
] | python | train |
swisscom/cleanerversion | versions/models.py | https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/models.py#L500-L516 | def _fetch_all(self):
"""
Completely overrides the QuerySet._fetch_all method by adding the
timestamp to all objects
:return: See django.db.models.query.QuerySet._fetch_all for return
values
"""
if self._result_cache is None:
self._result_cache = list(self.iterator())
# TODO: Do we have to test for ValuesListIterable, ValuesIterable,
# and FlatValuesListIterable here?
if self._iterable_class == ModelIterable:
for x in self._result_cache:
self._set_item_querytime(x)
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects() | [
"def",
"_fetch_all",
"(",
"self",
")",
":",
"if",
"self",
".",
"_result_cache",
"is",
"None",
":",
"self",
".",
"_result_cache",
"=",
"list",
"(",
"self",
".",
"iterator",
"(",
")",
")",
"# TODO: Do we have to test for ValuesListIterable, ValuesIterable,",
"# and FlatValuesListIterable here?",
"if",
"self",
".",
"_iterable_class",
"==",
"ModelIterable",
":",
"for",
"x",
"in",
"self",
".",
"_result_cache",
":",
"self",
".",
"_set_item_querytime",
"(",
"x",
")",
"if",
"self",
".",
"_prefetch_related_lookups",
"and",
"not",
"self",
".",
"_prefetch_done",
":",
"self",
".",
"_prefetch_related_objects",
"(",
")"
] | Completely overrides the QuerySet._fetch_all method by adding the
timestamp to all objects
:return: See django.db.models.query.QuerySet._fetch_all for return
values | [
"Completely",
"overrides",
"the",
"QuerySet",
".",
"_fetch_all",
"method",
"by",
"adding",
"the",
"timestamp",
"to",
"all",
"objects"
] | python | train |
Alignak-monitoring/alignak | alignak/objects/realm.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/realm.py#L474-L519 | def get_links_for_a_scheduler(self, pollers, reactionners, brokers):
"""Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict
"""
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'brokers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
try:
for poller in self.pollers + self.get_potential_satellites_by_type(pollers, "poller"):
if poller in pollers:
poller = pollers[poller]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner in self.reactionners + self.get_potential_satellites_by_type(
reactionners, "reactionner"):
if reactionner in reactionners:
reactionner = reactionners[reactionner]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for broker in self.brokers + self.get_potential_satellites_by_type(brokers, "broker"):
if broker in brokers:
broker = brokers[broker]
cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
except Exception as exp: # pylint: disable=broad-except
logger.exception("realm.get_links_for_a_scheduler: %s", exp)
# for poller in self.get_potential_satellites_by_type(pollers, "poller"):
# logger.info("Poller: %s", poller)
# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
#
# for reactionner in self.get_potential_satellites_by_type(reactionners, "reactionner"):
# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
#
# for broker in self.get_potential_satellites_by_type(brokers, "broker"):
# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
return cfg | [
"def",
"get_links_for_a_scheduler",
"(",
"self",
",",
"pollers",
",",
"reactionners",
",",
"brokers",
")",
":",
"# Create void satellite links",
"cfg",
"=",
"{",
"'pollers'",
":",
"{",
"}",
",",
"'reactionners'",
":",
"{",
"}",
",",
"'brokers'",
":",
"{",
"}",
",",
"}",
"# Our self.daemons are only identifiers... that we use to fill the satellite links",
"try",
":",
"for",
"poller",
"in",
"self",
".",
"pollers",
"+",
"self",
".",
"get_potential_satellites_by_type",
"(",
"pollers",
",",
"\"poller\"",
")",
":",
"if",
"poller",
"in",
"pollers",
":",
"poller",
"=",
"pollers",
"[",
"poller",
"]",
"cfg",
"[",
"'pollers'",
"]",
"[",
"poller",
".",
"uuid",
"]",
"=",
"poller",
".",
"give_satellite_cfg",
"(",
")",
"for",
"reactionner",
"in",
"self",
".",
"reactionners",
"+",
"self",
".",
"get_potential_satellites_by_type",
"(",
"reactionners",
",",
"\"reactionner\"",
")",
":",
"if",
"reactionner",
"in",
"reactionners",
":",
"reactionner",
"=",
"reactionners",
"[",
"reactionner",
"]",
"cfg",
"[",
"'reactionners'",
"]",
"[",
"reactionner",
".",
"uuid",
"]",
"=",
"reactionner",
".",
"give_satellite_cfg",
"(",
")",
"for",
"broker",
"in",
"self",
".",
"brokers",
"+",
"self",
".",
"get_potential_satellites_by_type",
"(",
"brokers",
",",
"\"broker\"",
")",
":",
"if",
"broker",
"in",
"brokers",
":",
"broker",
"=",
"brokers",
"[",
"broker",
"]",
"cfg",
"[",
"'brokers'",
"]",
"[",
"broker",
".",
"uuid",
"]",
"=",
"broker",
".",
"give_satellite_cfg",
"(",
")",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"exception",
"(",
"\"realm.get_links_for_a_scheduler: %s\"",
",",
"exp",
")",
"# for poller in self.get_potential_satellites_by_type(pollers, \"poller\"):",
"# logger.info(\"Poller: %s\", poller)",
"# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()",
"#",
"# for reactionner in self.get_potential_satellites_by_type(reactionners, \"reactionner\"):",
"# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()",
"#",
"# for broker in self.get_potential_satellites_by_type(brokers, \"broker\"):",
"# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()",
"return",
"cfg"
] | Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict | [
"Get",
"a",
"configuration",
"dictionary",
"with",
"pollers",
"reactionners",
"and",
"brokers",
"links",
"for",
"a",
"scheduler"
] | python | train |
nvbn/thefuck | thefuck/corrector.py | https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/corrector.py#L81-L92 | def get_corrected_commands(command):
"""Returns generator with sorted and unique corrected commands.
:type command: thefuck.types.Command
:rtype: Iterable[thefuck.types.CorrectedCommand]
"""
corrected_commands = (
corrected for rule in get_rules()
if rule.is_match(command)
for corrected in rule.get_corrected_commands(command))
return organize_commands(corrected_commands) | [
"def",
"get_corrected_commands",
"(",
"command",
")",
":",
"corrected_commands",
"=",
"(",
"corrected",
"for",
"rule",
"in",
"get_rules",
"(",
")",
"if",
"rule",
".",
"is_match",
"(",
"command",
")",
"for",
"corrected",
"in",
"rule",
".",
"get_corrected_commands",
"(",
"command",
")",
")",
"return",
"organize_commands",
"(",
"corrected_commands",
")"
] | Returns generator with sorted and unique corrected commands.
:type command: thefuck.types.Command
:rtype: Iterable[thefuck.types.CorrectedCommand] | [
"Returns",
"generator",
"with",
"sorted",
"and",
"unique",
"corrected",
"commands",
"."
] | python | train |
twilio/twilio-python | twilio/rest/preview/wireless/sim/usage.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/wireless/sim/usage.py#L127-L145 | def fetch(self, end=values.unset, start=values.unset):
"""
Fetch a UsageInstance
:param unicode end: The end
:param unicode start: The start
:returns: Fetched UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageInstance
"""
params = values.of({'End': end, 'Start': start, })
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return UsageInstance(self._version, payload, sim_sid=self._solution['sim_sid'], ) | [
"def",
"fetch",
"(",
"self",
",",
"end",
"=",
"values",
".",
"unset",
",",
"start",
"=",
"values",
".",
"unset",
")",
":",
"params",
"=",
"values",
".",
"of",
"(",
"{",
"'End'",
":",
"end",
",",
"'Start'",
":",
"start",
",",
"}",
")",
"payload",
"=",
"self",
".",
"_version",
".",
"fetch",
"(",
"'GET'",
",",
"self",
".",
"_uri",
",",
"params",
"=",
"params",
",",
")",
"return",
"UsageInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"sim_sid",
"=",
"self",
".",
"_solution",
"[",
"'sim_sid'",
"]",
",",
")"
] | Fetch a UsageInstance
:param unicode end: The end
:param unicode start: The start
:returns: Fetched UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageInstance | [
"Fetch",
"a",
"UsageInstance"
] | python | train |
androguard/androguard | androguard/core/bytecodes/axml/__init__.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/axml/__init__.py#L1034-L1085 | def _fix_name(self, prefix, name):
"""
Apply some fixes to element names and attribute names.
Try to conform to:
> Like element names, attribute names are case-sensitive and must start with a letter or underscore.
> The rest of the name can contain letters, digits, hyphens, underscores, and periods.
See: https://msdn.microsoft.com/en-us/library/ms256152(v=vs.110).aspx
This function tries to fix some broken namespace mappings.
In some cases, the namespace prefix is inside the name and not in the prefix field.
Then, the tag name will usually look like 'android:foobar'.
If and only if the namespace prefix is inside the namespace mapping and the actual prefix field is empty,
we will strip the prefix from the attribute name and return the fixed prefix URI instead.
Otherwise replacement rules will be applied.
The replacement rules work in that way, that all unwanted characters are replaced by underscores.
In other words, all characters except the ones listed above are replaced.
:param name: Name of the attribute or tag
:param prefix: The existing prefix uri as found in the AXML chunk
:return: a fixed version of prefix and name
:rtype: tuple
"""
if not name[0].isalpha() and name[0] != "_":
log.warning("Invalid start for name '{}'. "
"XML name must start with a letter.".format(name))
self.packerwarning = True
name = "_{}".format(name)
if name.startswith("android:") and prefix == '' and 'android' in self.axml.nsmap:
# Seems be a common thing...
log.info("Name '{}' starts with 'android:' prefix but 'android' is a known prefix. Replacing prefix.".format(name))
prefix = self._print_namespace(self.axml.nsmap['android'])
name = name[len("android:"):]
# It looks like this is some kind of packer... Not sure though.
self.packerwarning = True
elif ":" in name and prefix == '':
self.packerwarning = True
embedded_prefix, new_name = name.split(":", 1)
if embedded_prefix in self.axml.nsmap:
log.info("Prefix '{}' is in namespace mapping, assume that it is a prefix.")
prefix = self._print_namespace(self.axml.nsmap[embedded_prefix])
name = new_name
else:
# Print out an extra warning
log.warning("Confused: name contains a unknown namespace prefix: '{}'. "
"This is either a broken AXML file or some attempt to break stuff.".format(name))
if not re.match(r"^[a-zA-Z0-9._-]*$", name):
log.warning("Name '{}' contains invalid characters!".format(name))
self.packerwarning = True
name = re.sub(r"[^a-zA-Z0-9._-]", "_", name)
return prefix, name | [
"def",
"_fix_name",
"(",
"self",
",",
"prefix",
",",
"name",
")",
":",
"if",
"not",
"name",
"[",
"0",
"]",
".",
"isalpha",
"(",
")",
"and",
"name",
"[",
"0",
"]",
"!=",
"\"_\"",
":",
"log",
".",
"warning",
"(",
"\"Invalid start for name '{}'. \"",
"\"XML name must start with a letter.\"",
".",
"format",
"(",
"name",
")",
")",
"self",
".",
"packerwarning",
"=",
"True",
"name",
"=",
"\"_{}\"",
".",
"format",
"(",
"name",
")",
"if",
"name",
".",
"startswith",
"(",
"\"android:\"",
")",
"and",
"prefix",
"==",
"''",
"and",
"'android'",
"in",
"self",
".",
"axml",
".",
"nsmap",
":",
"# Seems be a common thing...",
"log",
".",
"info",
"(",
"\"Name '{}' starts with 'android:' prefix but 'android' is a known prefix. Replacing prefix.\"",
".",
"format",
"(",
"name",
")",
")",
"prefix",
"=",
"self",
".",
"_print_namespace",
"(",
"self",
".",
"axml",
".",
"nsmap",
"[",
"'android'",
"]",
")",
"name",
"=",
"name",
"[",
"len",
"(",
"\"android:\"",
")",
":",
"]",
"# It looks like this is some kind of packer... Not sure though.",
"self",
".",
"packerwarning",
"=",
"True",
"elif",
"\":\"",
"in",
"name",
"and",
"prefix",
"==",
"''",
":",
"self",
".",
"packerwarning",
"=",
"True",
"embedded_prefix",
",",
"new_name",
"=",
"name",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"if",
"embedded_prefix",
"in",
"self",
".",
"axml",
".",
"nsmap",
":",
"log",
".",
"info",
"(",
"\"Prefix '{}' is in namespace mapping, assume that it is a prefix.\"",
")",
"prefix",
"=",
"self",
".",
"_print_namespace",
"(",
"self",
".",
"axml",
".",
"nsmap",
"[",
"embedded_prefix",
"]",
")",
"name",
"=",
"new_name",
"else",
":",
"# Print out an extra warning",
"log",
".",
"warning",
"(",
"\"Confused: name contains a unknown namespace prefix: '{}'. \"",
"\"This is either a broken AXML file or some attempt to break stuff.\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"not",
"re",
".",
"match",
"(",
"r\"^[a-zA-Z0-9._-]*$\"",
",",
"name",
")",
":",
"log",
".",
"warning",
"(",
"\"Name '{}' contains invalid characters!\"",
".",
"format",
"(",
"name",
")",
")",
"self",
".",
"packerwarning",
"=",
"True",
"name",
"=",
"re",
".",
"sub",
"(",
"r\"[^a-zA-Z0-9._-]\"",
",",
"\"_\"",
",",
"name",
")",
"return",
"prefix",
",",
"name"
] | Apply some fixes to element named and attribute names.
Try to get conform to:
> Like element names, attribute names are case-sensitive and must start with a letter or underscore.
> The rest of the name can contain letters, digits, hyphens, underscores, and periods.
See: https://msdn.microsoft.com/en-us/library/ms256152(v=vs.110).aspx
This function tries to fix some broken namespace mappings.
In some cases, the namespace prefix is inside the name and not in the prefix field.
Then, the tag name will usually look like 'android:foobar'.
If and only if the namespace prefix is inside the namespace mapping and the actual prefix field is empty,
we will strip the prefix from the attribute name and return the fixed prefix URI instead.
Otherwise replacement rules will be applied.
The replacement rules work in that way, that all unwanted characters are replaced by underscores.
In other words, all characters except the ones listed above are replaced.
:param name: Name of the attribute or tag
:param prefix: The existing prefix uri as found in the AXML chunk
:return: a fixed version of prefix and name
:rtype: tuple | [
"Apply",
"some",
"fixes",
"to",
"element",
"named",
"and",
"attribute",
"names",
".",
"Try",
"to",
"get",
"conform",
"to",
":",
">",
"Like",
"element",
"names",
"attribute",
"names",
"are",
"case",
"-",
"sensitive",
"and",
"must",
"start",
"with",
"a",
"letter",
"or",
"underscore",
".",
">",
"The",
"rest",
"of",
"the",
"name",
"can",
"contain",
"letters",
"digits",
"hyphens",
"underscores",
"and",
"periods",
".",
"See",
":",
"https",
":",
"//",
"msdn",
".",
"microsoft",
".",
"com",
"/",
"en",
"-",
"us",
"/",
"library",
"/",
"ms256152",
"(",
"v",
"=",
"vs",
".",
"110",
")",
".",
"aspx"
] | python | train |
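A standalone sketch of the name-fixing rules described in the docstring above. The AXMLPrinter instance and its namespace map are assumptions here, replaced by a plain dict, so this illustrates the rules rather than the class itself:

import re

def fix_name(prefix, name, nsmap):
    # XML names must start with a letter or underscore.
    if not name[0].isalpha() and name[0] != "_":
        name = "_{}".format(name)
    # If the prefix is embedded in the name (e.g. "android:versionCode")
    # while the prefix field is empty, move it into the prefix.
    if ":" in name and prefix == "":
        embedded_prefix, new_name = name.split(":", 1)
        if embedded_prefix in nsmap:
            prefix = nsmap[embedded_prefix]
            name = new_name
    # Replace every character outside [a-zA-Z0-9._-] with an underscore.
    if not re.match(r"^[a-zA-Z0-9._-]*$", name):
        name = re.sub(r"[^a-zA-Z0-9._-]", "_", name)
    return prefix, name

print(fix_name("", "android:versionCode",
               {"android": "http://schemas.android.com/apk/res/android"}))
print(fix_name("", "1bad name!", {}))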
calmjs/calmjs.parse | src/calmjs/parse/walkers.py | https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/walkers.py#L78-L92 | def filter(self, node, condition):
"""
This method accepts a node and the condition function; a
generator will be returned to yield the nodes that got matched
by the condition.
"""
if not isinstance(node, Node):
raise TypeError('not a node')
for child in node:
if condition(child):
yield child
for subchild in self.filter(child, condition):
yield subchild | [
"def",
"filter",
"(",
"self",
",",
"node",
",",
"condition",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"Node",
")",
":",
"raise",
"TypeError",
"(",
"'not a node'",
")",
"for",
"child",
"in",
"node",
":",
"if",
"condition",
"(",
"child",
")",
":",
"yield",
"child",
"for",
"subchild",
"in",
"self",
".",
"filter",
"(",
"child",
",",
"condition",
")",
":",
"yield",
"subchild"
] | This method accepts a node and the condition function; a
generator will be returned to yield the nodes that got matched
by the condition. | [
"This",
"method",
"accepts",
"a",
"node",
"and",
"the",
"condition",
"function",
";",
"a",
"generator",
"will",
"be",
"returned",
"to",
"yield",
"the",
"nodes",
"that",
"got",
"matched",
"by",
"the",
"condition",
"."
] | python | train |
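The filter() walker above is a depth-first generator over an AST. The same pattern can be shown without calmjs by using a minimal stand-in node type; the real calmjs Node is likewise iterable over its children, and everything else below is invented for the illustration:

class Node:
    # Stand-in: iterating a node yields its children, as calmjs nodes do.
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def __iter__(self):
        return iter(self.children)

def filter_nodes(node, condition):
    # Yield every descendant, depth-first, that satisfies the condition.
    for child in node:
        if condition(child):
            yield child
        for subchild in filter_nodes(child, condition):
            yield subchild

tree = Node("root", [Node("func", [Node("var"), Node("var")]), Node("var")])
print([n.name for n in filter_nodes(tree, lambda n: n.name == "var")])
# ['var', 'var', 'var']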
fermiPy/fermipy | fermipy/skymap.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/skymap.py#L687-L696 | def expanded_counts_map(self):
""" return the full counts map """
if self.hpx._ipix is None:
return self.counts
output = np.zeros(
(self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)
for i in range(self.counts.shape[0]):
output[i][self.hpx._ipix] = self.counts[i]
return output | [
"def",
"expanded_counts_map",
"(",
"self",
")",
":",
"if",
"self",
".",
"hpx",
".",
"_ipix",
"is",
"None",
":",
"return",
"self",
".",
"counts",
"output",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"counts",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"hpx",
".",
"_maxpix",
")",
",",
"self",
".",
"counts",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"counts",
".",
"shape",
"[",
"0",
"]",
")",
":",
"output",
"[",
"i",
"]",
"[",
"self",
".",
"hpx",
".",
"_ipix",
"]",
"=",
"self",
".",
"counts",
"[",
"i",
"]",
"return",
"output"
] | return the full counts map | [
"return",
"the",
"full",
"counts",
"map"
] | python | train |
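The expansion above is a scatter of stored partial-sky pixels into a zero-filled full-sky array. A self-contained NumPy sketch of the same indexing, with made-up sizes in place of the real HEALPix geometry:

import numpy as np

n_energy_bins = 3
maxpix = 12                         # pixels in the full map (assumed)
ipix = np.array([2, 5, 7])          # pixel indices actually stored (assumed)
counts = np.arange(n_energy_bins * ipix.size).reshape(n_energy_bins, ipix.size)

# Same pattern as expanded_counts_map: zero-filled full map, then scatter.
output = np.zeros((counts.shape[0], maxpix), counts.dtype)
for i in range(counts.shape[0]):
    output[i][ipix] = counts[i]

print(output)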
fugue/credstash | credstash.py | https://github.com/fugue/credstash/blob/56df8e051fc4c8d15d5e7e373e88bf5bc13f3346/credstash.py#L732-L905 | def get_parser():
"""get the parsers dict"""
parsers = {}
parsers['super'] = argparse.ArgumentParser(
description="A credential/secret storage system")
parsers['super'].add_argument("-r", "--region",
help="the AWS region in which to operate. "
"If a region is not specified, credstash "
"will use the value of the "
"AWS_DEFAULT_REGION env variable, "
"or if that is not set, the value in "
"`~/.aws/config`. As a last resort, "
"it will use " + DEFAULT_REGION)
parsers['super'].add_argument("-t", "--table", default="credential-store",
help="DynamoDB table to use for "
"credential storage")
role_parse = parsers['super'].add_mutually_exclusive_group()
role_parse.add_argument("-p", "--profile", default=None,
help="Boto config profile to use when "
"connecting to AWS")
role_parse.add_argument("-n", "--arn", default=None,
help="AWS IAM ARN for AssumeRole")
subparsers = parsers['super'].add_subparsers(help='Try commands like '
'"{name} get -h" or "{name} '
'put --help" to get each '
'sub command\'s options'
.format(name=sys.argv[0]))
action = 'delete'
parsers[action] = subparsers.add_parser(action,
help='Delete a credential from the store')
parsers[action].add_argument("credential", type=str,
help="the name of the credential to delete")
parsers[action].set_defaults(action=action)
action = 'get'
parsers[action] = subparsers.add_parser(action, help="Get a credential "
"from the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to get. "
"Using the wildcard character '%s' will "
"search for credentials that match the "
"pattern" % WILDCARD_CHAR)
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-n", "--noline", action="store_true",
help="Don't append newline to returned "
"value (useful in scripts or with "
"binary files)")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].add_argument("-f", "--format", default="json",
choices=["json", "csv", "dotenv"] +
([] if NO_YAML else ["yaml"]),
help="Output format. json(default) " +
("" if NO_YAML else "yaml ") + " csv or dotenv.")
parsers[action].set_defaults(action=action)
action = 'getall'
parsers[action] = subparsers.add_parser(action,
help="Get all credentials from "
"the store")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].add_argument("-f", "--format", default="json",
choices=["json", "csv", "dotenv"] +
([] if NO_YAML else ["yaml"]),
help="Output format. json(default) " +
("" if NO_YAML else "yaml ") + " csv or dotenv.")
parsers[action].set_defaults(action=action)
action = 'keys'
parsers[action] = subparsers.add_parser(action,
help="List all keys in the store")
parsers[action].set_defaults(action=action)
action = 'list'
parsers[action] = subparsers.add_parser(action,
help="list credentials and "
"their versions")
parsers[action].set_defaults(action=action)
action = 'put'
parsers[action] = subparsers.add_parser(action,
help="Put a credential into "
"the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to store")
parsers[action].add_argument("value", type=value_or_filename,
help="the value of the credential to store "
"or, if beginning with the \"@\" character, "
"the filename of the file containing "
"the value, or pass \"-\" to read the value "
"from stdin", default="", nargs="?")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-k", "--key", default="alias/credstash",
help="the KMS key-id of the master key "
"to use. See the README for more "
"information. Defaults to alias/credstash")
parsers[action].add_argument("-c", "--comment", type=str,
help="Include reference information or a comment about "
"value to be stored.")
parsers[action].add_argument("-v", "--version", default="1",
help="Put a specific version of the "
"credential (update the credential; "
"defaults to version `1`).")
parsers[action].add_argument("-a", "--autoversion", action="store_true",
help="Automatically increment the version of "
"the credential to be stored. This option "
"causes the `-v` flag to be ignored. "
"(This option will fail if the currently stored "
"version is not numeric.)")
parsers[action].add_argument("-d", "--digest", default=DEFAULT_DIGEST,
choices=HASHING_ALGORITHMS,
help="the hashing algorithm used to "
"to encrypt the data. Defaults to SHA256")
parsers[action].add_argument("-P", "--prompt", action="store_true",
help="Prompt for secret")
parsers[action].set_defaults(action=action)
action = 'putall'
parsers[action] = subparsers.add_parser(action,
help="Put credentials from json into "
"the store")
parsers[action].add_argument("credentials", type=value_or_filename,
help="the value of the credential to store "
"or, if beginning with the \"@\" character, "
"the filename of the file containing "
"the values, or pass \"-\" to read the values "
"from stdin. Should be in json format.", default="")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-k", "--key", default="alias/credstash",
help="the KMS key-id of the master key "
"to use. See the README for more "
"information. Defaults to alias/credstash")
parsers[action].add_argument("-v", "--version", default="",
help="Put a specific version of the "
"credential (update the credential; "
"defaults to version `1`).")
parsers[action].add_argument("-a", "--autoversion", action="store_true",
help="Automatically increment the version of "
"the credential to be stored. This option "
"causes the `-v` flag to be ignored. "
"(This option will fail if the currently stored "
"version is not numeric.)")
parsers[action].add_argument("-d", "--digest", default="SHA256",
choices=HASHING_ALGORITHMS,
help="the hashing algorithm used to "
"to encrypt the data. Defaults to SHA256")
parsers[action].set_defaults(action=action)
action = 'setup'
parsers[action] = subparsers.add_parser(action,
help='setup the credential store')
parsers[action].set_defaults(action=action)
return parsers | [
"def",
"get_parser",
"(",
")",
":",
"parsers",
"=",
"{",
"}",
"parsers",
"[",
"'super'",
"]",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"A credential/secret storage system\"",
")",
"parsers",
"[",
"'super'",
"]",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--region\"",
",",
"help",
"=",
"\"the AWS region in which to operate. \"",
"\"If a region is not specified, credstash \"",
"\"will use the value of the \"",
"\"AWS_DEFAULT_REGION env variable, \"",
"\"or if that is not set, the value in \"",
"\"`~/.aws/config`. As a last resort, \"",
"\"it will use \"",
"+",
"DEFAULT_REGION",
")",
"parsers",
"[",
"'super'",
"]",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--table\"",
",",
"default",
"=",
"\"credential-store\"",
",",
"help",
"=",
"\"DynamoDB table to use for \"",
"\"credential storage\"",
")",
"role_parse",
"=",
"parsers",
"[",
"'super'",
"]",
".",
"add_mutually_exclusive_group",
"(",
")",
"role_parse",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--profile\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Boto config profile to use when \"",
"\"connecting to AWS\"",
")",
"role_parse",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--arn\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"AWS IAM ARN for AssumeRole\"",
")",
"subparsers",
"=",
"parsers",
"[",
"'super'",
"]",
".",
"add_subparsers",
"(",
"help",
"=",
"'Try commands like '",
"'\"{name} get -h\" or \"{name} '",
"'put --help\" to get each '",
"'sub command\\'s options'",
".",
"format",
"(",
"name",
"=",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"action",
"=",
"'delete'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"'Delete a credential from the store'",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"credential\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the name of the credential to delete\"",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"action",
"=",
"'get'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"\"Get a credential \"",
"\"from the store\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"credential\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the name of the credential to get. \"",
"\"Using the wildcard character '%s' will \"",
"\"search for credentials that match the \"",
"\"pattern\"",
"%",
"WILDCARD_CHAR",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"context\"",
",",
"type",
"=",
"key_value_pair",
",",
"action",
"=",
"KeyValueToDictionary",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"\"encryption context key/value pairs \"",
"\"associated with the credential in the form \"",
"\"of \\\"key=value\\\"\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--noline\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Don't append newline to returned \"",
"\"value (useful in scripts or with \"",
"\"binary files)\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Get a specific version of the \"",
"\"credential (defaults to the latest version)\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--format\"",
",",
"default",
"=",
"\"json\"",
",",
"choices",
"=",
"[",
"\"json\"",
",",
"\"csv\"",
",",
"\"dotenv\"",
"]",
"+",
"(",
"[",
"]",
"if",
"NO_YAML",
"else",
"[",
"\"yaml\"",
"]",
")",
",",
"help",
"=",
"\"Output format. json(default) \"",
"+",
"(",
"\"\"",
"if",
"NO_YAML",
"else",
"\"yaml \"",
")",
"+",
"\" csv or dotenv.\"",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"action",
"=",
"'getall'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"\"Get all credentials from \"",
"\"the store\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"context\"",
",",
"type",
"=",
"key_value_pair",
",",
"action",
"=",
"KeyValueToDictionary",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"\"encryption context key/value pairs \"",
"\"associated with the credential in the form \"",
"\"of \\\"key=value\\\"\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Get a specific version of the \"",
"\"credential (defaults to the latest version)\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--format\"",
",",
"default",
"=",
"\"json\"",
",",
"choices",
"=",
"[",
"\"json\"",
",",
"\"csv\"",
",",
"\"dotenv\"",
"]",
"+",
"(",
"[",
"]",
"if",
"NO_YAML",
"else",
"[",
"\"yaml\"",
"]",
")",
",",
"help",
"=",
"\"Output format. json(default) \"",
"+",
"(",
"\"\"",
"if",
"NO_YAML",
"else",
"\"yaml \"",
")",
"+",
"\" csv or dotenv.\"",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"action",
"=",
"'keys'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"\"List all keys in the store\"",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"action",
"=",
"'list'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"\"list credentials and \"",
"\"their versions\"",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"action",
"=",
"'put'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"\"Put a credential into \"",
"\"the store\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"credential\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the name of the credential to store\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"value\"",
",",
"type",
"=",
"value_or_filename",
",",
"help",
"=",
"\"the value of the credential to store \"",
"\"or, if beginning with the \\\"@\\\" character, \"",
"\"the filename of the file containing \"",
"\"the value, or pass \\\"-\\\" to read the value \"",
"\"from stdin\"",
",",
"default",
"=",
"\"\"",
",",
"nargs",
"=",
"\"?\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"context\"",
",",
"type",
"=",
"key_value_pair",
",",
"action",
"=",
"KeyValueToDictionary",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"\"encryption context key/value pairs \"",
"\"associated with the credential in the form \"",
"\"of \\\"key=value\\\"\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-k\"",
",",
"\"--key\"",
",",
"default",
"=",
"\"alias/credstash\"",
",",
"help",
"=",
"\"the KMS key-id of the master key \"",
"\"to use. See the README for more \"",
"\"information. Defaults to alias/credstash\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--comment\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Include reference information or a comment about \"",
"\"value to be stored.\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"default",
"=",
"\"1\"",
",",
"help",
"=",
"\"Put a specific version of the \"",
"\"credential (update the credential; \"",
"\"defaults to version `1`).\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--autoversion\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Automatically increment the version of \"",
"\"the credential to be stored. This option \"",
"\"causes the `-v` flag to be ignored. \"",
"\"(This option will fail if the currently stored \"",
"\"version is not numeric.)\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--digest\"",
",",
"default",
"=",
"DEFAULT_DIGEST",
",",
"choices",
"=",
"HASHING_ALGORITHMS",
",",
"help",
"=",
"\"the hashing algorithm used to \"",
"\"to encrypt the data. Defaults to SHA256\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-P\"",
",",
"\"--prompt\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Prompt for secret\"",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"action",
"=",
"'putall'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"\"Put credentials from json into \"",
"\"the store\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"credentials\"",
",",
"type",
"=",
"value_or_filename",
",",
"help",
"=",
"\"the value of the credential to store \"",
"\"or, if beginning with the \\\"@\\\" character, \"",
"\"the filename of the file containing \"",
"\"the values, or pass \\\"-\\\" to read the values \"",
"\"from stdin. Should be in json format.\"",
",",
"default",
"=",
"\"\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"context\"",
",",
"type",
"=",
"key_value_pair",
",",
"action",
"=",
"KeyValueToDictionary",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"\"encryption context key/value pairs \"",
"\"associated with the credential in the form \"",
"\"of \\\"key=value\\\"\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-k\"",
",",
"\"--key\"",
",",
"default",
"=",
"\"alias/credstash\"",
",",
"help",
"=",
"\"the KMS key-id of the master key \"",
"\"to use. See the README for more \"",
"\"information. Defaults to alias/credstash\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Put a specific version of the \"",
"\"credential (update the credential; \"",
"\"defaults to version `1`).\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--autoversion\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Automatically increment the version of \"",
"\"the credential to be stored. This option \"",
"\"causes the `-v` flag to be ignored. \"",
"\"(This option will fail if the currently stored \"",
"\"version is not numeric.)\"",
")",
"parsers",
"[",
"action",
"]",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--digest\"",
",",
"default",
"=",
"\"SHA256\"",
",",
"choices",
"=",
"HASHING_ALGORITHMS",
",",
"help",
"=",
"\"the hashing algorithm used to \"",
"\"to encrypt the data. Defaults to SHA256\"",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"action",
"=",
"'setup'",
"parsers",
"[",
"action",
"]",
"=",
"subparsers",
".",
"add_parser",
"(",
"action",
",",
"help",
"=",
"'setup the credential store'",
")",
"parsers",
"[",
"action",
"]",
".",
"set_defaults",
"(",
"action",
"=",
"action",
")",
"return",
"parsers"
] | get the parsers dict | [
"get",
"the",
"parsers",
"dict"
] | python | train |
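get_parser() returns a dict whose 'super' entry is the fully wired top-level parser, with each sub-command parser stored under its action name. A cut-down, self-contained sketch of that pattern using only the standard library (the options kept here are a small subset of the real ones):

import argparse

parsers = {}
parsers['super'] = argparse.ArgumentParser(
    description="A credential/secret storage system")
parsers['super'].add_argument("-r", "--region")
subparsers = parsers['super'].add_subparsers()

action = 'get'
parsers[action] = subparsers.add_parser(action,
                                        help="Get a credential from the store")
parsers[action].add_argument("credential", type=str)
parsers[action].set_defaults(action=action)

args = parsers['super'].parse_args(["-r", "us-east-1", "get", "db.password"])
print(args.action, args.credential, args.region)   # get db.password us-east-1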
nccgroup/Scout2 | AWSScout2/services/vpc.py | https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/vpc.py#L188-L208 | def get_cidr_name(cidr, ip_ranges_files, ip_ranges_name_key):
"""
Read display name for CIDRs from ip-ranges files
:param cidr:
:param ip_ranges_files:
:param ip_ranges_name_key:
:return:
"""
for filename in ip_ranges_files:
ip_ranges = read_ip_ranges(filename, local_file=True)
for ip_range in ip_ranges:
ip_prefix = netaddr.IPNetwork(ip_range['ip_prefix'])
cidr = netaddr.IPNetwork(cidr)
if cidr in ip_prefix:
return ip_range[ip_ranges_name_key].strip()
for ip_range in aws_ip_ranges:
ip_prefix = netaddr.IPNetwork(ip_range['ip_prefix'])
cidr = netaddr.IPNetwork(cidr)
if cidr in ip_prefix:
return 'Unknown CIDR in %s %s' % (ip_range['service'], ip_range['region'])
return 'Unknown CIDR' | [
"def",
"get_cidr_name",
"(",
"cidr",
",",
"ip_ranges_files",
",",
"ip_ranges_name_key",
")",
":",
"for",
"filename",
"in",
"ip_ranges_files",
":",
"ip_ranges",
"=",
"read_ip_ranges",
"(",
"filename",
",",
"local_file",
"=",
"True",
")",
"for",
"ip_range",
"in",
"ip_ranges",
":",
"ip_prefix",
"=",
"netaddr",
".",
"IPNetwork",
"(",
"ip_range",
"[",
"'ip_prefix'",
"]",
")",
"cidr",
"=",
"netaddr",
".",
"IPNetwork",
"(",
"cidr",
")",
"if",
"cidr",
"in",
"ip_prefix",
":",
"return",
"ip_range",
"[",
"ip_ranges_name_key",
"]",
".",
"strip",
"(",
")",
"for",
"ip_range",
"in",
"aws_ip_ranges",
":",
"ip_prefix",
"=",
"netaddr",
".",
"IPNetwork",
"(",
"ip_range",
"[",
"'ip_prefix'",
"]",
")",
"cidr",
"=",
"netaddr",
".",
"IPNetwork",
"(",
"cidr",
")",
"if",
"cidr",
"in",
"ip_prefix",
":",
"return",
"'Unknown CIDR in %s %s'",
"%",
"(",
"ip_range",
"[",
"'service'",
"]",
",",
"ip_range",
"[",
"'region'",
"]",
")",
"return",
"'Unknown CIDR'"
] | Read display name for CIDRs from ip-ranges files
:param cidr:
:param ip_ranges_files:
:param ip_ranges_name_key:
:return: | [
"Read",
"display",
"name",
"for",
"CIDRs",
"from",
"ip",
"-",
"ranges",
"files",
":",
"param",
"cidr",
":",
":",
"param",
"ip_ranges_files",
":",
":",
"param",
"ip_ranges_name_key",
":",
":",
"return",
":"
] | python | train |
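The lookup above leans on netaddr's IPNetwork containment check. A minimal sketch of that lookup against an in-memory range list; the ranges themselves are invented and stand in for the parsed ip-ranges files:

import netaddr

ip_ranges = [
    {"ip_prefix": "10.0.0.0/8", "name": "corporate-vpn"},
    {"ip_prefix": "192.168.0.0/16", "name": "office-lan"},
]

def cidr_name(cidr, ranges, name_key="name"):
    cidr = netaddr.IPNetwork(cidr)
    for ip_range in ranges:
        if cidr in netaddr.IPNetwork(ip_range["ip_prefix"]):
            return ip_range[name_key].strip()
    return "Unknown CIDR"

print(cidr_name("10.1.2.0/24", ip_ranges))   # corporate-vpn
print(cidr_name("8.8.8.0/24", ip_ranges))    # Unknown CIDR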
Gandi/gandi.cli | gandi/cli/modules/webacc.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/webacc.py#L220-L244 | def probe(cls, resource, enable, disable, test, host, interval,
http_method, http_response, threshold, timeout, url, window):
""" Set a probe for a webaccelerator """
params = {
'host': host,
'interval': interval,
'method': http_method,
'response': http_response,
'threshold': threshold,
'timeout': timeout,
'url': url,
'window': window
}
if enable:
params['enable'] = True
elif disable:
params['enable'] = False
if test:
result = cls.call(
'hosting.rproxy.probe.test', cls.usable_id(resource), params)
else:
result = cls.call(
'hosting.rproxy.probe.update', cls.usable_id(resource), params)
cls.display_progress(result)
return result | [
"def",
"probe",
"(",
"cls",
",",
"resource",
",",
"enable",
",",
"disable",
",",
"test",
",",
"host",
",",
"interval",
",",
"http_method",
",",
"http_response",
",",
"threshold",
",",
"timeout",
",",
"url",
",",
"window",
")",
":",
"params",
"=",
"{",
"'host'",
":",
"host",
",",
"'interval'",
":",
"interval",
",",
"'method'",
":",
"http_method",
",",
"'response'",
":",
"http_response",
",",
"'threshold'",
":",
"threshold",
",",
"'timeout'",
":",
"timeout",
",",
"'url'",
":",
"url",
",",
"'window'",
":",
"window",
"}",
"if",
"enable",
":",
"params",
"[",
"'enable'",
"]",
"=",
"True",
"elif",
"disable",
":",
"params",
"[",
"'enable'",
"]",
"=",
"False",
"if",
"test",
":",
"result",
"=",
"cls",
".",
"call",
"(",
"'hosting.rproxy.probe.test'",
",",
"cls",
".",
"usable_id",
"(",
"resource",
")",
",",
"params",
")",
"else",
":",
"result",
"=",
"cls",
".",
"call",
"(",
"'hosting.rproxy.probe.update'",
",",
"cls",
".",
"usable_id",
"(",
"resource",
")",
",",
"params",
")",
"cls",
".",
"display_progress",
"(",
"result",
")",
"return",
"result"
] | Set a probe for a webaccelerator | [
"Set",
"a",
"probe",
"for",
"a",
"webaccelerator"
] | python | train |
pypa/pipenv | pipenv/vendor/requirementslib/models/utils.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/utils.py#L156-L165 | def parse_extras(extras_str):
# type: (AnyStr) -> List[AnyStr]
"""
Turn a string of extras into a parsed extras list
"""
from pkg_resources import Requirement
extras = Requirement.parse("fakepkg{0}".format(extras_to_string(extras_str))).extras
return sorted(dedup([extra.lower() for extra in extras])) | [
"def",
"parse_extras",
"(",
"extras_str",
")",
":",
"# type: (AnyStr) -> List[AnyStr]",
"from",
"pkg_resources",
"import",
"Requirement",
"extras",
"=",
"Requirement",
".",
"parse",
"(",
"\"fakepkg{0}\"",
".",
"format",
"(",
"extras_to_string",
"(",
"extras_str",
")",
")",
")",
".",
"extras",
"return",
"sorted",
"(",
"dedup",
"(",
"[",
"extra",
".",
"lower",
"(",
")",
"for",
"extra",
"in",
"extras",
"]",
")",
")"
] | Turn a string of extras into a parsed extras list | [
"Turn",
"a",
"string",
"of",
"extras",
"into",
"a",
"parsed",
"extras",
"list"
] | python | train |
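The trick above, wrapping the extras string in a throwaway requirement so pkg_resources does the parsing, works in isolation as well; fakepkg is just a placeholder name:

from pkg_resources import Requirement

extras_str = "[security,socks]"      # extras as they appear in a requirement line
req = Requirement.parse("fakepkg{0}".format(extras_str))
print(sorted(extra.lower() for extra in req.extras))   # ['security', 'socks']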
fastavro/fastavro | fastavro/_write_py.py | https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L543-L633 | def writer(fo,
schema,
records,
codec='null',
sync_interval=1000 * SYNC_SIZE,
metadata=None,
validator=None,
sync_marker=None):
"""Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
Validator function. If None (the default) - no validation. If True then
then fastavro.validation.validate will be used. If it's a function, it
should have the same signature as fastavro.writer.validate and raise an
exeption on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records)
"""
# Sanity check that records is not a single dictionary (as that is a common
# mistake and the exception that gets raised is not helpful)
if isinstance(records, dict):
raise ValueError('"records" argument should be an iterable, not dict')
output = Writer(
fo,
schema,
codec,
sync_interval,
metadata,
validator,
sync_marker,
)
for record in records:
output.write(record)
output.flush() | [
"def",
"writer",
"(",
"fo",
",",
"schema",
",",
"records",
",",
"codec",
"=",
"'null'",
",",
"sync_interval",
"=",
"1000",
"*",
"SYNC_SIZE",
",",
"metadata",
"=",
"None",
",",
"validator",
"=",
"None",
",",
"sync_marker",
"=",
"None",
")",
":",
"# Sanity check that records is not a single dictionary (as that is a common",
"# mistake and the exception that gets raised is not helpful)",
"if",
"isinstance",
"(",
"records",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'\"records\" argument should be an iterable, not dict'",
")",
"output",
"=",
"Writer",
"(",
"fo",
",",
"schema",
",",
"codec",
",",
"sync_interval",
",",
"metadata",
",",
"validator",
",",
"sync_marker",
",",
")",
"for",
"record",
"in",
"records",
":",
"output",
".",
"write",
"(",
"record",
")",
"output",
".",
"flush",
"(",
")"
] | Write records to fo (stream) according to schema
Parameters
----------
fo: file-like
Output stream
records: iterable
Records to write. This is commonly a list of the dictionary
representation of the records, but it can be any iterable
codec: string, optional
Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
sync_interval: int, optional
Size of sync interval
metadata: dict, optional
Header metadata
validator: None, True or a function
Validator function. If None (the default) - no validation. If True then
then fastavro.validation.validate will be used. If it's a function, it
should have the same signature as fastavro.writer.validate and raise an
exeption on error.
sync_marker: bytes, optional
A byte string used as the avro sync marker. If not provided, a random
byte string will be used.
Example::
from fastavro import writer, parse_schema
schema = {
'doc': 'A weather reading.',
'name': 'Weather',
'namespace': 'test',
'type': 'record',
'fields': [
{'name': 'station', 'type': 'string'},
{'name': 'time', 'type': 'long'},
{'name': 'temp', 'type': 'int'},
],
}
parsed_schema = parse_schema(schema)
records = [
{u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
{u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
{u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
{u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
]
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
Given an existing avro file, it's possible to append to it by re-opening
the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't
able to read some of the existing header information and an error will be
raised. For example::
# Write initial records
with open('weather.avro', 'wb') as out:
writer(out, parsed_schema, records)
# Write some more records
with open('weather.avro', 'a+b') as out:
writer(out, parsed_schema, more_records) | [
"Write",
"records",
"to",
"fo",
"(",
"stream",
")",
"according",
"to",
"schema"
] | python | train |
openstack/proliantutils | proliantutils/hpssa/objects.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L517-L562 | def can_accomodate(self, logical_disk):
"""Check if this RAID array can accomodate the logical disk.
This method uses hpssacli/ssacli command's option to check if the
logical disk with desired size and RAID level can be created
on this RAID array.
:param logical_disk: Dictionary of logical disk to be created.
:returns: True, if logical disk can be created on the RAID array
False, otherwise.
"""
raid_level = constants.RAID_LEVEL_INPUT_TO_HPSSA_MAPPING.get(
logical_disk['raid_level'], logical_disk['raid_level'])
args = ("array", self.id, "create", "type=logicaldrive",
"raid=%s" % raid_level, "size=?")
if logical_disk['size_gb'] != "MAX":
desired_disk_size = logical_disk['size_gb']
else:
desired_disk_size = constants.MINIMUM_DISK_SIZE
try:
stdout, stderr = self.parent.execute_cmd(
*args, dont_transform_to_hpssa_exception=True)
except processutils.ProcessExecutionError as ex:
# hpssacli/ssacli returns error code 1 when RAID level of the
# logical disk is not supported on the array.
# If that's the case, just return saying the logical disk
# cannot be accomodated in the array.
# If exist_code is not 1, then it's some other error that we
# don't expect to appear and hence raise it back.
if ex.exit_code == 1:
return False
else:
raise exception.HPSSAOperationError(reason=ex)
except Exception as ex:
raise exception.HPSSAOperationError(reason=ex)
# TODO(rameshg87): This always returns in MB, but confirm with
# HPSSA folks.
match = re.search('Max: (\d+)', stdout)
if not match:
return False
max_size_gb = int(match.group(1)) / 1024
return desired_disk_size <= max_size_gb | [
"def",
"can_accomodate",
"(",
"self",
",",
"logical_disk",
")",
":",
"raid_level",
"=",
"constants",
".",
"RAID_LEVEL_INPUT_TO_HPSSA_MAPPING",
".",
"get",
"(",
"logical_disk",
"[",
"'raid_level'",
"]",
",",
"logical_disk",
"[",
"'raid_level'",
"]",
")",
"args",
"=",
"(",
"\"array\"",
",",
"self",
".",
"id",
",",
"\"create\"",
",",
"\"type=logicaldrive\"",
",",
"\"raid=%s\"",
"%",
"raid_level",
",",
"\"size=?\"",
")",
"if",
"logical_disk",
"[",
"'size_gb'",
"]",
"!=",
"\"MAX\"",
":",
"desired_disk_size",
"=",
"logical_disk",
"[",
"'size_gb'",
"]",
"else",
":",
"desired_disk_size",
"=",
"constants",
".",
"MINIMUM_DISK_SIZE",
"try",
":",
"stdout",
",",
"stderr",
"=",
"self",
".",
"parent",
".",
"execute_cmd",
"(",
"*",
"args",
",",
"dont_transform_to_hpssa_exception",
"=",
"True",
")",
"except",
"processutils",
".",
"ProcessExecutionError",
"as",
"ex",
":",
"# hpssacli/ssacli returns error code 1 when RAID level of the",
"# logical disk is not supported on the array.",
"# If that's the case, just return saying the logical disk",
"# cannot be accomodated in the array.",
"# If exist_code is not 1, then it's some other error that we",
"# don't expect to appear and hence raise it back.",
"if",
"ex",
".",
"exit_code",
"==",
"1",
":",
"return",
"False",
"else",
":",
"raise",
"exception",
".",
"HPSSAOperationError",
"(",
"reason",
"=",
"ex",
")",
"except",
"Exception",
"as",
"ex",
":",
"raise",
"exception",
".",
"HPSSAOperationError",
"(",
"reason",
"=",
"ex",
")",
"# TODO(rameshg87): This always returns in MB, but confirm with",
"# HPSSA folks.",
"match",
"=",
"re",
".",
"search",
"(",
"'Max: (\\d+)'",
",",
"stdout",
")",
"if",
"not",
"match",
":",
"return",
"False",
"max_size_gb",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"/",
"1024",
"return",
"desired_disk_size",
"<=",
"max_size_gb"
] | Check if this RAID array can accomodate the logical disk.
This method uses hpssacli/ssacli command's option to check if the
logical disk with desired size and RAID level can be created
on this RAID array.
:param logical_disk: Dictionary of logical disk to be created.
:returns: True, if logical disk can be created on the RAID array
False, otherwise. | [
"Check",
"if",
"this",
"RAID",
"array",
"can",
"accomodate",
"the",
"logical",
"disk",
"."
] | python | train |
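The final size check is a regex over the tool's size=? output; in isolation it looks like this, with an invented sample of the command output:

import re

stdout = ("Available options are:\n"
          "   Min: 1024 (1.0 GiB)\n"
          "   Max: 102400 (100.0 GiB)\n")
desired_disk_size = 50   # GB

match = re.search(r'Max: (\d+)', stdout)
if match:
    max_size_gb = int(match.group(1)) / 1024   # the tool reports MB
    print(desired_disk_size <= max_size_gb)    # True
else:
    print(False)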
petebachant/PXL | pxl/timeseries.py | https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/timeseries.py#L141-L143 | def autocorr_coeff(x, t, tau1, tau2):
"""Calculate the autocorrelation coefficient."""
return corr_coeff(x, x, t, tau1, tau2) | [
"def",
"autocorr_coeff",
"(",
"x",
",",
"t",
",",
"tau1",
",",
"tau2",
")",
":",
"return",
"corr_coeff",
"(",
"x",
",",
"x",
",",
"t",
",",
"tau1",
",",
"tau2",
")"
] | Calculate the autocorrelation coefficient. | [
"Calculate",
"the",
"autocorrelation",
"coefficient",
"."
] | python | train |
Skype4Py/Skype4Py | Skype4Py/api/posix_x11.py | https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/api/posix_x11.py#L323-L337 | def get_skype(self):
"""Returns Skype window ID or None if Skype not running."""
skype_inst = x11.XInternAtom(self.disp, '_SKYPE_INSTANCE', True)
if not skype_inst:
return
type_ret = Atom()
format_ret = c_int()
nitems_ret = c_ulong()
bytes_after_ret = c_ulong()
winp = pointer(Window())
fail = x11.XGetWindowProperty(self.disp, self.win_root, skype_inst,
0, 1, False, 33, byref(type_ret), byref(format_ret),
byref(nitems_ret), byref(bytes_after_ret), byref(winp))
if not fail and format_ret.value == 32 and nitems_ret.value == 1:
return winp.contents.value | [
"def",
"get_skype",
"(",
"self",
")",
":",
"skype_inst",
"=",
"x11",
".",
"XInternAtom",
"(",
"self",
".",
"disp",
",",
"'_SKYPE_INSTANCE'",
",",
"True",
")",
"if",
"not",
"skype_inst",
":",
"return",
"type_ret",
"=",
"Atom",
"(",
")",
"format_ret",
"=",
"c_int",
"(",
")",
"nitems_ret",
"=",
"c_ulong",
"(",
")",
"bytes_after_ret",
"=",
"c_ulong",
"(",
")",
"winp",
"=",
"pointer",
"(",
"Window",
"(",
")",
")",
"fail",
"=",
"x11",
".",
"XGetWindowProperty",
"(",
"self",
".",
"disp",
",",
"self",
".",
"win_root",
",",
"skype_inst",
",",
"0",
",",
"1",
",",
"False",
",",
"33",
",",
"byref",
"(",
"type_ret",
")",
",",
"byref",
"(",
"format_ret",
")",
",",
"byref",
"(",
"nitems_ret",
")",
",",
"byref",
"(",
"bytes_after_ret",
")",
",",
"byref",
"(",
"winp",
")",
")",
"if",
"not",
"fail",
"and",
"format_ret",
".",
"value",
"==",
"32",
"and",
"nitems_ret",
".",
"value",
"==",
"1",
":",
"return",
"winp",
".",
"contents",
".",
"value"
] | Returns Skype window ID or None if Skype not running. | [
"Returns",
"Skype",
"window",
"ID",
"or",
"None",
"if",
"Skype",
"not",
"running",
"."
] | python | train |