Dataset columns: repo (string, 7-54 chars), path (string, 4-192 chars), url (string, 87-284 chars), code (string, 78-104k chars), code_tokens (sequence), docstring (string, 1-46.9k chars), docstring_tokens (sequence), language (1 class), partition (3 classes).

repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---
fhamborg/news-please | newsplease/pipeline/extractor/cleaner.py | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L35-L51 | def delete_whitespaces(self, arg):
"""Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string
"""
# Deletes whitespaces after a newline
arg = re.sub(re_newline_spc, '', arg)
# Deletes every whitespace, tabulator, newline at the beginning of the string
arg = re.sub(re_starting_whitespc, '', arg)
# Deletes whitespace or tabulator if followed by whitespace or tabulator
arg = re.sub(re_multi_spc_tab, '', arg)
# Deletes newline if it is followed by another one
arg = re.sub(re_double_newline, '', arg)
# Deletes newlines and whitespaces at the end of the string
arg = re.sub(re_ending_spc_newline, '', arg)
return arg | [
"def",
"delete_whitespaces",
"(",
"self",
",",
"arg",
")",
":",
"# Deletes whitespaces after a newline",
"arg",
"=",
"re",
".",
"sub",
"(",
"re_newline_spc",
",",
"''",
",",
"arg",
")",
"# Deletes every whitespace, tabulator, newline at the beginning of the string",
"arg",
"=",
"re",
".",
"sub",
"(",
"re_starting_whitespc",
",",
"''",
",",
"arg",
")",
"# Deletes whitespace or tabulator if followed by whitespace or tabulator",
"arg",
"=",
"re",
".",
"sub",
"(",
"re_multi_spc_tab",
",",
"''",
",",
"arg",
")",
"# Deletes newline if it is followed by an other one",
"arg",
"=",
"re",
".",
"sub",
"(",
"re_double_newline",
",",
"''",
",",
"arg",
")",
"# Deletes newlines and whitespaces at the end of the string",
"arg",
"=",
"re",
".",
"sub",
"(",
"re_ending_spc_newline",
",",
"''",
",",
"arg",
")",
"return",
"arg"
] | Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one.
:param arg: A string, the string which shall be cleaned
:return: A string, the cleaned string | [
"Removes",
"newlines",
"tabs",
"and",
"whitespaces",
"at",
"the",
"beginning",
"the",
"end",
"and",
"if",
"there",
"is",
"more",
"than",
"one",
"."
] | python | train |
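For the `delete_whitespaces` sample above, the module-level patterns (`re_newline_spc` and friends) are not part of the extracted snippet, so the following is a minimal runnable sketch in which the pattern definitions are assumptions that merely match the inline comments:

```python
import re

# Assumed definitions -- the real compiled patterns live at module level in
# cleaner.py and are not included in this sample.
re_newline_spc = re.compile(r'(?<=\n)[ \t]+')     # whitespace right after a newline
re_starting_whitespc = re.compile(r'^[ \t\n]+')   # leading spaces/tabs/newlines
re_multi_spc_tab = re.compile(r'[ \t](?=[ \t])')  # collapse runs of spaces/tabs
re_double_newline = re.compile(r'\n(?=\n)')       # collapse repeated newlines
re_ending_spc_newline = re.compile(r'[ \n]+$')    # trailing spaces/newlines

text = "  Hello \t world\n\n   again  "
for pattern in (re_newline_spc, re_starting_whitespc, re_multi_spc_tab,
                re_double_newline, re_ending_spc_newline):
    text = pattern.sub('', text)
print(repr(text))  # 'Hello world\nagain'
```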
ricequant/rqalpha | rqalpha/api/api_base.py | https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/api/api_base.py#L224-L291 | def submit_order(id_or_ins, amount, side, price=None, position_effect=None):
"""
General-purpose order submission function; a strategy can freely choose its order parameters through it.
:param id_or_ins: the instrument to place the order for
:type id_or_ins: :class:`~Instrument` object | `str`
:param float amount: order quantity; must be a positive number
:param side: direction of the trade, long (SIDE.BUY) or short (SIDE.SELL)
:type side: :class:`~SIDE` enum
:param float price: order price; defaults to None, which means a market order
:param position_effect: position effect: open (POSITION_EFFECT.OPEN), close (POSITION_EFFECT.CLOSE) or close-today (POSITION_EFFECT.CLOSE_TODAY); not required when trading stocks
:type position_effect: :class:`~POSITION_EFFECT` enum
:return: :class:`~Order` object | None
:example:
.. code-block:: python
# Buy 2000 shares of Ping An Bank stock, sent as a market order:
submit_order('000001.XSHE', 2000, SIDE.BUY)
# Close 10 lots of today's long position in RB1812, sending a limit order at a price of 4000
submit_order('RB1812', 10, SIDE.SELL, price=4000, position_effect=POSITION_EFFECT.CLOSE_TODAY)
"""
order_book_id = assure_order_book_id(id_or_ins)
env = Environment.get_instance()
if (
env.config.base.run_type != RUN_TYPE.BACKTEST
and env.get_instrument(order_book_id).type == "Future"
):
if "88" in order_book_id:
raise RQInvalidArgument(
_(u"Main Future contracts[88] are not supported in paper trading.")
)
if "99" in order_book_id:
raise RQInvalidArgument(
_(u"Index Future contracts[99] are not supported in paper trading.")
)
style = cal_style(price, None)
market_price = env.get_last_price(order_book_id)
if not is_valid_price(market_price):
user_system_log.warn(
_(u"Order Creation Failed: [{order_book_id}] No market data").format(
order_book_id=order_book_id
)
)
return
amount = int(amount)
order = Order.__from_create__(
order_book_id=order_book_id,
quantity=amount,
side=side,
style=style,
position_effect=position_effect,
)
if order.type == ORDER_TYPE.MARKET:
order.set_frozen_price(market_price)
if env.can_submit_order(order):
env.broker.submit_order(order)
return order | [
"def",
"submit_order",
"(",
"id_or_ins",
",",
"amount",
",",
"side",
",",
"price",
"=",
"None",
",",
"position_effect",
"=",
"None",
")",
":",
"order_book_id",
"=",
"assure_order_book_id",
"(",
"id_or_ins",
")",
"env",
"=",
"Environment",
".",
"get_instance",
"(",
")",
"if",
"(",
"env",
".",
"config",
".",
"base",
".",
"run_type",
"!=",
"RUN_TYPE",
".",
"BACKTEST",
"and",
"env",
".",
"get_instrument",
"(",
"order_book_id",
")",
".",
"type",
"==",
"\"Future\"",
")",
":",
"if",
"\"88\"",
"in",
"order_book_id",
":",
"raise",
"RQInvalidArgument",
"(",
"_",
"(",
"u\"Main Future contracts[88] are not supported in paper trading.\"",
")",
")",
"if",
"\"99\"",
"in",
"order_book_id",
":",
"raise",
"RQInvalidArgument",
"(",
"_",
"(",
"u\"Index Future contracts[99] are not supported in paper trading.\"",
")",
")",
"style",
"=",
"cal_style",
"(",
"price",
",",
"None",
")",
"market_price",
"=",
"env",
".",
"get_last_price",
"(",
"order_book_id",
")",
"if",
"not",
"is_valid_price",
"(",
"market_price",
")",
":",
"user_system_log",
".",
"warn",
"(",
"_",
"(",
"u\"Order Creation Failed: [{order_book_id}] No market data\"",
")",
".",
"format",
"(",
"order_book_id",
"=",
"order_book_id",
")",
")",
"return",
"amount",
"=",
"int",
"(",
"amount",
")",
"order",
"=",
"Order",
".",
"__from_create__",
"(",
"order_book_id",
"=",
"order_book_id",
",",
"quantity",
"=",
"amount",
",",
"side",
"=",
"side",
",",
"style",
"=",
"style",
",",
"position_effect",
"=",
"position_effect",
",",
")",
"if",
"order",
".",
"type",
"==",
"ORDER_TYPE",
".",
"MARKET",
":",
"order",
".",
"set_frozen_price",
"(",
"market_price",
")",
"if",
"env",
".",
"can_submit_order",
"(",
"order",
")",
":",
"env",
".",
"broker",
".",
"submit_order",
"(",
"order",
")",
"return",
"order"
] | General-purpose order submission function; a strategy can freely choose its order parameters through it.
:param id_or_ins: the instrument to place the order for
:type id_or_ins: :class:`~Instrument` object | `str`
:param float amount: order quantity; must be a positive number
:param side: direction of the trade, long (SIDE.BUY) or short (SIDE.SELL)
:type side: :class:`~SIDE` enum
:param float price: order price; defaults to None, which means a market order
:param position_effect: position effect: open (POSITION_EFFECT.OPEN), close (POSITION_EFFECT.CLOSE) or close-today (POSITION_EFFECT.CLOSE_TODAY); not required when trading stocks
:type position_effect: :class:`~POSITION_EFFECT` enum
:return: :class:`~Order` object | None
:example:
.. code-block:: python
# Buy 2000 shares of Ping An Bank stock, sent as a market order:
submit_order('000001.XSHE', 2000, SIDE.BUY)
# Close 10 lots of today's long position in RB1812, sending a limit order at a price of 4000
submit_order('RB1812', 10, SIDE.SELL, price=4000, position_effect=POSITION_EFFECT.CLOSE_TODAY) | [
"通用下单函数,策略可以通过该函数自由选择参数下单。"
] | python | train |
callowayproject/Transmogrify | transmogrify/utils.py | https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/utils.py#L252-L264 | def get_cached_files(url, server_name="", document_root=None):
"""
Given a URL, return a list of paths of all cached variations of that file.
Doesn't include the original file.
"""
import glob
url_info = process_url(url, server_name, document_root, check_security=False)
# get path to cache directory with basename of file (no extension)
filedir = os.path.dirname(url_info['requested_file'])
fileglob = '{0}*{1}'.format(url_info['base_filename'], url_info['ext'])
return glob.glob(os.path.join(filedir, fileglob)) | [
"def",
"get_cached_files",
"(",
"url",
",",
"server_name",
"=",
"\"\"",
",",
"document_root",
"=",
"None",
")",
":",
"import",
"glob",
"url_info",
"=",
"process_url",
"(",
"url",
",",
"server_name",
",",
"document_root",
",",
"check_security",
"=",
"False",
")",
"# get path to cache directory with basename of file (no extension)",
"filedir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"url_info",
"[",
"'requested_file'",
"]",
")",
"fileglob",
"=",
"'{0}*{1}'",
".",
"format",
"(",
"url_info",
"[",
"'base_filename'",
"]",
",",
"url_info",
"[",
"'ext'",
"]",
")",
"return",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"filedir",
",",
"fileglob",
")",
")"
] | Given a URL, return a list of paths of all cached variations of that file.
Doesn't include the original file. | [
"Given",
"a",
"URL",
"return",
"a",
"list",
"of",
"paths",
"of",
"all",
"cached",
"variations",
"of",
"that",
"file",
"."
] | python | train |
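A hedged sketch of what `get_cached_files` does once `process_url` has run; the `url_info` dict below is a hand-built stand-in for transmogrify's internal return value, with invented paths:

```python
import glob
import os

# Stand-in for the dict transmogrify's process_url() would return; the keys
# are the ones the function above reads, the values are illustrative.
url_info = {
    'requested_file': '/var/cache/img/photo.jpg',
    'base_filename': 'photo',
    'ext': '.jpg',
}

filedir = os.path.dirname(url_info['requested_file'])
fileglob = '{0}*{1}'.format(url_info['base_filename'], url_info['ext'])
print(os.path.join(filedir, fileglob))             # /var/cache/img/photo*.jpg
print(glob.glob(os.path.join(filedir, fileglob)))  # cached variants, if any exist
```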
xflr6/features | features/__init__.py | https://github.com/xflr6/features/blob/f985304dd642da6ecdc66d85167d00daa4efe5f4/features/__init__.py#L31-L50 | def make_features(context, frmat='table', str_maximal=False):
"""Return a new feature system from context string in the given format.
Args:
context (str): Formal context table as plain-text string.
frmat: Format of the context string (``'table'``, ``'cxt'``, ``'csv'``).
str_maximal (bool):
Example:
>>> make_features('''
... |+male|-male|+adult|-adult|
... man | X | | X | |
... woman| | X | X | |
... boy | X | | | X |
... girl | | X | | X |
... ''') # doctest: +ELLIPSIS
<FeatureSystem object of 4 atoms 10 featuresets at 0x...>
"""
config = Config.create(context=context, format=frmat, str_maximal=str_maximal)
return FeatureSystem(config) | [
"def",
"make_features",
"(",
"context",
",",
"frmat",
"=",
"'table'",
",",
"str_maximal",
"=",
"False",
")",
":",
"config",
"=",
"Config",
".",
"create",
"(",
"context",
"=",
"context",
",",
"format",
"=",
"frmat",
",",
"str_maximal",
"=",
"str_maximal",
")",
"return",
"FeatureSystem",
"(",
"config",
")"
] | Return a new feature system from context string in the given format.
Args:
context (str): Formal context table as plain-text string.
frmat: Format of the context string (``'table'``, ``'cxt'``, ``'csv'``).
str_maximal (bool):
Example:
>>> make_features('''
... |+male|-male|+adult|-adult|
... man | X | | X | |
... woman| | X | X | |
... boy | X | | | X |
... girl | | X | | X |
... ''') # doctest: +ELLIPSIS
<FeatureSystem object of 4 atoms 10 featuresets at 0x...> | [
"Return",
"a",
"new",
"feature",
"system",
"from",
"context",
"string",
"in",
"the",
"given",
"format",
"."
] | python | train |
markovmodel/msmtools | msmtools/flux/api.py | https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/flux/api.py#L334-L367 | def coarsegrain(F, sets):
r"""Coarse-grains the flux to the given sets.
Parameters
----------
F : (n, n) ndarray or scipy.sparse matrix
Matrix of flux values between pairs of states.
sets : list of array-like of ints
The sets of states onto which the flux is coarse-grained.
Notes
-----
The coarse grained flux is defined as
.. math:: fc_{I,J} = \sum_{i \in I,j \in J} f_{i,j}
Note that if you coarse-grain a net flux, it does not necessarily
have a net flux property anymore. If you want to make sure you get a
netflux, use to_netflux(coarsegrain(F,sets)).
References
----------
.. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and
T. Weikl: Constructing the Full Ensemble of Folding Pathways
from Short Off-Equilibrium Simulations.
Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009)
"""
if issparse(F):
return sparse.tpt.coarsegrain(F, sets)
elif isdense(F):
return dense.tpt.coarsegrain(F, sets)
else:
raise _type_not_supported | [
"def",
"coarsegrain",
"(",
"F",
",",
"sets",
")",
":",
"if",
"issparse",
"(",
"F",
")",
":",
"return",
"sparse",
".",
"tpt",
".",
"coarsegrain",
"(",
"F",
",",
"sets",
")",
"elif",
"isdense",
"(",
"F",
")",
":",
"return",
"dense",
".",
"tpt",
".",
"coarsegrain",
"(",
"F",
",",
"sets",
")",
"else",
":",
"raise",
"_type_not_supported"
] | r"""Coarse-grains the flux to the given sets.
Parameters
----------
F : (n, n) ndarray or scipy.sparse matrix
Matrix of flux values between pairs of states.
sets : list of array-like of ints
The sets of states onto which the flux is coarse-grained.
Notes
-----
The coarse grained flux is defined as
.. math:: fc_{I,J} = \sum_{i \in I,j \in J} f_{i,j}
Note that if you coarse-grain a net flux, it does not necessarily
have a net flux property anymore. If you want to make sure you get a
netflux, use to_netflux(coarsegrain(F,sets)).
References
----------
.. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and
T. Weikl: Constructing the Full Ensemble of Folding Pathways
from Short Off-Equilibrium Simulations.
Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009) | [
"r",
"Coarse",
"-",
"grains",
"the",
"flux",
"to",
"the",
"given",
"sets",
"."
] | python | train |
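The coarse-graining formula in the `coarsegrain` docstring is easy to check by hand. Below is a small NumPy sketch that applies fc_{I,J} = sum_{i in I, j in J} f_{i,j} directly; it is an independent reimplementation for illustration, not msmtools' internal dense/sparse code:

```python
import numpy as np

F = np.array([[0.0, 0.3, 0.1, 0.0],
              [0.0, 0.0, 0.2, 0.4],
              [0.0, 0.0, 0.0, 0.5],
              [0.0, 0.0, 0.0, 0.0]])
sets = [[0, 1], [2, 3]]

# fc_{I,J} = sum over i in I, j in J of f_{i,j}
nsets = len(sets)
Fc = np.zeros((nsets, nsets))
for I, si in enumerate(sets):
    for J, sj in enumerate(sets):
        Fc[I, J] = F[np.ix_(si, sj)].sum()
print(Fc)
# [[0.3 0.7]
#  [0.  0.5]]
```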
Duke-GCB/DukeDSClient | ddsc/cmdparser.py | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/cmdparser.py#L309-L317 | def _add_message_file(arg_parser, help_text):
"""
Add message file argument with help_text to arg_parser.
:param arg_parser: ArgumentParser parser to add this argument to.
:param help_text: str: help text for this argument
"""
arg_parser.add_argument('--msg-file',
type=argparse.FileType('r'),
help=help_text) | [
"def",
"_add_message_file",
"(",
"arg_parser",
",",
"help_text",
")",
":",
"arg_parser",
".",
"add_argument",
"(",
"'--msg-file'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'r'",
")",
",",
"help",
"=",
"help_text",
")"
] | Add message file argument with help_text to arg_parser.
:param arg_parser: ArgumentParser parser to add this argument to.
:param help_text: str: help text for this argument | [
"Add",
"mesage",
"file",
"argument",
"with",
"help_text",
"to",
"arg_parser",
".",
":",
"param",
"arg_parser",
":",
"ArgumentParser",
"parser",
"to",
"add",
"this",
"argument",
"to",
".",
":",
"param",
"help_text",
":",
"str",
":",
"help",
"text",
"for",
"this",
"argument"
] | python | train |
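A runnable sketch of how `_add_message_file` wires up `argparse.FileType`; the program name and help text below are placeholders:

```python
import argparse

parser = argparse.ArgumentParser(prog='ddsc')
# Equivalent of _add_message_file(parser, 'Message file to attach')
parser.add_argument('--msg-file',
                    type=argparse.FileType('r'),
                    help='Message file to attach')

# argparse opens the file for reading during parsing, so 'notes.txt'
# must exist when this runs.
args = parser.parse_args(['--msg-file', 'notes.txt'])
print(args.msg_file.read())
```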
pudo/jsonmapping | jsonmapping/transforms.py | https://github.com/pudo/jsonmapping/blob/4cf0a20a393ba82e00651c6fd39522a67a0155de/jsonmapping/transforms.py#L26-L32 | def slugify(mapping, bind, values):
""" Transform all values into URL-capable slugs. """
for value in values:
if isinstance(value, six.string_types):
value = transliterate(value)
value = normality.slugify(value)
yield value | [
"def",
"slugify",
"(",
"mapping",
",",
"bind",
",",
"values",
")",
":",
"for",
"value",
"in",
"values",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"value",
"=",
"transliterate",
"(",
"value",
")",
"value",
"=",
"normality",
".",
"slugify",
"(",
"value",
")",
"yield",
"value"
] | Transform all values into URL-capable slugs. | [
"Transform",
"all",
"values",
"into",
"URL",
"-",
"capable",
"slugs",
"."
] | python | train |
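`slugify` above is a lazy generator transform. The sketch below keeps the same shape but swaps jsonmapping's `transliterate`/`normality.slugify` for a crude inline stand-in, so it runs without those dependencies (it still uses `six`, as the original does):

```python
import six

def slugify_sketch(mapping, bind, values):
    for value in values:
        if isinstance(value, six.string_types):
            # crude stand-in for transliterate() + normality.slugify()
            value = value.strip().lower().replace(' ', '-')
        yield value  # non-strings pass through untouched

print(list(slugify_sketch(None, None, ['Hello World', 42, 'Foo Bar'])))
# ['hello-world', 42, 'foo-bar']
```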
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/memory_profiler.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L192-L203 | def wrap_function(self, func):
""" Wrap a function to profile it.
"""
def f(*args, **kwds):
self.enable_by_count()
try:
result = func(*args, **kwds)
finally:
self.disable_by_count()
return result
return f | [
"def",
"wrap_function",
"(",
"self",
",",
"func",
")",
":",
"def",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"self",
".",
"enable_by_count",
"(",
")",
"try",
":",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"finally",
":",
"self",
".",
"disable_by_count",
"(",
")",
"return",
"result",
"return",
"f"
] | Wrap a function to profile it. | [
"Wrap",
"a",
"function",
"to",
"profile",
"it",
"."
] | python | train |
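The `wrap_function` sample above is the standard paired enable/disable decorator pattern. A self-contained stand-in makes the flow visible (the real `memory_profiler.LineProfiler` records per-line memory usage instead of a simple counter):

```python
# Minimal stand-in profiler showing the same wrap/enable/disable pattern.
class CountingProfiler(object):
    def __init__(self):
        self.calls = 0

    def enable_by_count(self):
        self.calls += 1

    def disable_by_count(self):
        pass

    def wrap_function(self, func):
        def f(*args, **kwds):
            self.enable_by_count()
            try:
                result = func(*args, **kwds)
            finally:
                # disable runs even if func raises
                self.disable_by_count()
            return result
        return f

prof = CountingProfiler()
add = prof.wrap_function(lambda a, b: a + b)
print(add(2, 3), prof.calls)  # 5 1
```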
webrecorder/pywb | pywb/utils/loaders.py | https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/utils/loaders.py#L48-L52 | def to_file_url(filename):
""" Convert a filename to a file:// url
"""
url = 'file://' + os.path.abspath(filename).replace(os.path.sep, '/')
return url | [
"def",
"to_file_url",
"(",
"filename",
")",
":",
"url",
"=",
"'file://'",
"+",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'/'",
")",
"return",
"url"
] | Convert a filename to a file:// url | [
"Convert",
"a",
"filename",
"to",
"a",
"file",
":",
"//",
"url"
] | python | train |
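A quick usage sketch for `to_file_url`; the printed result depends on the current working directory and platform, so the outputs shown are illustrative:

```python
import os

def to_file_url(filename):
    # same logic as the pywb helper above
    return 'file://' + os.path.abspath(filename).replace(os.path.sep, '/')

print(to_file_url('archives/example.warc.gz'))
# POSIX:   file:///home/user/archives/example.warc.gz
# Windows: file://C:/Users/user/archives/example.warc.gz
```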
pandas-dev/pandas | pandas/core/frame.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2679-L2701 | def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable) | [
"def",
"get_value",
"(",
"self",
",",
"index",
",",
"col",
",",
"takeable",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"\"get_value is deprecated and will be removed \"",
"\"in a future release. Please use \"",
"\".at[] or .iat[] accessors instead\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"self",
".",
"_get_value",
"(",
"index",
",",
"col",
",",
"takeable",
"=",
"takeable",
")"
] | Quickly retrieve single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar | [
"Quickly",
"retrieve",
"single",
"value",
"at",
"passed",
"column",
"and",
"index",
"."
] | python | train |
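Since `DataFrame.get_value` is deprecated in the sample above, the recommended replacements are the `.at`/`.iat` accessors it forwards to:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])

# Instead of the deprecated df.get_value('x', 'b'):
print(df.at['x', 'b'])   # label-based scalar lookup -> 3
print(df.iat[0, 1])      # positional scalar lookup  -> 3
```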
log2timeline/plaso | plaso/output/dynamic.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/dynamic.py#L138-L157 | def _FormatMessage(self, event):
"""Formats the message.
Args:
event (EventObject): event.
Returns:
str: message field.
Raises:
NoFormatterFound: if no event formatter can be found to match the data
type in the event.
"""
message, _ = self._output_mediator.GetFormattedMessages(event)
if message is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
return message | [
"def",
"_FormatMessage",
"(",
"self",
",",
"event",
")",
":",
"message",
",",
"_",
"=",
"self",
".",
"_output_mediator",
".",
"GetFormattedMessages",
"(",
"event",
")",
"if",
"message",
"is",
"None",
":",
"data_type",
"=",
"getattr",
"(",
"event",
",",
"'data_type'",
",",
"'UNKNOWN'",
")",
"raise",
"errors",
".",
"NoFormatterFound",
"(",
"'Unable to find event formatter for: {0:s}.'",
".",
"format",
"(",
"data_type",
")",
")",
"return",
"message"
] | Formats the message.
Args:
event (EventObject): event.
Returns:
str: message field.
Raises:
NoFormatterFound: if no event formatter can be found to match the data
type in the event. | [
"Formats",
"the",
"message",
"."
] | python | train |
adafruit/Adafruit_CircuitPython_BME280 | adafruit_bme280.py | https://github.com/adafruit/Adafruit_CircuitPython_BME280/blob/febcd51983dc2bc3cd006bacaada505251c39af1/adafruit_bme280.py#L366-L399 | def humidity(self):
"""
The relative humidity in RH %
returns None if humidity measurement is disabled
"""
self._read_temperature()
hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)
#print("Humidity data: ", hum)
adc = float(hum[0] << 8 | hum[1])
#print("adc:", adc)
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
var1 = float(self._t_fine) - 76800.0
#print("var1 ", var1)
var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)
#print("var2 ",var2)
var3 = adc - var2
#print("var3 ",var3)
var4 = self._humidity_calib[1] / 65536.0
#print("var4 ",var4)
var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)
#print("var5 ",var5)
var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5
#print("var6 ",var6)
var6 = var3 * var4 * (var5 * var6)
humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)
if humidity > _BME280_HUMIDITY_MAX:
return _BME280_HUMIDITY_MAX
if humidity < _BME280_HUMIDITY_MIN:
return _BME280_HUMIDITY_MIN
# else...
return humidity | [
"def",
"humidity",
"(",
"self",
")",
":",
"self",
".",
"_read_temperature",
"(",
")",
"hum",
"=",
"self",
".",
"_read_register",
"(",
"_BME280_REGISTER_HUMIDDATA",
",",
"2",
")",
"#print(\"Humidity data: \", hum)",
"adc",
"=",
"float",
"(",
"hum",
"[",
"0",
"]",
"<<",
"8",
"|",
"hum",
"[",
"1",
"]",
")",
"#print(\"adc:\", adc)",
"# Algorithm from the BME280 driver",
"# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c",
"var1",
"=",
"float",
"(",
"self",
".",
"_t_fine",
")",
"-",
"76800.0",
"#print(\"var1 \", var1)",
"var2",
"=",
"(",
"self",
".",
"_humidity_calib",
"[",
"3",
"]",
"*",
"64.0",
"+",
"(",
"self",
".",
"_humidity_calib",
"[",
"4",
"]",
"/",
"16384.0",
")",
"*",
"var1",
")",
"#print(\"var2 \",var2)",
"var3",
"=",
"adc",
"-",
"var2",
"#print(\"var3 \",var3)",
"var4",
"=",
"self",
".",
"_humidity_calib",
"[",
"1",
"]",
"/",
"65536.0",
"#print(\"var4 \",var4)",
"var5",
"=",
"(",
"1.0",
"+",
"(",
"self",
".",
"_humidity_calib",
"[",
"2",
"]",
"/",
"67108864.0",
")",
"*",
"var1",
")",
"#print(\"var5 \",var5)",
"var6",
"=",
"1.0",
"+",
"(",
"self",
".",
"_humidity_calib",
"[",
"5",
"]",
"/",
"67108864.0",
")",
"*",
"var1",
"*",
"var5",
"#print(\"var6 \",var6)",
"var6",
"=",
"var3",
"*",
"var4",
"*",
"(",
"var5",
"*",
"var6",
")",
"humidity",
"=",
"var6",
"*",
"(",
"1.0",
"-",
"self",
".",
"_humidity_calib",
"[",
"0",
"]",
"*",
"var6",
"/",
"524288.0",
")",
"if",
"humidity",
">",
"_BME280_HUMIDITY_MAX",
":",
"return",
"_BME280_HUMIDITY_MAX",
"if",
"humidity",
"<",
"_BME280_HUMIDITY_MIN",
":",
"return",
"_BME280_HUMIDITY_MIN",
"# else...",
"return",
"humidity"
] | The relative humidity in RH %
returns None if humidity measurement is disabled | [
"The",
"relative",
"humidity",
"in",
"RH",
"%",
"returns",
"None",
"if",
"humidity",
"measurement",
"is",
"disabled"
] | python | train |
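The final clamping step of the `humidity` property is worth isolating. The sketch below assumes the driver's bounds are the datasheet's 0-100 %RH range; the actual constant values are defined elsewhere in adafruit_bme280.py and are not shown in this sample:

```python
# Assumed values -- the real constants live elsewhere in adafruit_bme280.py.
_BME280_HUMIDITY_MIN = 0
_BME280_HUMIDITY_MAX = 100

def clamp_humidity(humidity):
    """Mirror the range check at the end of the property above."""
    if humidity > _BME280_HUMIDITY_MAX:
        return _BME280_HUMIDITY_MAX
    if humidity < _BME280_HUMIDITY_MIN:
        return _BME280_HUMIDITY_MIN
    return humidity

print(clamp_humidity(104.2))  # 100
print(clamp_humidity(-1.5))   # 0
print(clamp_humidity(43.7))   # 43.7
```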
saltstack/salt | salt/utils/stringutils.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L364-L389 | def expr_match(line, expr):
'''
Checks whether or not the passed value matches the specified expression.
Tries to match expr first as a glob using fnmatch.fnmatch(), and then tries
to match expr as a regular expression. Originally designed to match minion
IDs for whitelists/blacklists.
Note that this also does exact matches, as fnmatch.fnmatch() will return
``True`` when no glob characters are used and the string is an exact match:
.. code-block:: python
>>> fnmatch.fnmatch('foo', 'foo')
True
'''
try:
if fnmatch.fnmatch(line, expr):
return True
try:
if re.match(r'\A{0}\Z'.format(expr), line):
return True
except re.error:
pass
except TypeError:
log.exception('Value %r or expression %r is not a string', line, expr)
return False | [
"def",
"expr_match",
"(",
"line",
",",
"expr",
")",
":",
"try",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"line",
",",
"expr",
")",
":",
"return",
"True",
"try",
":",
"if",
"re",
".",
"match",
"(",
"r'\\A{0}\\Z'",
".",
"format",
"(",
"expr",
")",
",",
"line",
")",
":",
"return",
"True",
"except",
"re",
".",
"error",
":",
"pass",
"except",
"TypeError",
":",
"log",
".",
"exception",
"(",
"'Value %r or expression %r is not a string'",
",",
"line",
",",
"expr",
")",
"return",
"False"
] | Checks whether or not the passed value matches the specified expression.
Tries to match expr first as a glob using fnmatch.fnmatch(), and then tries
to match expr as a regular expression. Originally designed to match minion
IDs for whitelists/blacklists.
Note that this also does exact matches, as fnmatch.fnmatch() will return
``True`` when no glob characters are used and the string is an exact match:
.. code-block:: python
>>> fnmatch.fnmatch('foo', 'foo')
True | [
"Checks",
"whether",
"or",
"not",
"the",
"passed",
"value",
"matches",
"the",
"specified",
"expression",
".",
"Tries",
"to",
"match",
"expr",
"first",
"as",
"a",
"glob",
"using",
"fnmatch",
".",
"fnmatch",
"()",
"and",
"then",
"tries",
"to",
"match",
"expr",
"as",
"a",
"regular",
"expression",
".",
"Originally",
"designed",
"to",
"match",
"minion",
"IDs",
"for",
"whitelists",
"/",
"blacklists",
"."
] | python | train |
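`expr_match` composes glob matching with anchored-regex matching. A trimmed, dependency-free sketch (dropping Salt's logging of non-string inputs) behaves the same way for string arguments:

```python
import fnmatch
import re

def expr_match(line, expr):
    # glob first, then anchored regex, mirroring the Salt helper above
    if fnmatch.fnmatch(line, expr):
        return True
    try:
        return re.match(r'\A{0}\Z'.format(expr), line) is not None
    except re.error:
        return False

print(expr_match('minion-web01', 'minion-*'))     # True  (glob)
print(expr_match('minion-web01', r'minion-\w+'))  # True  (regex)
print(expr_match('minion-web01', 'db01'))         # False
```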
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/external_ca/apis/certificate_issuers_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/external_ca/apis/certificate_issuers_api.py#L331-L349 | def get_certificate_issuers(self, **kwargs): # noqa: E501
"""Get certificate issuers list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_certificate_issuers(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:return: CertificateIssuerInfoListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_certificate_issuers_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_certificate_issuers_with_http_info(**kwargs) # noqa: E501
return data | [
"def",
"get_certificate_issuers",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"get_certificate_issuers_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_certificate_issuers_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Get certificate issuers list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_certificate_issuers(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:return: CertificateIssuerInfoListResponse
If the method is called asynchronously,
returns the request thread. | [
"Get",
"certificate",
"issuers",
"list",
".",
"#",
"noqa",
":",
"E501"
] | python | train |
pantsbuild/pex | pex/util.py | https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/util.py#L38-L46 | def zipsafe(dist):
"""Returns whether or not we determine a distribution is zip-safe."""
# zip-safety is only an attribute of eggs. wheels are considered never
# zip safe per implications of PEP 427.
if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'):
egg_metadata = dist.metadata_listdir('')
return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata
else:
return False | [
"def",
"zipsafe",
"(",
"dist",
")",
":",
"# zip-safety is only an attribute of eggs. wheels are considered never",
"# zip safe per implications of PEP 427.",
"if",
"hasattr",
"(",
"dist",
",",
"'egg_info'",
")",
"and",
"dist",
".",
"egg_info",
".",
"endswith",
"(",
"'EGG-INFO'",
")",
":",
"egg_metadata",
"=",
"dist",
".",
"metadata_listdir",
"(",
"''",
")",
"return",
"'zip-safe'",
"in",
"egg_metadata",
"and",
"'native_libs.txt'",
"not",
"in",
"egg_metadata",
"else",
":",
"return",
"False"
] | Returns whether or not we determine a distribution is zip-safe. | [
"Returns",
"whether",
"or",
"not",
"we",
"determine",
"a",
"distribution",
"is",
"zip",
"-",
"safe",
"."
] | python | train |
tylerbutler/engineer | engineer/commands/core.py | https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/commands/core.py#L165-L178 | def parser(self):
"""Returns the appropriate parser to use for adding arguments to your command."""
if self._command_parser is None:
parents = []
if self.need_verbose:
parents.append(_verbose_parser)
if self.need_settings:
parents.append(_settings_parser)
self._command_parser = self._main_parser.add_parser(self.name,
help=self.help,
parents=parents,
formatter_class=argparse.RawDescriptionHelpFormatter)
return self._command_parser | [
"def",
"parser",
"(",
"self",
")",
":",
"if",
"self",
".",
"_command_parser",
"is",
"None",
":",
"parents",
"=",
"[",
"]",
"if",
"self",
".",
"need_verbose",
":",
"parents",
".",
"append",
"(",
"_verbose_parser",
")",
"if",
"self",
".",
"need_settings",
":",
"parents",
".",
"append",
"(",
"_settings_parser",
")",
"self",
".",
"_command_parser",
"=",
"self",
".",
"_main_parser",
".",
"add_parser",
"(",
"self",
".",
"name",
",",
"help",
"=",
"self",
".",
"help",
",",
"parents",
"=",
"parents",
",",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
")",
"return",
"self",
".",
"_command_parser"
] | Returns the appropriate parser to use for adding arguments to your command. | [
"Returns",
"the",
"appropriate",
"parser",
"to",
"use",
"for",
"adding",
"arguments",
"to",
"your",
"command",
"."
] | python | train |
UCL-INGI/INGInious | inginious/frontend/arch_helper.py | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/arch_helper.py#L58-L115 | def create_arch(configuration, tasks_fs, context, course_factory):
""" Helper that can start a simple complete INGInious arch locally if needed, or a client to a remote backend.
Intended to be used on command line, makes use of exit() and the logger inginious.frontend.
:param configuration: configuration dict
:param tasks_fs: FileSystemProvider to the courses/tasks folders
:param context: a ZMQ context
:param course_factory: The course factory to be used by the frontend
:param is_testing: boolean
:return: a Client object
"""
logger = logging.getLogger("inginious.frontend")
backend_link = configuration.get("backend", "local")
if backend_link == "local":
logger.info("Starting a simple arch (backend, docker-agent and mcq-agent) locally")
local_config = configuration.get("local-config", {})
concurrency = local_config.get("concurrency", multiprocessing.cpu_count())
debug_host = local_config.get("debug_host", None)
debug_ports = local_config.get("debug_ports", None)
tmp_dir = local_config.get("tmp_dir", "./agent_tmp")
if debug_ports is not None:
try:
debug_ports = debug_ports.split("-")
debug_ports = range(int(debug_ports[0]), int(debug_ports[1]))
except:
logger.error("debug_ports should be in the format 'begin-end', for example '1000-2000'")
exit(1)
else:
debug_ports = range(64100, 64111)
client = Client(context, "inproc://backend_client")
backend = Backend(context, "inproc://backend_agent", "inproc://backend_client")
agent_docker = DockerAgent(context, "inproc://backend_agent", "Docker - Local agent", concurrency, tasks_fs, debug_host, debug_ports, tmp_dir)
agent_mcq = MCQAgent(context, "inproc://backend_agent", "MCQ - Local agent", 1, tasks_fs, course_factory)
asyncio.ensure_future(_restart_on_cancel(logger, agent_docker))
asyncio.ensure_future(_restart_on_cancel(logger, agent_mcq))
asyncio.ensure_future(_restart_on_cancel(logger, backend))
elif backend_link in ["remote", "remote_manuel", "docker_machine"]: #old-style config
logger.error("Value '%s' for the 'backend' option is configuration.yaml is not supported anymore. \n"
"Have a look at the 'update' section of the INGInious documentation in order to upgrade your configuration.yaml", backend_link)
exit(1)
return None #... pycharm returns a warning else :-(
else:
logger.info("Creating a client to backend at %s", backend_link)
client = Client(context, backend_link)
# check for old-style configuration entries
old_style_configs = ["agents", 'containers', "machines", "docker_daemons"]
for c in old_style_configs:
if c in configuration:
logger.warning("Option %s in configuration.yaml is not used anymore.\n"
"Have a look at the 'update' section of the INGInious documentation in order to upgrade your configuration.yaml", c)
return client | [
"def",
"create_arch",
"(",
"configuration",
",",
"tasks_fs",
",",
"context",
",",
"course_factory",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"inginious.frontend\"",
")",
"backend_link",
"=",
"configuration",
".",
"get",
"(",
"\"backend\"",
",",
"\"local\"",
")",
"if",
"backend_link",
"==",
"\"local\"",
":",
"logger",
".",
"info",
"(",
"\"Starting a simple arch (backend, docker-agent and mcq-agent) locally\"",
")",
"local_config",
"=",
"configuration",
".",
"get",
"(",
"\"local-config\"",
",",
"{",
"}",
")",
"concurrency",
"=",
"local_config",
".",
"get",
"(",
"\"concurrency\"",
",",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")",
"debug_host",
"=",
"local_config",
".",
"get",
"(",
"\"debug_host\"",
",",
"None",
")",
"debug_ports",
"=",
"local_config",
".",
"get",
"(",
"\"debug_ports\"",
",",
"None",
")",
"tmp_dir",
"=",
"local_config",
".",
"get",
"(",
"\"tmp_dir\"",
",",
"\"./agent_tmp\"",
")",
"if",
"debug_ports",
"is",
"not",
"None",
":",
"try",
":",
"debug_ports",
"=",
"debug_ports",
".",
"split",
"(",
"\"-\"",
")",
"debug_ports",
"=",
"range",
"(",
"int",
"(",
"debug_ports",
"[",
"0",
"]",
")",
",",
"int",
"(",
"debug_ports",
"[",
"1",
"]",
")",
")",
"except",
":",
"logger",
".",
"error",
"(",
"\"debug_ports should be in the format 'begin-end', for example '1000-2000'\"",
")",
"exit",
"(",
"1",
")",
"else",
":",
"debug_ports",
"=",
"range",
"(",
"64100",
",",
"64111",
")",
"client",
"=",
"Client",
"(",
"context",
",",
"\"inproc://backend_client\"",
")",
"backend",
"=",
"Backend",
"(",
"context",
",",
"\"inproc://backend_agent\"",
",",
"\"inproc://backend_client\"",
")",
"agent_docker",
"=",
"DockerAgent",
"(",
"context",
",",
"\"inproc://backend_agent\"",
",",
"\"Docker - Local agent\"",
",",
"concurrency",
",",
"tasks_fs",
",",
"debug_host",
",",
"debug_ports",
",",
"tmp_dir",
")",
"agent_mcq",
"=",
"MCQAgent",
"(",
"context",
",",
"\"inproc://backend_agent\"",
",",
"\"MCQ - Local agent\"",
",",
"1",
",",
"tasks_fs",
",",
"course_factory",
")",
"asyncio",
".",
"ensure_future",
"(",
"_restart_on_cancel",
"(",
"logger",
",",
"agent_docker",
")",
")",
"asyncio",
".",
"ensure_future",
"(",
"_restart_on_cancel",
"(",
"logger",
",",
"agent_mcq",
")",
")",
"asyncio",
".",
"ensure_future",
"(",
"_restart_on_cancel",
"(",
"logger",
",",
"backend",
")",
")",
"elif",
"backend_link",
"in",
"[",
"\"remote\"",
",",
"\"remote_manuel\"",
",",
"\"docker_machine\"",
"]",
":",
"#old-style config",
"logger",
".",
"error",
"(",
"\"Value '%s' for the 'backend' option is configuration.yaml is not supported anymore. \\n\"",
"\"Have a look at the 'update' section of the INGInious documentation in order to upgrade your configuration.yaml\"",
",",
"backend_link",
")",
"exit",
"(",
"1",
")",
"return",
"None",
"#... pycharm returns a warning else :-(",
"else",
":",
"logger",
".",
"info",
"(",
"\"Creating a client to backend at %s\"",
",",
"backend_link",
")",
"client",
"=",
"Client",
"(",
"context",
",",
"backend_link",
")",
"# check for old-style configuration entries",
"old_style_configs",
"=",
"[",
"\"agents\"",
",",
"'containers'",
",",
"\"machines\"",
",",
"\"docker_daemons\"",
"]",
"for",
"c",
"in",
"old_style_configs",
":",
"if",
"c",
"in",
"configuration",
":",
"logger",
".",
"warning",
"(",
"\"Option %s in configuration.yaml is not used anymore.\\n\"",
"\"Have a look at the 'update' section of the INGInious documentation in order to upgrade your configuration.yaml\"",
",",
"c",
")",
"return",
"client"
] | Helper that can start a simple complete INGInious arch locally if needed, or a client to a remote backend.
Intended to be used on command line, makes uses of exit() and the logger inginious.frontend.
:param configuration: configuration dict
:param tasks_fs: FileSystemProvider to the courses/tasks folders
:param context: a ZMQ context
:param course_factory: The course factory to be used by the frontend
:param is_testing: boolean
:return: a Client object | [
"Helper",
"that",
"can",
"start",
"a",
"simple",
"complete",
"INGInious",
"arch",
"locally",
"if",
"needed",
"or",
"a",
"client",
"to",
"a",
"remote",
"backend",
".",
"Intended",
"to",
"be",
"used",
"on",
"command",
"line",
"makes",
"uses",
"of",
"exit",
"()",
"and",
"the",
"logger",
"inginious",
".",
"frontend",
".",
":",
"param",
"configuration",
":",
"configuration",
"dict",
":",
"param",
"tasks_fs",
":",
"FileSystemProvider",
"to",
"the",
"courses",
"/",
"tasks",
"folders",
":",
"param",
"context",
":",
"a",
"ZMQ",
"context",
":",
"param",
"course_factory",
":",
"The",
"course",
"factory",
"to",
"be",
"used",
"by",
"the",
"frontend",
":",
"param",
"is_testing",
":",
"boolean",
":",
"return",
":",
"a",
"Client",
"object"
] | python | train |
broadinstitute/fiss | firecloud/workspace.py | https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/workspace.py#L42-L54 | def new(namespace, name, protected=False,
attributes=dict(), api_url=fapi.PROD_API_ROOT):
"""Create a new FireCloud workspace.
Returns:
Workspace: A new FireCloud workspace
Raises:
FireCloudServerError: API call failed.
"""
r = fapi.create_workspace(namespace, name, protected, attributes, api_url)
fapi._check_response_code(r, 201)
return Workspace(namespace, name, api_url) | [
"def",
"new",
"(",
"namespace",
",",
"name",
",",
"protected",
"=",
"False",
",",
"attributes",
"=",
"dict",
"(",
")",
",",
"api_url",
"=",
"fapi",
".",
"PROD_API_ROOT",
")",
":",
"r",
"=",
"fapi",
".",
"create_workspace",
"(",
"namespace",
",",
"name",
",",
"protected",
",",
"attributes",
",",
"api_url",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"201",
")",
"return",
"Workspace",
"(",
"namespace",
",",
"name",
",",
"api_url",
")"
] | Create a new FireCloud workspace.
Returns:
Workspace: A new FireCloud workspace
Raises:
FireCloudServerError: API call failed. | [
"Create",
"a",
"new",
"FireCloud",
"workspace",
"."
] | python | train |
abarto/pandas-drf-tools | pandas_drf_tools/generics.py | https://github.com/abarto/pandas-drf-tools/blob/ec754ac75327e6ee5a1efd256a572a9a531e4d28/pandas_drf_tools/generics.py#L55-L59 | def index_row(self, dataframe):
"""
Indexes the row based on the request parameters.
"""
return dataframe.loc[self.kwargs[self.lookup_url_kwarg]].to_frame().T | [
"def",
"index_row",
"(",
"self",
",",
"dataframe",
")",
":",
"return",
"dataframe",
".",
"loc",
"[",
"self",
".",
"kwargs",
"[",
"self",
".",
"lookup_url_kwarg",
"]",
"]",
".",
"to_frame",
"(",
")",
".",
"T"
] | Indexes the row based on the request parameters. | [
"Indexes",
"the",
"row",
"based",
"on",
"the",
"request",
"parameters",
"."
] | python | valid |
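What `index_row` returns is a one-row DataFrame rather than a Series; a quick pandas sketch with a made-up frame and lookup key shows the `.loc[...].to_frame().T` round trip:

```python
import pandas as pd

df = pd.DataFrame({'name': ['ada', 'bob'], 'score': [9, 7]}, index=[10, 11])

# Equivalent of index_row() for lookup key 10: one row, DataFrame shape kept
row_frame = df.loc[10].to_frame().T
print(row_frame)
#    name score
# 10  ada     9
```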
arne-cl/discoursegraphs | src/discoursegraphs/discoursegraph.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/discoursegraph.py#L159-L181 | def _get_all_offsets(self, offset_ns=None):
"""
returns all token offsets of this document as a generator of
(token node ID str, character onset int, character offset int) tuples.
Parameters
----------
offset_ns : str or None
The namespace from which the offsets will be retrieved. If no
namespace is given, the default namespace of this document graph is
chosen
Returns
-------
offsets : generator(tuple(str, int, int))
a generator of (token node ID str, character onset int, character
offset int) tuples, which represents all the tokens in the order
they occur in the document.
"""
for token_id, _token_str in self.get_tokens():
onset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')]
offset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')]
yield (token_id, onset, offset) | [
"def",
"_get_all_offsets",
"(",
"self",
",",
"offset_ns",
"=",
"None",
")",
":",
"for",
"token_id",
",",
"_token_str",
"in",
"self",
".",
"get_tokens",
"(",
")",
":",
"onset",
"=",
"self",
".",
"node",
"[",
"token_id",
"]",
"[",
"'{0}:{1}'",
".",
"format",
"(",
"offset_ns",
",",
"'onset'",
")",
"]",
"offset",
"=",
"self",
".",
"node",
"[",
"token_id",
"]",
"[",
"'{0}:{1}'",
".",
"format",
"(",
"offset_ns",
",",
"'offset'",
")",
"]",
"yield",
"(",
"token_id",
",",
"onset",
",",
"offset",
")"
] | returns all token offsets of this document as a generator of
(token node ID str, character onset int, character offset int) tuples.
Parameters
----------
offset_ns : str or None
The namespace from which the offsets will be retrieved. If no
namespace is given, the default namespace of this document graph is
chosen
Returns
-------
offsets : generator(tuple(str, int, int))
a generator of (token node ID str, character onset int, character
offset int) tuples, which represents all the tokens in the order
they occur in the document. | [
"returns",
"all",
"token",
"offsets",
"of",
"this",
"document",
"as",
"a",
"generator",
"of",
"(",
"token",
"node",
"ID",
"str",
"character",
"onset",
"int",
"character",
"offset",
"int",
")",
"tuples",
"."
] | python | train |
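A hedged sketch of consuming `_get_all_offsets`: the node attribute layout below (namespaced `onset`/`offset` keys) follows the docstring, but the graph contents are invented:

```python
# Stand-in document graph: two tokens with namespaced character offsets.
class FakeGraph(object):
    node = {
        'tok1': {'mynS:onset': 0, 'mynS:offset': 5},
        'tok2': {'mynS:onset': 6, 'mynS:offset': 11},
    }

    def get_tokens(self):
        return [('tok1', 'Hello'), ('tok2', 'world')]

    def _get_all_offsets(self, offset_ns=None):
        for token_id, _token_str in self.get_tokens():
            onset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')]
            offset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')]
            yield (token_id, onset, offset)

print(list(FakeGraph()._get_all_offsets(offset_ns='mynS')))
# [('tok1', 0, 5), ('tok2', 6, 11)]
```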
dunovank/jupyter-themes | jupyterthemes/jtplot.py | https://github.com/dunovank/jupyter-themes/blob/421016c2e4fed75fa1830d664c10478d9bd25ed1/jupyterthemes/jtplot.py#L276-L287 | def reset():
""" full reset of matplotlib default style and colors
"""
colors = [(0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, .75, 0.),
(.75, .75, 0.), (0., .75, .75), (0., 0., 0.)]
for code, color in zip("bgrmyck", colors):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
mpl.rcParams.update(mpl.rcParamsDefault)
mpl.rcParams['figure.facecolor'] = 'white'
mpl.rcParams['axes.facecolor'] = 'white' | [
"def",
"reset",
"(",
")",
":",
"colors",
"=",
"[",
"(",
"0.",
",",
"0.",
",",
"1.",
")",
",",
"(",
"0.",
",",
".5",
",",
"0.",
")",
",",
"(",
"1.",
",",
"0.",
",",
"0.",
")",
",",
"(",
".75",
",",
".75",
",",
"0.",
")",
",",
"(",
".75",
",",
".75",
",",
"0.",
")",
",",
"(",
"0.",
",",
".75",
",",
".75",
")",
",",
"(",
"0.",
",",
"0.",
",",
"0.",
")",
"]",
"for",
"code",
",",
"color",
"in",
"zip",
"(",
"\"bgrmyck\"",
",",
"colors",
")",
":",
"rgb",
"=",
"mpl",
".",
"colors",
".",
"colorConverter",
".",
"to_rgb",
"(",
"color",
")",
"mpl",
".",
"colors",
".",
"colorConverter",
".",
"colors",
"[",
"code",
"]",
"=",
"rgb",
"mpl",
".",
"colors",
".",
"colorConverter",
".",
"cache",
"[",
"code",
"]",
"=",
"rgb",
"mpl",
".",
"rcParams",
".",
"update",
"(",
"mpl",
".",
"rcParamsDefault",
")",
"mpl",
".",
"rcParams",
"[",
"'figure.facecolor'",
"]",
"=",
"'white'",
"mpl",
".",
"rcParams",
"[",
"'axes.facecolor'",
"]",
"=",
"'white'"
] | full reset of matplotlib default style and colors | [
"full",
"reset",
"of",
"matplotlib",
"default",
"style",
"and",
"colors"
] | python | train |
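Typical use of `reset` is to undo a previously applied theme. A hedged sketch (it assumes the jupyterthemes package is installed and that the 'onedork' theme is available):

```python
import matplotlib as mpl
from jupyterthemes import jtplot  # assumes jupyterthemes is installed

jtplot.style(theme='onedork')           # apply a dark plotting theme
print(mpl.rcParams['axes.facecolor'])   # theme background color

jtplot.reset()                          # back to matplotlib defaults
print(mpl.rcParams['axes.facecolor'])   # 'white'
```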
projectshift/shift-boiler | boiler/user/role_service.py | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/role_service.py#L14-L28 | def save(self, role, commit=True):
""" Persist role model """
self.is_instance(role)
schema = RoleSchema()
valid = schema.process(role)
if not valid:
return valid
db.session.add(role)
if commit:
db.session.commit()
events.role_saved_event.send(role)
return role | [
"def",
"save",
"(",
"self",
",",
"role",
",",
"commit",
"=",
"True",
")",
":",
"self",
".",
"is_instance",
"(",
"role",
")",
"schema",
"=",
"RoleSchema",
"(",
")",
"valid",
"=",
"schema",
".",
"process",
"(",
"role",
")",
"if",
"not",
"valid",
":",
"return",
"valid",
"db",
".",
"session",
".",
"add",
"(",
"role",
")",
"if",
"commit",
":",
"db",
".",
"session",
".",
"commit",
"(",
")",
"events",
".",
"role_saved_event",
".",
"send",
"(",
"role",
")",
"return",
"role"
] | Persist role model | [
"Persist",
"role",
"model"
] | python | train |
PMEAL/OpenPNM | openpnm/network/GenericNetwork.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/network/GenericNetwork.py#L630-L700 | def find_neighbor_throats(self, pores, mode='union', flatten=True):
r"""
Returns a list of throats neighboring the given pore(s)
Parameters
----------
pores : array_like
Indices of pores whose neighbors are sought
flatten : boolean, optional
If ``True`` (default) a 1D array of unique throat indices is
returned. If ``False`` the returned array contains arrays of
neighboring throat indices for each input pore, in the order
they were sent.
mode : string
Specifies logic to filter the resulting list. Options are:
**'or'** : (default) All neighbors of the input pores. This is
also known as the 'union' in set theory or 'any' in boolean logic.
Both keywords are accepted and treated as 'or'.
**'xor'** : Only neighbors of one and only one input pore. This
is useful for finding the throats that are not shared by any of the
input pores.
**'xnor'** : Neighbors that are shared by two or more input pores.
This is equivalent to finding all neighbors with 'or', minus those
found with 'xor', and is useful for finding neighbors that the
inputs have in common.
**'and'** : Only neighbors shared by all input pores. This is also
known as 'intersection' in set theory and (sometimes) as 'all' in
boolean logic. Both keywords are accepted and treated as 'and'.
Returns
-------
If ``flatten`` is ``True``, returns a 1D array of throat indices
filtered according to the specified mode. If ``flatten`` is ``False``,
returns a list of lists, where each list contains the neighbors of the
corresponding input pores.
Notes
-----
The ``logic`` options are applied to neighboring bonds only, thus it
is not possible to obtain bonds that are part of the global set but
not neighbors. This is because (a) the list of global bonds might be
very large, and (b) it is not possible to return a list of neighbors
for each input site if global sites are considered.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> Ts = pn.find_neighbor_throats(pores=[0, 1])
>>> print(Ts)
[ 0 1 100 101 200 201]
>>> Ts = pn.find_neighbor_throats(pores=[0, 1], flatten=False)
>>> print(Ts)
[array([ 0, 100, 200]), array([ 0, 1, 101, 201])]
"""
pores = self._parse_indices(pores)
if sp.size(pores) == 0:
return sp.array([], ndmin=1, dtype=int)
if 'lil' not in self._im.keys():
self.get_incidence_matrix(fmt='lil')
neighbors = topotools.find_neighbor_bonds(sites=pores, logic=mode,
im=self._im['lil'],
flatten=flatten)
return neighbors | [
"def",
"find_neighbor_throats",
"(",
"self",
",",
"pores",
",",
"mode",
"=",
"'union'",
",",
"flatten",
"=",
"True",
")",
":",
"pores",
"=",
"self",
".",
"_parse_indices",
"(",
"pores",
")",
"if",
"sp",
".",
"size",
"(",
"pores",
")",
"==",
"0",
":",
"return",
"sp",
".",
"array",
"(",
"[",
"]",
",",
"ndmin",
"=",
"1",
",",
"dtype",
"=",
"int",
")",
"if",
"'lil'",
"not",
"in",
"self",
".",
"_im",
".",
"keys",
"(",
")",
":",
"self",
".",
"get_incidence_matrix",
"(",
"fmt",
"=",
"'lil'",
")",
"neighbors",
"=",
"topotools",
".",
"find_neighbor_bonds",
"(",
"sites",
"=",
"pores",
",",
"logic",
"=",
"mode",
",",
"im",
"=",
"self",
".",
"_im",
"[",
"'lil'",
"]",
",",
"flatten",
"=",
"flatten",
")",
"return",
"neighbors"
] | r"""
Returns a list of throats neighboring the given pore(s)
Parameters
----------
pores : array_like
Indices of pores whose neighbors are sought
flatten : boolean, optional
If ``True`` (default) a 1D array of unique throat indices is
returned. If ``False`` the returned array contains arrays of
neighboring throat indices for each input pore, in the order
they were sent.
mode : string
Specifies logic to filter the resulting list. Options are:
**'or'** : (default) All neighbors of the input pores. This is
also known as the 'union' in set theory or 'any' in boolean logic.
Both keywords are accepted and treated as 'or'.
**'xor'** : Only neighbors of one and only one input pore. This
is useful for finding the throats that are not shared by any of the
input pores.
**'xnor'** : Neighbors that are shared by two or more input pores.
This is equivalent to finding all neighbors with 'or', minus those
found with 'xor', and is useful for finding neighbors that the
inputs have in common.
**'and'** : Only neighbors shared by all input pores. This is also
known as 'intersection' in set theory and (sometimes) as 'all' in
boolean logic. Both keywords are accepted and treated as 'and'.
Returns
-------
If ``flatten`` is ``True``, returns a 1D array of throat indices
filtered according to the specified mode. If ``flatten`` is ``False``,
returns a list of lists, where each list contains the neighbors of the
corresponding input pores.
Notes
-----
The ``logic`` options are applied to neighboring bonds only, thus it
is not possible to obtain bonds that are part of the global set but
not neighbors. This is because (a) the list of global bonds might be
very large, and (b) it is not possible to return a list of neighbors
for each input site if global sites are considered.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> Ts = pn.find_neighbor_throats(pores=[0, 1])
>>> print(Ts)
[ 0 1 100 101 200 201]
>>> Ts = pn.find_neighbor_throats(pores=[0, 1], flatten=False)
>>> print(Ts)
[array([ 0, 100, 200]), array([ 0, 1, 101, 201])] | [
"r",
"Returns",
"a",
"list",
"of",
"throats",
"neighboring",
"the",
"given",
"pore",
"(",
"s",
")"
] | python | train |
googleapis/google-cloud-python | bigtable/google/cloud/bigtable/instance.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/instance.py#L453-L477 | def set_iam_policy(self, policy):
"""Sets the access control policy on an instance resource. Replaces any
existing policy.
For more information about policy, please see documentation of
class `google.cloud.bigtable.policy.Policy`
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_set_iam_policy]
:end-before: [END bigtable_set_iam_policy]
:type policy: :class:`google.cloud.bigtable.policy.Policy`
:param policy: A new IAM policy to replace the current IAM policy
of this instance
:rtype: :class:`google.cloud.bigtable.policy.Policy`
:returns: The current IAM policy of this instance.
"""
instance_admin_client = self._client.instance_admin_client
resp = instance_admin_client.set_iam_policy(
resource=self.name, policy=policy.to_pb()
)
return Policy.from_pb(resp) | [
"def",
"set_iam_policy",
"(",
"self",
",",
"policy",
")",
":",
"instance_admin_client",
"=",
"self",
".",
"_client",
".",
"instance_admin_client",
"resp",
"=",
"instance_admin_client",
".",
"set_iam_policy",
"(",
"resource",
"=",
"self",
".",
"name",
",",
"policy",
"=",
"policy",
".",
"to_pb",
"(",
")",
")",
"return",
"Policy",
".",
"from_pb",
"(",
"resp",
")"
] | Sets the access control policy on an instance resource. Replaces any
existing policy.
For more information about policy, please see documentation of
class `google.cloud.bigtable.policy.Policy`
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_set_iam_policy]
:end-before: [END bigtable_set_iam_policy]
:type policy: :class:`google.cloud.bigtable.policy.Policy`
:param policy: A new IAM policy to replace the current IAM policy
of this instance
:rtype: :class:`google.cloud.bigtable.policy.Policy`
:returns: The current IAM policy of this instance. | [
"Sets",
"the",
"access",
"control",
"policy",
"on",
"an",
"instance",
"resource",
".",
"Replaces",
"any",
"existing",
"policy",
"."
] | python | train |
mromanello/hucitlib | knowledge_base/surfext/__init__.py | https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/surfext/__init__.py#L81-L107 | def add_name(self, name, lang=None):
"""
Adds a new name variant to an author.
:param name: the name to be added
:param lang: the language of the name variant
:return: `True` if the name is added, `False` otherwise (the name is a duplicate)
"""
try:
assert (lang, name) not in self.get_names()
except Exception as e:
# TODO: raise a custom exception
logger.warning("Duplicate name detected while adding \"%s (lang=%s)\""%(name, lang))
return False
newlabel = Literal(name, lang=lang) if lang is not None else \
Literal(name)
name = [
id
for id in self.ecrm_P1_is_identified_by
if id.uri == surf.ns.EFRBROO['F12_Name']
][0]
try:
name.rdfs_label.append(newlabel)
name.update()
return True
except Exception as e:
raise e | [
"def",
"add_name",
"(",
"self",
",",
"name",
",",
"lang",
"=",
"None",
")",
":",
"try",
":",
"assert",
"(",
"lang",
",",
"name",
")",
"not",
"in",
"self",
".",
"get_names",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# TODO: raise a custom exception",
"logger",
".",
"warning",
"(",
"\"Duplicate name detected while adding \\\"%s (lang=%s)\\\"\"",
"%",
"(",
"name",
",",
"lang",
")",
")",
"return",
"False",
"newlabel",
"=",
"Literal",
"(",
"name",
",",
"lang",
"=",
"lang",
")",
"if",
"lang",
"is",
"not",
"None",
"else",
"Literal",
"(",
"name",
")",
"name",
"=",
"[",
"id",
"for",
"id",
"in",
"self",
".",
"ecrm_P1_is_identified_by",
"if",
"id",
".",
"uri",
"==",
"surf",
".",
"ns",
".",
"EFRBROO",
"[",
"'F12_Name'",
"]",
"]",
"[",
"0",
"]",
"try",
":",
"name",
".",
"rdfs_label",
".",
"append",
"(",
"newlabel",
")",
"name",
".",
"update",
"(",
")",
"return",
"True",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Adds a new name variant to an author.
:param name: the name to be added
:param lang: the language of the name variant
:return: `True` if the name is added, `False` otherwise (the name is a duplicate) | [
"Adds",
"a",
"new",
"name",
"variant",
"to",
"an",
"author",
"."
] | python | train |
calston/tensor | tensor/outputs/elasticsearch.py | https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/outputs/elasticsearch.py#L61-L71 | def createClient(self):
"""Sets up HTTP connector and starts queue timer
"""
server = self.config.get('server', 'localhost')
port = int(self.config.get('port', 9200))
self.client = elasticsearch.ElasticSearch(self.url, self.user,
self.password, self.index)
self.t.start(self.inter) | [
"def",
"createClient",
"(",
"self",
")",
":",
"server",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'server'",
",",
"'localhost'",
")",
"port",
"=",
"int",
"(",
"self",
".",
"config",
".",
"get",
"(",
"'port'",
",",
"9200",
")",
")",
"self",
".",
"client",
"=",
"elasticsearch",
".",
"ElasticSearch",
"(",
"self",
".",
"url",
",",
"self",
".",
"user",
",",
"self",
".",
"password",
",",
"self",
".",
"index",
")",
"self",
".",
"t",
".",
"start",
"(",
"self",
".",
"inter",
")"
] | Sets up HTTP connector and starts queue timer | [
"Sets",
"up",
"HTTP",
"connector",
"and",
"starts",
"queue",
"timer"
] | python | test |
f3at/feat | src/feat/models/model.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/model.py#L633-L668 | def initiate(self, aspect=None, view=None, parent=None, officer=None):
"""Do not keep any reference to its parent,
this way it can be garbage-collected."""
def got_view(view):
if view is None:
return None
return init(view)
def init(view):
self.view = view
d = self.call_mro("init")
d.addCallback(retrieve_reference)
d.addCallback(update_reference)
return d
def retrieve_reference(_param):
if callable(self._model_reference):
context = self.make_context()
return self._model_reference(self.source, context)
return self._model_reference
def update_reference(reference):
self.reference = reference
return self
if officer is not None:
self.officer = IOfficer(officer)
self.aspect = IAspect(aspect) if aspect is not None else None
if self._model_view is not None:
if callable(self._model_view):
context = self.make_context(view=view)
d = self._model_view(None, context)
return d.addCallback(got_view)
return init(self._model_view)
return init(view) | [
"def",
"initiate",
"(",
"self",
",",
"aspect",
"=",
"None",
",",
"view",
"=",
"None",
",",
"parent",
"=",
"None",
",",
"officer",
"=",
"None",
")",
":",
"def",
"got_view",
"(",
"view",
")",
":",
"if",
"view",
"is",
"None",
":",
"return",
"None",
"return",
"init",
"(",
"view",
")",
"def",
"init",
"(",
"view",
")",
":",
"self",
".",
"view",
"=",
"view",
"d",
"=",
"self",
".",
"call_mro",
"(",
"\"init\"",
")",
"d",
".",
"addCallback",
"(",
"retrieve_reference",
")",
"d",
".",
"addCallback",
"(",
"update_reference",
")",
"return",
"d",
"def",
"retrieve_reference",
"(",
"_param",
")",
":",
"if",
"callable",
"(",
"self",
".",
"_model_reference",
")",
":",
"context",
"=",
"self",
".",
"make_context",
"(",
")",
"return",
"self",
".",
"_model_reference",
"(",
"self",
".",
"source",
",",
"context",
")",
"return",
"self",
".",
"_model_reference",
"def",
"update_reference",
"(",
"reference",
")",
":",
"self",
".",
"reference",
"=",
"reference",
"return",
"self",
"if",
"officer",
"is",
"not",
"None",
":",
"self",
".",
"officer",
"=",
"IOfficer",
"(",
"officer",
")",
"self",
".",
"aspect",
"=",
"IAspect",
"(",
"aspect",
")",
"if",
"aspect",
"is",
"not",
"None",
"else",
"None",
"if",
"self",
".",
"_model_view",
"is",
"not",
"None",
":",
"if",
"callable",
"(",
"self",
".",
"_model_view",
")",
":",
"context",
"=",
"self",
".",
"make_context",
"(",
"view",
"=",
"view",
")",
"d",
"=",
"self",
".",
"_model_view",
"(",
"None",
",",
"context",
")",
"return",
"d",
".",
"addCallback",
"(",
"got_view",
")",
"return",
"init",
"(",
"self",
".",
"_model_view",
")",
"return",
"init",
"(",
"view",
")"
] | Do not keep any reference to its parent,
this way it can be garbage-collected. | [
"Do",
"not",
"keep",
"any",
"reference",
"to",
"its",
"parent",
"this",
"way",
"it",
"can",
"be",
"garbage",
"-",
"collected",
"."
] | python | train |
Vagrants/blackbird | blackbird/plugins/base.py | https://github.com/Vagrants/blackbird/blob/3b38cd5650caae362e0668dbd38bf8f88233e079/blackbird/plugins/base.py#L89-L95 | def _generate(self):
u"""overrided in each modules."""
self._data['key'] = self.key
self._data['value'] = self.value
self._data['host'] = self.host
self._data['clock'] = self.clock | [
"def",
"_generate",
"(",
"self",
")",
":",
"self",
".",
"_data",
"[",
"'key'",
"]",
"=",
"self",
".",
"key",
"self",
".",
"_data",
"[",
"'value'",
"]",
"=",
"self",
".",
"value",
"self",
".",
"_data",
"[",
"'host'",
"]",
"=",
"self",
".",
"host",
"self",
".",
"_data",
"[",
"'clock'",
"]",
"=",
"self",
".",
"clock"
] | u"""overrided in each modules. | [
"u",
"overrided",
"in",
"each",
"modules",
"."
] | python | train |
saltstack/salt | salt/modules/git.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L5269-L5380 | def worktree_prune(cwd,
dry_run=False,
verbose=True,
expire=None,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False,
output_encoding=None):
'''
.. versionadded:: 2015.8.0
Interface to `git-worktree(1)`_, prunes stale worktree administrative data
from the gitdir
cwd
The path to the main git checkout or a linked worktree
dry_run : False
If ``True``, then this function will report what would have been
pruned, but no changes will be made.
verbose : True
Report all changes made. Set to ``False`` to suppress this output.
expire
Only prune unused worktree data older than a specific period of time.
The date format for this parameter is described in the documentation
for the ``gc.pruneWorktreesExpire`` config param in the
`git-config(1)`_ manpage.
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` to avoid causing errors
with Salt's own argument parsing.
All CLI options for pruning worktrees as of Git 2.5.0 are already
supported by this function as of Salt 2015.8.0, so using this
argument is unnecessary unless new CLI arguments are added to
`git-worktree(1)`_ and are not yet supported in Salt.
git_opts
Any additional options to add to git command itself (not the
``worktree`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
.. _`git-config(1)`: http://git-scm.com/docs/git-config/2.5.1
CLI Examples:
.. code-block:: bash
salt myminion git.worktree_prune /path/to/repo
salt myminion git.worktree_prune /path/to/repo dry_run=True
salt myminion git.worktree_prune /path/to/repo expire=1.day.ago
'''
_check_worktree_support()
cwd = _expand_path(cwd, user)
command = ['git'] + _format_git_opts(git_opts)
command.extend(['worktree', 'prune'])
if dry_run:
command.append('--dry-run')
if verbose:
command.append('--verbose')
if expire:
command.extend(['--expire', expire])
command.extend(_format_opts(opts))
return _git_run(command,
cwd=cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding)['stdout'] | [
"def",
"worktree_prune",
"(",
"cwd",
",",
"dry_run",
"=",
"False",
",",
"verbose",
"=",
"True",
",",
"expire",
"=",
"None",
",",
"opts",
"=",
"''",
",",
"git_opts",
"=",
"''",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"ignore_retcode",
"=",
"False",
",",
"output_encoding",
"=",
"None",
")",
":",
"_check_worktree_support",
"(",
")",
"cwd",
"=",
"_expand_path",
"(",
"cwd",
",",
"user",
")",
"command",
"=",
"[",
"'git'",
"]",
"+",
"_format_git_opts",
"(",
"git_opts",
")",
"command",
".",
"extend",
"(",
"[",
"'worktree'",
",",
"'prune'",
"]",
")",
"if",
"dry_run",
":",
"command",
".",
"append",
"(",
"'--dry-run'",
")",
"if",
"verbose",
":",
"command",
".",
"append",
"(",
"'--verbose'",
")",
"if",
"expire",
":",
"command",
".",
"extend",
"(",
"[",
"'--expire'",
",",
"expire",
"]",
")",
"command",
".",
"extend",
"(",
"_format_opts",
"(",
"opts",
")",
")",
"return",
"_git_run",
"(",
"command",
",",
"cwd",
"=",
"cwd",
",",
"user",
"=",
"user",
",",
"password",
"=",
"password",
",",
"ignore_retcode",
"=",
"ignore_retcode",
",",
"output_encoding",
"=",
"output_encoding",
")",
"[",
"'stdout'",
"]"
] | .. versionadded:: 2015.8.0
Interface to `git-worktree(1)`_, prunes stale worktree administrative data
from the gitdir
cwd
The path to the main git checkout or a linked worktree
dry_run : False
If ``True``, then this function will report what would have been
pruned, but no changes will be made.
verbose : True
Report all changes made. Set to ``False`` to suppress this output.
expire
Only prune unused worktree data older than a specific period of time.
The date format for this parameter is described in the documentation
for the ``gc.pruneWorktreesExpire`` config param in the
`git-config(1)`_ manpage.
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` to avoid causing errors
with Salt's own argument parsing.
All CLI options for pruning worktrees as of Git 2.5.0 are already
supported by this function as of Salt 2015.8.0, so using this
argument is unnecessary unless new CLI arguments are added to
`git-worktree(1)`_ and are not yet supported in Salt.
git_opts
Any additional options to add to git command itself (not the
``worktree`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-worktree(1)`: http://git-scm.com/docs/git-worktree
.. _`git-config(1)`: http://git-scm.com/docs/git-config/2.5.1
CLI Examples:
.. code-block:: bash
salt myminion git.worktree_prune /path/to/repo
salt myminion git.worktree_prune /path/to/repo dry_run=True
salt myminion git.worktree_prune /path/to/repo expire=1.day.ago | [
"..",
"versionadded",
"::",
"2015",
".",
"8",
".",
"0"
] | python | train |
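A hedged usage sketch from the Python side, driving this execution module through Salt's LocalClient (requires a running master; "myminion" and the repo path are placeholders):

import salt.client

local = salt.client.LocalClient()
# Equivalent to: salt myminion git.worktree_prune /path/to/repo dry_run=True
out = local.cmd("myminion", "git.worktree_prune",
                ["/path/to/repo"], kwarg={"dry_run": True})
print(out["myminion"])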
CityOfZion/neo-python-rpc | neorpc/Client.py | https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L103-L114 | def get_block_hash(self, height, id=None, endpoint=None):
"""
Get hash of a block by its height
Args:
height: (int) height of the block to lookup
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(GET_BLOCK_HASH, params=[height], id=id, endpoint=endpoint) | [
"def",
"get_block_hash",
"(",
"self",
",",
"height",
",",
"id",
"=",
"None",
",",
"endpoint",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call_endpoint",
"(",
"GET_BLOCK_HASH",
",",
"params",
"=",
"[",
"height",
"]",
",",
"id",
"=",
"id",
",",
"endpoint",
"=",
"endpoint",
")"
] | Get hash of a block by its height
Args:
height: (int) height of the block to lookup
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | [
"Get",
"hash",
"of",
"a",
"block",
"by",
"its",
"height",
"Args",
":",
"height",
":",
"(",
"int",
")",
"height",
"of",
"the",
"block",
"to",
"lookup",
"id",
":",
"(",
"int",
"optional",
")",
"id",
"to",
"use",
"for",
"response",
"tracking",
"endpoint",
":",
"(",
"RPCEndpoint",
"optional",
")",
"endpoint",
"to",
"specify",
"to",
"use"
] | python | train |
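A minimal usage sketch, assuming the client class is RPCClient in neorpc.Client and that a default NEO RPC endpoint is reachable:

from neorpc.Client import RPCClient  # assumed import path

client = RPCClient()
# Issues the JSON-RPC 'getblockhash' call for block height 12345
print(client.get_block_hash(12345))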
StackStorm/pybind | pybind/nos/v7_2_0/rbridge_id/ip/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/rbridge_id/ip/__init__.py#L246-L267 | def _set_as_path(self, v, load=False):
"""
Setter method for as_path, mapped from YANG variable /rbridge_id/ip/as_path (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_as_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_as_path() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=as_path.as_path, is_container='container', presence=False, yang_name="as-path", rest_name="as-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP AS Path filter', u'cli-incomplete-no': None, u'sort-priority': u'63'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """as_path must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=as_path.as_path, is_container='container', presence=False, yang_name="as-path", rest_name="as-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP AS Path filter', u'cli-incomplete-no': None, u'sort-priority': u'63'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""",
})
self.__as_path = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_as_path",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"as_path",
".",
"as_path",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"as-path\"",
",",
"rest_name",
"=",
"\"as-path\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'BGP AS Path filter'",
",",
"u'cli-incomplete-no'",
":",
"None",
",",
"u'sort-priority'",
":",
"u'63'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-ip-policy'",
",",
"defining_module",
"=",
"'brocade-ip-policy'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"as_path must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=as_path.as_path, is_container='container', presence=False, yang_name=\"as-path\", rest_name=\"as-path\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'BGP AS Path filter', u'cli-incomplete-no': None, u'sort-priority': u'63'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__as_path",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for as_path, mapped from YANG variable /rbridge_id/ip/as_path (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_as_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_as_path() directly. | [
"Setter",
"method",
"for",
"as_path",
"mapped",
"from",
"YANG",
"variable",
"/",
"rbridge_id",
"/",
"ip",
"/",
"as_path",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_as_path",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_as_path",
"()",
"directly",
"."
] | python | train |
ray-project/ray | python/ray/tune/automl/genetic_searcher.py | https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/automl/genetic_searcher.py#L140-L178 | def _selection(candidate):
"""Perform selection action to candidates.
For example, new gene = sample_1 + the 5th bit of sample2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
>>> new_gene = _selection([gene1, gene2])
>>> # new_gene could be gene1 overwritten with the
>>> # 2nd parameter of gene2
>>> # in which case:
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene2[1]
>>> # new_gene[2] = gene1[0]
Returns:
New gene (encoding)
"""
sample_index1 = np.random.choice(len(candidate))
sample_index2 = np.random.choice(len(candidate))
sample_1 = candidate[sample_index1]
sample_2 = candidate[sample_index2]
select_index = np.random.choice(len(sample_1))
logger.info(
LOGGING_PREFIX + "Perform selection from %sth to %sth at index=%s",
sample_index2, sample_index1, select_index)
next_gen = []
for i in range(len(sample_1)):
if i is select_index:
next_gen.append(sample_2[i])
else:
next_gen.append(sample_1[i])
return next_gen | [
"def",
"_selection",
"(",
"candidate",
")",
":",
"sample_index1",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"len",
"(",
"candidate",
")",
")",
"sample_index2",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"len",
"(",
"candidate",
")",
")",
"sample_1",
"=",
"candidate",
"[",
"sample_index1",
"]",
"sample_2",
"=",
"candidate",
"[",
"sample_index2",
"]",
"select_index",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"len",
"(",
"sample_1",
")",
")",
"logger",
".",
"info",
"(",
"LOGGING_PREFIX",
"+",
"\"Perform selection from %sth to %sth at index=%s\"",
",",
"sample_index2",
",",
"sample_index1",
",",
"select_index",
")",
"next_gen",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"sample_1",
")",
")",
":",
"if",
"i",
"is",
"select_index",
":",
"next_gen",
".",
"append",
"(",
"sample_2",
"[",
"i",
"]",
")",
"else",
":",
"next_gen",
".",
"append",
"(",
"sample_1",
"[",
"i",
"]",
")",
"return",
"next_gen"
] | Perform selection action to candidates.
For example, new gene = sample_1 + the 5th bit of sample2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
>>> new_gene = _selection([gene1, gene2])
>>> # new_gene could be gene1 overwritten with the
>>> # 2nd parameter of gene2
>>> # in which case:
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene2[1]
>>> # new_gene[2] = gene1[0]
Returns:
New gene (encoding) | [
"Perform",
"selection",
"action",
"to",
"candidates",
"."
] | python | train |
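A self-contained sketch of the same selection logic on the two toy genes from the docstring (reimplemented inline so it runs standalone; note it uses == rather than the original's 'is', since numpy integers are not identical to Python ints):

import numpy as np

gene1 = [np.array([0, 0, 1]), np.array([0, 1]), np.array([1, 0])]
gene2 = [np.array([0, 1, 0]), np.array([1, 0]), np.array([0, 1])]
candidate = [gene1, gene2]

sample_1 = candidate[np.random.choice(len(candidate))]
sample_2 = candidate[np.random.choice(len(candidate))]
select_index = np.random.choice(len(sample_1))
# Copy sample_1, overwriting one randomly chosen parameter with sample_2's
next_gen = [sample_2[i] if i == select_index else sample_1[i]
            for i in range(len(sample_1))]
print(next_gen)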
pydata/xarray | xarray/plot/utils.py | https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/plot/utils.py#L412-L422 | def _interval_to_bound_points(array):
"""
Helper function which returns an array
with the Intervals' boundaries.
"""
array_boundaries = np.array([x.left for x in array])
array_boundaries = np.concatenate(
(array_boundaries, np.array([array[-1].right])))
return array_boundaries | [
"def",
"_interval_to_bound_points",
"(",
"array",
")",
":",
"array_boundaries",
"=",
"np",
".",
"array",
"(",
"[",
"x",
".",
"left",
"for",
"x",
"in",
"array",
"]",
")",
"array_boundaries",
"=",
"np",
".",
"concatenate",
"(",
"(",
"array_boundaries",
",",
"np",
".",
"array",
"(",
"[",
"array",
"[",
"-",
"1",
"]",
".",
"right",
"]",
")",
")",
")",
"return",
"array_boundaries"
] | Helper function which returns an array
with the Intervals' boundaries. | [
"Helper",
"function",
"which",
"returns",
"an",
"array",
"with",
"the",
"Intervals",
"boundaries",
"."
] | python | train |
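The same logic is easy to exercise with pandas Interval objects, one plausible input type for this helper:

import numpy as np
import pandas as pd

intervals = list(pd.interval_range(start=0, end=3))  # (0, 1], (1, 2], (2, 3]
bounds = np.concatenate((np.array([iv.left for iv in intervals]),
                         np.array([intervals[-1].right])))
print(bounds)  # the four boundary points: 0, 1, 2, 3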
LEMS/pylems | lems/model/model.py | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/model.py#L728-L799 | def resolve_simulation(self, fc, ct):
"""
Resolve simulation specifications.
"""
for run in ct.simulation.runs:
try:
run2 = Run(fc.component_references[run.component].referenced_component,
run.variable,
fc.parameters[run.increment].numeric_value,
fc.parameters[run.total].numeric_value)
except:
raise ModelError("Unable to resolve simulation run parameters in component '{0}'",
fc.id)
fc.simulation.add(run2)
for record in ct.simulation.records:
try:
record2 = Record(fc.paths[record.quantity].value,
fc.parameters[record.scale].numeric_value if record.scale else 1,
fc.texts[record.color].value if record.color else '#000000')
except:
raise ModelError("Unable to resolve simulation record parameters in component '{0}'",
fc.id)
fc.simulation.add(record2)
for event_record in ct.simulation.event_records:
try:
event_record2 = EventRecord(fc.paths[event_record.quantity].value,
fc.texts[event_record.eventPort].value)
except:
raise ModelError("Unable to resolve simulation event_record parameters in component '{0}'",
fc.id)
fc.simulation.add(event_record2)
for dd in ct.simulation.data_displays:
try:
dd2 = DataDisplay(fc.texts[dd.title].value,
'')
if 'timeScale' in fc.parameters:
dd2.timeScale = fc.parameters['timeScale'].numeric_value
except:
raise ModelError("Unable to resolve simulation display parameters in component '{0}'",
fc.id)
fc.simulation.add(dd2)
for dw in ct.simulation.data_writers:
try:
path = '.'
if fc.texts[dw.path] and fc.texts[dw.path].value:
path = fc.texts[dw.path].value
dw2 = DataWriter(path,
fc.texts[dw.file_name].value)
except:
raise ModelError("Unable to resolve simulation writer parameters in component '{0}'",
fc.id)
fc.simulation.add(dw2)
for ew in ct.simulation.event_writers:
try:
path = '.'
if fc.texts[ew.path] and fc.texts[ew.path].value:
path = fc.texts[ew.path].value
ew2 = EventWriter(path,
fc.texts[ew.file_name].value,
fc.texts[ew.format].value)
except:
raise ModelError("Unable to resolve simulation writer parameters in component '{0}'",
fc.id)
fc.simulation.add(ew2) | [
"def",
"resolve_simulation",
"(",
"self",
",",
"fc",
",",
"ct",
")",
":",
"for",
"run",
"in",
"ct",
".",
"simulation",
".",
"runs",
":",
"try",
":",
"run2",
"=",
"Run",
"(",
"fc",
".",
"component_references",
"[",
"run",
".",
"component",
"]",
".",
"referenced_component",
",",
"run",
".",
"variable",
",",
"fc",
".",
"parameters",
"[",
"run",
".",
"increment",
"]",
".",
"numeric_value",
",",
"fc",
".",
"parameters",
"[",
"run",
".",
"total",
"]",
".",
"numeric_value",
")",
"except",
":",
"raise",
"ModelError",
"(",
"\"Unable to resolve simulation run parameters in component '{0}'\"",
",",
"fc",
".",
"id",
")",
"fc",
".",
"simulation",
".",
"add",
"(",
"run2",
")",
"for",
"record",
"in",
"ct",
".",
"simulation",
".",
"records",
":",
"try",
":",
"record2",
"=",
"Record",
"(",
"fc",
".",
"paths",
"[",
"record",
".",
"quantity",
"]",
".",
"value",
",",
"fc",
".",
"parameters",
"[",
"record",
".",
"scale",
"]",
".",
"numeric_value",
"if",
"record",
".",
"scale",
"else",
"1",
",",
"fc",
".",
"texts",
"[",
"record",
".",
"color",
"]",
".",
"value",
"if",
"record",
".",
"color",
"else",
"'#000000'",
")",
"except",
":",
"raise",
"ModelError",
"(",
"\"Unable to resolve simulation record parameters in component '{0}'\"",
",",
"fc",
".",
"id",
")",
"fc",
".",
"simulation",
".",
"add",
"(",
"record2",
")",
"for",
"event_record",
"in",
"ct",
".",
"simulation",
".",
"event_records",
":",
"try",
":",
"event_record2",
"=",
"EventRecord",
"(",
"fc",
".",
"paths",
"[",
"event_record",
".",
"quantity",
"]",
".",
"value",
",",
"fc",
".",
"texts",
"[",
"event_record",
".",
"eventPort",
"]",
".",
"value",
")",
"except",
":",
"raise",
"ModelError",
"(",
"\"Unable to resolve simulation event_record parameters in component '{0}'\"",
",",
"fc",
".",
"id",
")",
"fc",
".",
"simulation",
".",
"add",
"(",
"event_record2",
")",
"for",
"dd",
"in",
"ct",
".",
"simulation",
".",
"data_displays",
":",
"try",
":",
"dd2",
"=",
"DataDisplay",
"(",
"fc",
".",
"texts",
"[",
"dd",
".",
"title",
"]",
".",
"value",
",",
"''",
")",
"if",
"'timeScale'",
"in",
"fc",
".",
"parameters",
":",
"dd2",
".",
"timeScale",
"=",
"fc",
".",
"parameters",
"[",
"'timeScale'",
"]",
".",
"numeric_value",
"except",
":",
"raise",
"ModelError",
"(",
"\"Unable to resolve simulation display parameters in component '{0}'\"",
",",
"fc",
".",
"id",
")",
"fc",
".",
"simulation",
".",
"add",
"(",
"dd2",
")",
"for",
"dw",
"in",
"ct",
".",
"simulation",
".",
"data_writers",
":",
"try",
":",
"path",
"=",
"'.'",
"if",
"fc",
".",
"texts",
"[",
"dw",
".",
"path",
"]",
"and",
"fc",
".",
"texts",
"[",
"dw",
".",
"path",
"]",
".",
"value",
":",
"path",
"=",
"fc",
".",
"texts",
"[",
"dw",
".",
"path",
"]",
".",
"value",
"dw2",
"=",
"DataWriter",
"(",
"path",
",",
"fc",
".",
"texts",
"[",
"dw",
".",
"file_name",
"]",
".",
"value",
")",
"except",
":",
"raise",
"ModelError",
"(",
"\"Unable to resolve simulation writer parameters in component '{0}'\"",
",",
"fc",
".",
"id",
")",
"fc",
".",
"simulation",
".",
"add",
"(",
"dw2",
")",
"for",
"ew",
"in",
"ct",
".",
"simulation",
".",
"event_writers",
":",
"try",
":",
"path",
"=",
"'.'",
"if",
"fc",
".",
"texts",
"[",
"ew",
".",
"path",
"]",
"and",
"fc",
".",
"texts",
"[",
"ew",
".",
"path",
"]",
".",
"value",
":",
"path",
"=",
"fc",
".",
"texts",
"[",
"ew",
".",
"path",
"]",
".",
"value",
"ew2",
"=",
"EventWriter",
"(",
"path",
",",
"fc",
".",
"texts",
"[",
"ew",
".",
"file_name",
"]",
".",
"value",
",",
"fc",
".",
"texts",
"[",
"ew",
".",
"format",
"]",
".",
"value",
")",
"except",
":",
"raise",
"ModelError",
"(",
"\"Unable to resolve simulation writer parameters in component '{0}'\"",
",",
"fc",
".",
"id",
")",
"fc",
".",
"simulation",
".",
"add",
"(",
"ew2",
")"
] | Resolve simulation specifications. | [
"Resolve",
"simulation",
"specifications",
"."
] | python | train |
GNS3/gns3-server | gns3server/compute/iou/iou_vm.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/iou/iou_vm.py#L687-L704 | def _create_netmap_config(self):
"""
Creates the NETMAP file.
"""
netmap_path = os.path.join(self.working_dir, "NETMAP")
try:
with open(netmap_path, "w", encoding="utf-8") as f:
for bay in range(0, 16):
for unit in range(0, 4):
f.write("{ubridge_id}:{bay}/{unit}{iou_id:>5d}:{bay}/{unit}\n".format(ubridge_id=str(self.application_id + 512),
bay=bay,
unit=unit,
iou_id=self.application_id))
log.info("IOU {name} [id={id}]: NETMAP file created".format(name=self._name,
id=self._id))
except OSError as e:
raise IOUError("Could not create {}: {}".format(netmap_path, e)) | [
"def",
"_create_netmap_config",
"(",
"self",
")",
":",
"netmap_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"working_dir",
",",
"\"NETMAP\"",
")",
"try",
":",
"with",
"open",
"(",
"netmap_path",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"for",
"bay",
"in",
"range",
"(",
"0",
",",
"16",
")",
":",
"for",
"unit",
"in",
"range",
"(",
"0",
",",
"4",
")",
":",
"f",
".",
"write",
"(",
"\"{ubridge_id}:{bay}/{unit}{iou_id:>5d}:{bay}/{unit}\\n\"",
".",
"format",
"(",
"ubridge_id",
"=",
"str",
"(",
"self",
".",
"application_id",
"+",
"512",
")",
",",
"bay",
"=",
"bay",
",",
"unit",
"=",
"unit",
",",
"iou_id",
"=",
"self",
".",
"application_id",
")",
")",
"log",
".",
"info",
"(",
"\"IOU {name} [id={id}]: NETMAP file created\"",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"id",
"=",
"self",
".",
"_id",
")",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"IOUError",
"(",
"\"Could not create {}: {}\"",
".",
"format",
"(",
"netmap_path",
",",
"e",
")",
")"
] | Creates the NETMAP file. | [
"Creates",
"the",
"NETMAP",
"file",
"."
] | python | train |
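A worked example of one line the format string above produces, with placeholder ids (application_id 1, so the uBridge side is 1 + 512 = 513; {iou_id:>5d} right-aligns the IOU id in a five-character field):

line = "{ubridge_id}:{bay}/{unit}{iou_id:>5d}:{bay}/{unit}".format(
    ubridge_id=str(1 + 512), bay=0, unit=0, iou_id=1)
print(repr(line))  # '513:0/0    1:0/0'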
twilio/twilio-python | twilio/twiml/voice_response.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/twiml/voice_response.py#L35-L78 | def dial(self, number=None, action=None, method=None, timeout=None,
hangup_on_star=None, time_limit=None, caller_id=None, record=None,
trim=None, recording_status_callback=None,
recording_status_callback_method=None,
recording_status_callback_event=None, answer_on_bridge=None,
ring_tone=None, **kwargs):
"""
Create a <Dial> element
:param number: Phone number to dial
:param action: Action URL
:param method: Action URL method
:param timeout: Time to wait for answer
:param hangup_on_star: Hangup call on star press
:param time_limit: Max time length
:param caller_id: Caller ID to display
:param record: Record the call
:param trim: Trim the recording
:param recording_status_callback: Recording status callback URL
:param recording_status_callback_method: Recording status callback URL method
:param recording_status_callback_event: Recording status callback events
:param answer_on_bridge: Preserve the ringing behavior of the inbound call until the Dialed call picks up
:param ring_tone: Ringtone allows you to override the ringback tone that Twilio will play back to the caller while executing the Dial
:param kwargs: additional attributes
:returns: <Dial> element
"""
return self.nest(Dial(
number=number,
action=action,
method=method,
timeout=timeout,
hangup_on_star=hangup_on_star,
time_limit=time_limit,
caller_id=caller_id,
record=record,
trim=trim,
recording_status_callback=recording_status_callback,
recording_status_callback_method=recording_status_callback_method,
recording_status_callback_event=recording_status_callback_event,
answer_on_bridge=answer_on_bridge,
ring_tone=ring_tone,
**kwargs
)) | [
"def",
"dial",
"(",
"self",
",",
"number",
"=",
"None",
",",
"action",
"=",
"None",
",",
"method",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"hangup_on_star",
"=",
"None",
",",
"time_limit",
"=",
"None",
",",
"caller_id",
"=",
"None",
",",
"record",
"=",
"None",
",",
"trim",
"=",
"None",
",",
"recording_status_callback",
"=",
"None",
",",
"recording_status_callback_method",
"=",
"None",
",",
"recording_status_callback_event",
"=",
"None",
",",
"answer_on_bridge",
"=",
"None",
",",
"ring_tone",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"nest",
"(",
"Dial",
"(",
"number",
"=",
"number",
",",
"action",
"=",
"action",
",",
"method",
"=",
"method",
",",
"timeout",
"=",
"timeout",
",",
"hangup_on_star",
"=",
"hangup_on_star",
",",
"time_limit",
"=",
"time_limit",
",",
"caller_id",
"=",
"caller_id",
",",
"record",
"=",
"record",
",",
"trim",
"=",
"trim",
",",
"recording_status_callback",
"=",
"recording_status_callback",
",",
"recording_status_callback_method",
"=",
"recording_status_callback_method",
",",
"recording_status_callback_event",
"=",
"recording_status_callback_event",
",",
"answer_on_bridge",
"=",
"answer_on_bridge",
",",
"ring_tone",
"=",
"ring_tone",
",",
"*",
"*",
"kwargs",
")",
")"
] | Create a <Dial> element
:param number: Phone number to dial
:param action: Action URL
:param method: Action URL method
:param timeout: Time to wait for answer
:param hangup_on_star: Hangup call on star press
:param time_limit: Max time length
:param caller_id: Caller ID to display
:param record: Record the call
:param trim: Trim the recording
:param recording_status_callback: Recording status callback URL
:param recording_status_callback_method: Recording status callback URL method
:param recording_status_callback_event: Recording status callback events
:param answer_on_bridge: Preserve the ringing behavior of the inbound call until the Dialed call picks up
:param ring_tone: Ringtone allows you to override the ringback tone that Twilio will play back to the caller while executing the Dial
:param kwargs: additional attributes
:returns: <Dial> element | [
"Create",
"a",
"<Dial",
">",
"element"
] | python | train |
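A short usage sketch with the public TwiML API (the phone number is a placeholder):

from twilio.twiml.voice_response import VoiceResponse

response = VoiceResponse()
response.dial("+15558675310", timeout=20, record="record-from-answer")
print(response)  # <?xml ...?><Response><Dial ...>+15558675310</Dial></Response>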
SCIP-Interfaces/PySCIPOpt | examples/finished/read_tsplib.py | https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/read_tsplib.py#L27-L35 | def distL1(x1,y1,x2,y2):
"""Compute the L1-norm (Manhattan) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters"""
return int(abs(x2-x1) + abs(y2-y1)+.5) | [
"def",
"distL1",
"(",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
")",
":",
"return",
"int",
"(",
"abs",
"(",
"x2",
"-",
"x1",
")",
"+",
"abs",
"(",
"y2",
"-",
"y1",
")",
"+",
".5",
")"
] | Compute the L1-norm (Manhattan) distance between two points.
The distance is rounded to the closest integer, for compatibility
with the TSPLIB convention.
The two points are located on coordinates (x1,y1) and (x2,y2),
sent as parameters | [
"Compute",
"the",
"L1",
"-",
"norm",
"(",
"Manhattan",
")",
"distance",
"between",
"two",
"points",
"."
] | python | train |
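Two worked values showing the TSPLIB rounding; int(x + .5) rounds half up for the non-negative distances involved:

distL1(0, 0, 3, 7)          # |3| + |7| = 10          -> 10
distL1(0.0, 0.0, 1.2, 2.4)  # 1.2 + 2.4 = 3.6, + .5   -> int(4.1) == 4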
edaniszewski/bison | bison/bison.py | https://github.com/edaniszewski/bison/blob/0b889196bb314a0803c4089fe102eacacabb118b/bison/bison.py#L138-L147 | def parse(self, requires_cfg=True):
"""Parse the configuration sources into `Bison`.
Args:
requires_cfg (bool): Specify whether or not parsing should fail
if a config file is not found. (default: True)
"""
self._parse_default()
self._parse_config(requires_cfg)
self._parse_env() | [
"def",
"parse",
"(",
"self",
",",
"requires_cfg",
"=",
"True",
")",
":",
"self",
".",
"_parse_default",
"(",
")",
"self",
".",
"_parse_config",
"(",
"requires_cfg",
")",
"self",
".",
"_parse_env",
"(",
")"
] | Parse the configuration sources into `Bison`.
Args:
requires_cfg (bool): Specify whether or not parsing should fail
if a config file is not found. (default: True) | [
"Parse",
"the",
"configuration",
"sources",
"into",
"Bison",
"."
] | python | train |
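A hedged end-to-end sketch of the merge order (defaults, then config file, then environment); the Scheme/Option names follow the project README but are assumptions here:

from bison import Bison, Option, Scheme

config = Bison(Scheme(Option("log_level", default="info")))
config.config_name = "app"          # will look for app.yml / app.yaml
config.add_config_paths(".")
config.parse(requires_cfg=False)    # don't fail if no config file is found
print(config.get("log_level"))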
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L65-L254 | def login(credentials=None, app_name=None, services=None, client_id=None, make_clients=True,
clear_old_tokens=False, token_dir=DEFAULT_CRED_PATH, **kwargs):
"""Log in to Globus services
Arguments:
credentials (str or dict): A string filename, string JSON, or dictionary
with credential and config information.
By default, looks in ``~/mdf/credentials/globus_login.json``.
Contains ``app_name``, ``services``, and ``client_id`` as described below.
app_name (str): Name of script/client. This will form the name of the token cache file.
**Default**: ``'UNKNOWN'``.
services (list of str): Services to authenticate with.
**Default**: ``[]``.
client_id (str): The ID of the client, given when registered with Globus.
**Default**: The MDF Native Clients ID.
make_clients (bool): If ``True``, will make and return appropriate clients with
generated tokens. If ``False``, will only return authorizers.
**Default**: ``True``.
clear_old_tokens (bool): If ``True``, delete old token file if it exists,
forcing user to re-login. If ``False``, use existing token file if there is one.
**Default**: ``False``.
token_dir (str): The path to the directory to save tokens in and look for
credentials by default. **Default**: ``DEFAULT_CRED_PATH``.
Returns:
dict: The clients and authorizers requested, indexed by service name.
For example, if ``login()`` is told to auth with ``'search'``
then the search client will be in the ``'search'`` field.
Note:
Previously requested tokens (which are cached) will be returned alongside
explicitly requested ones.
"""
NATIVE_CLIENT_ID = "98bfc684-977f-4670-8669-71f8337688e4"
DEFAULT_CRED_FILENAME = "globus_login.json"
def _get_tokens(client, scopes, app_name, force_refresh=False):
token_path = os.path.join(token_dir, app_name + "_tokens.json")
if force_refresh:
if os.path.exists(token_path):
os.remove(token_path)
if os.path.exists(token_path):
with open(token_path, "r") as tf:
try:
tokens = json.load(tf)
# Check that requested scopes are present
# :all scopes should override any scopes with lesser permissions
# Some scopes are returned in multiples and should be separated
existing_scopes = []
for sc in [val["scope"] for val in tokens.values()]:
if " " in sc:
existing_scopes += sc.split(" ")
else:
existing_scopes.append(sc)
permissive_scopes = [scope.replace(":all", "")
for scope in existing_scopes
if scope.endswith(":all")]
missing_scopes = [scope for scope in scopes.split(" ")
if scope not in existing_scopes
and not any([scope.startswith(per_sc)
for per_sc in permissive_scopes])
and not scope.strip() == ""]
# If some scopes are missing, regenerate tokens
# Get tokens for existing scopes and new scopes
if len(missing_scopes) > 0:
scopes = " ".join(existing_scopes + missing_scopes)
os.remove(token_path)
except ValueError:
# Tokens corrupted
os.remove(token_path)
if not os.path.exists(token_path):
try:
os.makedirs(token_dir)
except (IOError, OSError):
pass
client.oauth2_start_flow(requested_scopes=scopes, refresh_tokens=True)
authorize_url = client.oauth2_get_authorize_url()
print("It looks like this is the first time you're accessing this service.",
"\nPlease log in to Globus at this link:\n", authorize_url)
auth_code = input("Copy and paste the authorization code here: ").strip()
# Handle 401s
try:
token_response = client.oauth2_exchange_code_for_tokens(auth_code)
except globus_sdk.GlobusAPIError as e:
if e.http_status == 401:
raise ValueError("\nSorry, that code isn't valid."
" You can try again, or contact support.")
else:
raise
tokens = token_response.by_resource_server
os.umask(0o077)
with open(token_path, "w") as tf:
json.dump(tokens, tf)
print("Thanks! You're now logged in.")
return tokens
# If creds supplied in 'credentials', process
if credentials:
if type(credentials) is str:
try:
with open(credentials) as cred_file:
creds = json.load(cred_file)
except IOError:
try:
creds = json.loads(credentials)
except ValueError:
raise ValueError("Credential string unreadable")
elif type(credentials) is dict:
creds = credentials
else:
try:
with open(os.path.join(os.getcwd(), DEFAULT_CRED_FILENAME)) as cred_file:
creds = json.load(cred_file)
except IOError:
try:
with open(os.path.join(token_dir, DEFAULT_CRED_FILENAME)) as cred_file:
creds = json.load(cred_file)
except IOError:
raise ValueError("Credentials/configuration must be passed as a "
+ "filename string, JSON string, or dictionary, "
+ "or provided in '"
+ DEFAULT_CRED_FILENAME
+ "' or '"
+ token_dir
+ "'.")
app_name = creds.get("app_name")
services = creds.get("services", services)
client_id = creds.get("client_id")
if not app_name:
app_name = "UNKNOWN"
if not services:
services = []
elif isinstance(services, str):
services = [services]
if not client_id:
client_id = NATIVE_CLIENT_ID
native_client = globus_sdk.NativeAppAuthClient(client_id, app_name=app_name)
servs = []
for serv in services:
serv = serv.lower().strip()
if type(serv) is str:
servs += serv.split(" ")
else:
servs += list(serv)
# Translate services into scopes as possible
scopes = " ".join([KNOWN_SCOPES.get(sc, sc) for sc in servs])
all_tokens = _get_tokens(native_client, scopes, app_name, force_refresh=clear_old_tokens)
# Make authorizers with every returned token
all_authorizers = {}
for key, tokens in all_tokens.items():
# TODO: Allow non-Refresh authorizers
try:
all_authorizers[key] = globus_sdk.RefreshTokenAuthorizer(tokens["refresh_token"],
native_client)
except KeyError:
print("Error: Unable to retrieve tokens for '{}'.\n"
"You may need to delete your old tokens and retry.".format(key))
returnables = {}
# Populate clients and named services
# Only translate back services - if user provides scope directly, don't translate back
# ex. transfer => urn:transfer.globus.org:all => transfer,
# but urn:transfer.globus.org:all !=> transfer
for service in servs:
token_key = KNOWN_TOKEN_KEYS.get(service)
# If the .by_resource_server key (token key) for the service was returned
if token_key in all_authorizers.keys():
# If there is an applicable client (all clients have known token key)
# Pop from all_authorizers to remove from final return value
if make_clients and KNOWN_CLIENTS.get(service):
try:
returnables[service] = KNOWN_CLIENTS[service](
authorizer=all_authorizers.pop(token_key),
http_timeout=STD_TIMEOUT)
except globus_sdk.GlobusAPIError as e:
print("Error: Unable to create {} client: {}".format(service, e.message))
# If no applicable client, just translate the key
else:
returnables[service] = all_authorizers.pop(token_key)
# Add authorizers not associated with service to returnables
returnables.update(all_authorizers)
return returnables | [
"def",
"login",
"(",
"credentials",
"=",
"None",
",",
"app_name",
"=",
"None",
",",
"services",
"=",
"None",
",",
"client_id",
"=",
"None",
",",
"make_clients",
"=",
"True",
",",
"clear_old_tokens",
"=",
"False",
",",
"token_dir",
"=",
"DEFAULT_CRED_PATH",
",",
"*",
"*",
"kwargs",
")",
":",
"NATIVE_CLIENT_ID",
"=",
"\"98bfc684-977f-4670-8669-71f8337688e4\"",
"DEFAULT_CRED_FILENAME",
"=",
"\"globus_login.json\"",
"def",
"_get_tokens",
"(",
"client",
",",
"scopes",
",",
"app_name",
",",
"force_refresh",
"=",
"False",
")",
":",
"token_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"token_dir",
",",
"app_name",
"+",
"\"_tokens.json\"",
")",
"if",
"force_refresh",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"token_path",
")",
":",
"os",
".",
"remove",
"(",
"token_path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"token_path",
")",
":",
"with",
"open",
"(",
"token_path",
",",
"\"r\"",
")",
"as",
"tf",
":",
"try",
":",
"tokens",
"=",
"json",
".",
"load",
"(",
"tf",
")",
"# Check that requested scopes are present",
"# :all scopes should override any scopes with lesser permissions",
"# Some scopes are returned in multiples and should be separated",
"existing_scopes",
"=",
"[",
"]",
"for",
"sc",
"in",
"[",
"val",
"[",
"\"scope\"",
"]",
"for",
"val",
"in",
"tokens",
".",
"values",
"(",
")",
"]",
":",
"if",
"\" \"",
"in",
"sc",
":",
"existing_scopes",
"+=",
"sc",
".",
"split",
"(",
"\" \"",
")",
"else",
":",
"existing_scopes",
".",
"append",
"(",
"sc",
")",
"permissive_scopes",
"=",
"[",
"scope",
".",
"replace",
"(",
"\":all\"",
",",
"\"\"",
")",
"for",
"scope",
"in",
"existing_scopes",
"if",
"scope",
".",
"endswith",
"(",
"\":all\"",
")",
"]",
"missing_scopes",
"=",
"[",
"scope",
"for",
"scope",
"in",
"scopes",
".",
"split",
"(",
"\" \"",
")",
"if",
"scope",
"not",
"in",
"existing_scopes",
"and",
"not",
"any",
"(",
"[",
"scope",
".",
"startswith",
"(",
"per_sc",
")",
"for",
"per_sc",
"in",
"permissive_scopes",
"]",
")",
"and",
"not",
"scope",
".",
"strip",
"(",
")",
"==",
"\"\"",
"]",
"# If some scopes are missing, regenerate tokens",
"# Get tokens for existing scopes and new scopes",
"if",
"len",
"(",
"missing_scopes",
")",
">",
"0",
":",
"scopes",
"=",
"\" \"",
".",
"join",
"(",
"existing_scopes",
"+",
"missing_scopes",
")",
"os",
".",
"remove",
"(",
"token_path",
")",
"except",
"ValueError",
":",
"# Tokens corrupted",
"os",
".",
"remove",
"(",
"token_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"token_path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"token_dir",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"pass",
"client",
".",
"oauth2_start_flow",
"(",
"requested_scopes",
"=",
"scopes",
",",
"refresh_tokens",
"=",
"True",
")",
"authorize_url",
"=",
"client",
".",
"oauth2_get_authorize_url",
"(",
")",
"print",
"(",
"\"It looks like this is the first time you're accessing this service.\"",
",",
"\"\\nPlease log in to Globus at this link:\\n\"",
",",
"authorize_url",
")",
"auth_code",
"=",
"input",
"(",
"\"Copy and paste the authorization code here: \"",
")",
".",
"strip",
"(",
")",
"# Handle 401s",
"try",
":",
"token_response",
"=",
"client",
".",
"oauth2_exchange_code_for_tokens",
"(",
"auth_code",
")",
"except",
"globus_sdk",
".",
"GlobusAPIError",
"as",
"e",
":",
"if",
"e",
".",
"http_status",
"==",
"401",
":",
"raise",
"ValueError",
"(",
"\"\\nSorry, that code isn't valid.\"",
"\" You can try again, or contact support.\"",
")",
"else",
":",
"raise",
"tokens",
"=",
"token_response",
".",
"by_resource_server",
"os",
".",
"umask",
"(",
"0o077",
")",
"with",
"open",
"(",
"token_path",
",",
"\"w\"",
")",
"as",
"tf",
":",
"json",
".",
"dump",
"(",
"tokens",
",",
"tf",
")",
"print",
"(",
"\"Thanks! You're now logged in.\"",
")",
"return",
"tokens",
"# If creds supplied in 'credentials', process",
"if",
"credentials",
":",
"if",
"type",
"(",
"credentials",
")",
"is",
"str",
":",
"try",
":",
"with",
"open",
"(",
"credentials",
")",
"as",
"cred_file",
":",
"creds",
"=",
"json",
".",
"load",
"(",
"cred_file",
")",
"except",
"IOError",
":",
"try",
":",
"creds",
"=",
"json",
".",
"loads",
"(",
"credentials",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Credential string unreadable\"",
")",
"elif",
"type",
"(",
"credentials",
")",
"is",
"dict",
":",
"creds",
"=",
"credentials",
"else",
":",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"DEFAULT_CRED_FILENAME",
")",
")",
"as",
"cred_file",
":",
"creds",
"=",
"json",
".",
"load",
"(",
"cred_file",
")",
"except",
"IOError",
":",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"token_dir",
",",
"DEFAULT_CRED_FILENAME",
")",
")",
"as",
"cred_file",
":",
"creds",
"=",
"json",
".",
"load",
"(",
"cred_file",
")",
"except",
"IOError",
":",
"raise",
"ValueError",
"(",
"\"Credentials/configuration must be passed as a \"",
"+",
"\"filename string, JSON string, or dictionary, \"",
"+",
"\"or provided in '\"",
"+",
"DEFAULT_CRED_FILENAME",
"+",
"\"' or '\"",
"+",
"token_dir",
"+",
"\"'.\"",
")",
"app_name",
"=",
"creds",
".",
"get",
"(",
"\"app_name\"",
")",
"services",
"=",
"creds",
".",
"get",
"(",
"\"services\"",
",",
"services",
")",
"client_id",
"=",
"creds",
".",
"get",
"(",
"\"client_id\"",
")",
"if",
"not",
"app_name",
":",
"app_name",
"=",
"\"UNKNOWN\"",
"if",
"not",
"services",
":",
"services",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"services",
",",
"str",
")",
":",
"services",
"=",
"[",
"services",
"]",
"if",
"not",
"client_id",
":",
"client_id",
"=",
"NATIVE_CLIENT_ID",
"native_client",
"=",
"globus_sdk",
".",
"NativeAppAuthClient",
"(",
"client_id",
",",
"app_name",
"=",
"app_name",
")",
"servs",
"=",
"[",
"]",
"for",
"serv",
"in",
"services",
":",
"serv",
"=",
"serv",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"if",
"type",
"(",
"serv",
")",
"is",
"str",
":",
"servs",
"+=",
"serv",
".",
"split",
"(",
"\" \"",
")",
"else",
":",
"servs",
"+=",
"list",
"(",
"serv",
")",
"# Translate services into scopes as possible",
"scopes",
"=",
"\" \"",
".",
"join",
"(",
"[",
"KNOWN_SCOPES",
".",
"get",
"(",
"sc",
",",
"sc",
")",
"for",
"sc",
"in",
"servs",
"]",
")",
"all_tokens",
"=",
"_get_tokens",
"(",
"native_client",
",",
"scopes",
",",
"app_name",
",",
"force_refresh",
"=",
"clear_old_tokens",
")",
"# Make authorizers with every returned token",
"all_authorizers",
"=",
"{",
"}",
"for",
"key",
",",
"tokens",
"in",
"all_tokens",
".",
"items",
"(",
")",
":",
"# TODO: Allow non-Refresh authorizers",
"try",
":",
"all_authorizers",
"[",
"key",
"]",
"=",
"globus_sdk",
".",
"RefreshTokenAuthorizer",
"(",
"tokens",
"[",
"\"refresh_token\"",
"]",
",",
"native_client",
")",
"except",
"KeyError",
":",
"print",
"(",
"\"Error: Unable to retrieve tokens for '{}'.\\n\"",
"\"You may need to delete your old tokens and retry.\"",
".",
"format",
"(",
"key",
")",
")",
"returnables",
"=",
"{",
"}",
"# Populate clients and named services",
"# Only translate back services - if user provides scope directly, don't translate back",
"# ex. transfer => urn:transfer.globus.org:all => transfer,",
"# but urn:transfer.globus.org:all !=> transfer",
"for",
"service",
"in",
"servs",
":",
"token_key",
"=",
"KNOWN_TOKEN_KEYS",
".",
"get",
"(",
"service",
")",
"# If the .by_resource_server key (token key) for the service was returned",
"if",
"token_key",
"in",
"all_authorizers",
".",
"keys",
"(",
")",
":",
"# If there is an applicable client (all clients have known token key)",
"# Pop from all_authorizers to remove from final return value",
"if",
"make_clients",
"and",
"KNOWN_CLIENTS",
".",
"get",
"(",
"service",
")",
":",
"try",
":",
"returnables",
"[",
"service",
"]",
"=",
"KNOWN_CLIENTS",
"[",
"service",
"]",
"(",
"authorizer",
"=",
"all_authorizers",
".",
"pop",
"(",
"token_key",
")",
",",
"http_timeout",
"=",
"STD_TIMEOUT",
")",
"except",
"globus_sdk",
".",
"GlobusAPIError",
"as",
"e",
":",
"print",
"(",
"\"Error: Unable to create {} client: {}\"",
".",
"format",
"(",
"service",
",",
"e",
".",
"message",
")",
")",
"# If no applicable client, just translate the key",
"else",
":",
"returnables",
"[",
"service",
"]",
"=",
"all_authorizers",
".",
"pop",
"(",
"token_key",
")",
"# Add authorizers not associated with service to returnables",
"returnables",
".",
"update",
"(",
"all_authorizers",
")",
"return",
"returnables"
] | Log in to Globus services
Arguments:
credentials (str or dict): A string filename, string JSON, or dictionary
with credential and config information.
By default, looks in ``~/mdf/credentials/globus_login.json``.
Contains ``app_name``, ``services``, and ``client_id`` as described below.
app_name (str): Name of script/client. This will form the name of the token cache file.
**Default**: ``'UNKNOWN'``.
services (list of str): Services to authenticate with.
**Default**: ``[]``.
client_id (str): The ID of the client, given when registered with Globus.
**Default**: The MDF Native Clients ID.
make_clients (bool): If ``True``, will make and return appropriate clients with
generated tokens. If ``False``, will only return authorizers.
**Default**: ``True``.
clear_old_tokens (bool): If ``True``, delete old token file if it exists,
forcing user to re-login. If ``False``, use existing token file if there is one.
**Default**: ``False``.
token_dir (str): The path to the directory to save tokens in and look for
credentials by default. **Default**: ``DEFAULT_CRED_PATH``.
Returns:
dict: The clients and authorizers requested, indexed by service name.
For example, if ``login()`` is told to auth with ``'search'``
then the search client will be in the ``'search'`` field.
Note:
Previously requested tokens (which are cached) will be returned alongside
explicitly requested ones. | [
"Log",
"in",
"to",
"Globus",
"services"
] | python | train |
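A minimal login sketch (interactive in a terminal on first run; assumes login is re-exported at the package level):

import mdf_toolbox

clients = mdf_toolbox.login(services=["search"], app_name="my_script")
search_client = clients["search"]  # a globus_sdk SearchClient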
timgabets/pynblock | pynblock/tools.py | https://github.com/timgabets/pynblock/blob/dbdb6d06bd7741e1138bed09d874b47b23d8d200/pynblock/tools.py#L218-L234 | def modify_key_parity(key):
"""
The primary use of the function is to return the parity-validated key.
The incoming key is expected to be hex data binary representation, e.g. b'E7A3C8B1'
"""
validated_key = b''
for byte in key:
if parityOf(int(byte)) == -1:
byte_candidate = int(byte) + 1
while parityOf(byte_candidate) == -1:
byte_candidate = divmod(byte_candidate + 1, 256)[1]
validated_key += bytes([byte_candidate])
else:
validated_key += bytes([byte])
return validated_key | [
"def",
"modify_key_parity",
"(",
"key",
")",
":",
"validated_key",
"=",
"b''",
"for",
"byte",
"in",
"key",
":",
"if",
"parityOf",
"(",
"int",
"(",
"byte",
")",
")",
"==",
"-",
"1",
":",
"byte_candidate",
"=",
"int",
"(",
"byte",
")",
"+",
"1",
"while",
"parityOf",
"(",
"byte_candidate",
")",
"==",
"-",
"1",
":",
"byte_candidate",
"=",
"divmod",
"(",
"byte_candidate",
"+",
"1",
",",
"256",
")",
"[",
"1",
"]",
"validated_key",
"+=",
"bytes",
"(",
"[",
"byte_candidate",
"]",
")",
"else",
":",
"validated_key",
"+=",
"bytes",
"(",
"[",
"byte",
"]",
")",
"return",
"validated_key"
] | The primary use of the function is to return the parity-validated key.
The incoming key is expected to be hex data binary representation, e.g. b'E7A3C8B1' | [
"The",
"prior",
"use",
"of",
"the",
"function",
"is",
"to",
"return",
"the",
"parity",
"-",
"validated",
"key",
"."
] | python | train |
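A runnable sketch; parityOf is not shown in the record above, so a plausible stand-in is defined here, returning -1 for bytes with an even number of set bits (invalid odd DES parity):

def parityOf(b):
    # assumed helper: -1 when the byte's parity is invalid, 0 when it is fine
    return -1 if bin(b).count("1") % 2 == 0 else 0

print(modify_key_parity(b"\x03\x10").hex())  # '0410': 0x03 bumped to 0x04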
DheerendraRathor/django-auth-ldap-ng | django_auth_ldap/backend.py | https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/backend.py#L211-L225 | def get_or_create_user(self, username, ldap_user):
"""
This must return a (User, created) 2-tuple for the given LDAP user.
username is the Django-friendly username of the user. ldap_user.dn is
the user's DN and ldap_user.attrs contains all of their LDAP attributes.
"""
model = self.get_user_model()
username_field = getattr(model, 'USERNAME_FIELD', 'username')
kwargs = {
username_field + '__iexact': username,
'defaults': {username_field: username.lower()}
}
return model.objects.get_or_create(**kwargs) | [
"def",
"get_or_create_user",
"(",
"self",
",",
"username",
",",
"ldap_user",
")",
":",
"model",
"=",
"self",
".",
"get_user_model",
"(",
")",
"username_field",
"=",
"getattr",
"(",
"model",
",",
"'USERNAME_FIELD'",
",",
"'username'",
")",
"kwargs",
"=",
"{",
"username_field",
"+",
"'__iexact'",
":",
"username",
",",
"'defaults'",
":",
"{",
"username_field",
":",
"username",
".",
"lower",
"(",
")",
"}",
"}",
"return",
"model",
".",
"objects",
".",
"get_or_create",
"(",
"*",
"*",
"kwargs",
")"
] | This must return a (User, created) 2-tuple for the given LDAP user.
username is the Django-friendly username of the user. ldap_user.dn is
the user's DN and ldap_user.attrs contains all of their LDAP attributes. | [
"This",
"must",
"return",
"a",
"(",
"User",
"created",
")",
"2",
"-",
"tuple",
"for",
"the",
"given",
"LDAP",
"user",
".",
"username",
"is",
"the",
"Django",
"-",
"friendly",
"username",
"of",
"the",
"user",
".",
"ldap_user",
".",
"dn",
"is",
"the",
"user",
"s",
"DN",
"and",
"ldap_user",
".",
"attrs",
"contains",
"all",
"of",
"their",
"LDAP",
"attributes",
"."
] | python | train |
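For illustration, the lookup this builds for username "Alice" on the default user model (USERNAME_FIELD == "username"):

kwargs = {
    "username__iexact": "Alice",        # match existing users case-insensitively
    "defaults": {"username": "alice"},  # but store new usernames lower-cased
}
# model.objects.get_or_create(**kwargs) -> (User, created) 2-tuple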
HydrelioxGitHub/pybbox | pybbox/__init__.py | https://github.com/HydrelioxGitHub/pybbox/blob/bedcdccab5d18d36890ef8bf414845f2dec18b5c/pybbox/__init__.py#L83-L96 | def get_token(self):
"""
Return a string which is a token, needed for some API calls
:return: Token (can be used with some API calls)
:rtype: str
.. todo:: make a token class to be able to store date of expiration
"""
self.bbox_auth.set_access(BboxConstant.AUTHENTICATION_LEVEL_PRIVATE, BboxConstant.AUTHENTICATION_LEVEL_PRIVATE)
self.bbox_url.set_api_name(BboxConstant.API_DEVICE, "token")
api = BboxApiCall(self.bbox_url, BboxConstant.HTTP_METHOD_GET, None,
self.bbox_auth)
resp = api.execute_api_request()
return resp.json()[0]['device']['token'] | [
"def",
"get_token",
"(",
"self",
")",
":",
"self",
".",
"bbox_auth",
".",
"set_access",
"(",
"BboxConstant",
".",
"AUTHENTICATION_LEVEL_PRIVATE",
",",
"BboxConstant",
".",
"AUTHENTICATION_LEVEL_PRIVATE",
")",
"self",
".",
"bbox_url",
".",
"set_api_name",
"(",
"BboxConstant",
".",
"API_DEVICE",
",",
"\"token\"",
")",
"api",
"=",
"BboxApiCall",
"(",
"self",
".",
"bbox_url",
",",
"BboxConstant",
".",
"HTTP_METHOD_GET",
",",
"None",
",",
"self",
".",
"bbox_auth",
")",
"resp",
"=",
"api",
".",
"execute_api_request",
"(",
")",
"return",
"resp",
".",
"json",
"(",
")",
"[",
"0",
"]",
"[",
"'device'",
"]",
"[",
"'token'",
"]"
] | Return a string which is a token, needed for some API calls
:return: Token (can be used with some API calls)
:rtype: str
.. todo:: make a token class to be able to store date of expiration | [
"Return",
"a",
"string",
"which",
"is",
"a",
"token",
"needed",
"for",
"some",
"API",
"calls",
":",
"return",
":",
"Token",
"(",
"can",
"be",
"used",
"with",
"some",
"API",
"call",
":",
"rtype",
":",
"str"
] | python | train |
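A hedged usage sketch; the Bbox constructor and its ip keyword are assumptions based on typical pybbox usage:

from pybbox import Bbox

box = Bbox(ip="192.168.1.254")  # the router's default LAN address
token = box.get_token()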
arista-eosplus/pyeapi | pyeapi/api/abstract.py | https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/abstract.py#L162-L174 | def configure_interface(self, name, commands):
"""Configures the specified interface with the commands
Args:
name (str): The interface name to configure
commands: The commands to configure in the interface
Returns:
True if the commands completed successfully
"""
commands = make_iterable(commands)
commands.insert(0, 'interface %s' % name)
return self.configure(commands) | [
"def",
"configure_interface",
"(",
"self",
",",
"name",
",",
"commands",
")",
":",
"commands",
"=",
"make_iterable",
"(",
"commands",
")",
"commands",
".",
"insert",
"(",
"0",
",",
"'interface %s'",
"%",
"name",
")",
"return",
"self",
".",
"configure",
"(",
"commands",
")"
] | Configures the specified interface with the commands
Args:
name (str): The interface name to configure
commands: The commands to configure in the interface
Returns:
True if the commands completed successfully | [
"Configures",
"the",
"specified",
"interface",
"with",
"the",
"commands"
] | python | train |
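A usage sketch through one of the concrete api modules that inherit this method ("veos01" must be defined in your eapi.conf):

import pyeapi

node = pyeapi.connect_to("veos01")
# Prepends 'interface Ethernet1' and sends both lines as one config block
node.api("switchports").configure_interface(
    "Ethernet1", ["switchport mode trunk"])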
dmckeone/frosty | frosty/freezers.py | https://github.com/dmckeone/frosty/blob/868d81e72b6c8e354af3697531c20f116cd1fc9a/frosty/freezers.py#L44-L82 | def build_includes(cls, include_packages):
"""
The default include strategy is to add a star (*) wild card after all sub-packages (but not the main package).
This strategy is compatible with py2app and bbfreeze.
Example (From SaltStack 2014.7):
salt
salt.fileserver.*
salt.modules.*
etc...
:param include_packages: List of package references to recurse for subpackages
"""
includes, package_root_paths = cls._split_packages(include_packages)
for package_path, package_name in six.iteritems(package_root_paths):
if re.search(r'__init__.py.*$', package_path):
# Looks like a package. Walk the directory and see if there are more.
package_files = set([os.path.dirname(package_path)])
for root, dirs, files in os.walk(os.path.dirname(package_path)):
if '__init__.py' in files:
package_files.add(root)
if len(package_files) > 1:
common_prefix = os.path.commonprefix(package_files)
common_dir = os.path.dirname(common_prefix)
package_tails = set([f[len(common_dir) + len(os.sep):] for f in package_files])
package_names = set([tail.replace(os.sep, '.') for tail in package_tails])
package_names_with_star = set([pkg + '.*' if pkg != package_name else pkg for pkg in package_names])
includes |= package_names_with_star
else:
# No sub-packages. Just add the package name by itself.
includes.add(package_name)
else:
# Not a package. Just add the module.
includes.add(package_name)
return includes | [
"def",
"build_includes",
"(",
"cls",
",",
"include_packages",
")",
":",
"includes",
",",
"package_root_paths",
"=",
"cls",
".",
"_split_packages",
"(",
"include_packages",
")",
"for",
"package_path",
",",
"package_name",
"in",
"six",
".",
"iteritems",
"(",
"package_root_paths",
")",
":",
"if",
"re",
".",
"search",
"(",
"r'__init__.py.*$'",
",",
"package_path",
")",
":",
"# Looks like a package. Walk the directory and see if there are more.",
"package_files",
"=",
"set",
"(",
"[",
"os",
".",
"path",
".",
"dirname",
"(",
"package_path",
")",
"]",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"package_path",
")",
")",
":",
"if",
"'__init__.py'",
"in",
"files",
":",
"package_files",
".",
"add",
"(",
"root",
")",
"if",
"len",
"(",
"package_files",
")",
">",
"1",
":",
"common_prefix",
"=",
"os",
".",
"path",
".",
"commonprefix",
"(",
"package_files",
")",
"common_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"common_prefix",
")",
"package_tails",
"=",
"set",
"(",
"[",
"f",
"[",
"len",
"(",
"common_dir",
")",
"+",
"len",
"(",
"os",
".",
"sep",
")",
":",
"]",
"for",
"f",
"in",
"package_files",
"]",
")",
"package_names",
"=",
"set",
"(",
"[",
"tail",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'.'",
")",
"for",
"tail",
"in",
"package_tails",
"]",
")",
"package_names_with_star",
"=",
"set",
"(",
"[",
"pkg",
"+",
"'.*'",
"if",
"pkg",
"!=",
"package_name",
"else",
"pkg",
"for",
"pkg",
"in",
"package_names",
"]",
")",
"includes",
"|=",
"package_names_with_star",
"else",
":",
"# No sub-packages. Just add the package name by itself.",
"includes",
".",
"add",
"(",
"package_name",
")",
"else",
":",
"# Not a package. Just add the module.",
"includes",
".",
"add",
"(",
"package_name",
")",
"return",
"includes"
] | The default include strategy is to add a star (*) wild card after all sub-packages (but not the main package).
This strategy is compatible with py2app and bbfreeze.
Example (From SaltStack 2014.7):
salt
salt.fileserver.*
salt.modules.*
etc...
:param include_packages: List of package references to recurse for subpackages | [
"The",
"default",
"include",
"strategy",
"is",
"to",
"add",
"a",
"star",
"(",
"*",
")",
"wild",
"card",
"after",
"all",
"sub",
"-",
"packages",
"(",
"but",
"not",
"the",
"main",
"package",
")",
".",
"This",
"strategy",
"is",
"compatible",
"with",
"py2app",
"and",
"bbfreeze",
"."
] | python | train |
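The naming step of the strategy above can be sketched standalone; this hedged re-implementation covers only the final star-suffix rule, not the filesystem walk:

def star_suffix(package_names, root):
    # Sub-packages get a '.*' wildcard; the root package stays verbatim.
    return {pkg + '.*' if pkg != root else pkg for pkg in package_names}

print(sorted(star_suffix({'salt', 'salt.modules', 'salt.fileserver'}, 'salt')))
# ['salt', 'salt.fileserver.*', 'salt.modules.*']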
selectel/pyte | pyte/screens.py | https://github.com/selectel/pyte/blob/8adad489f86da1788a7995720c344a2fa44f244e/pyte/screens.py#L582-L589 | def linefeed(self):
"""Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
self.carriage_return() | [
"def",
"linefeed",
"(",
"self",
")",
":",
"self",
".",
"index",
"(",
")",
"if",
"mo",
".",
"LNM",
"in",
"self",
".",
"mode",
":",
"self",
".",
"carriage_return",
"(",
")"
] | Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return. | [
"Perform",
"an",
"index",
"and",
"if",
":",
"data",
":",
"~pyte",
".",
"modes",
".",
"LNM",
"is",
"set",
"a",
"carriage",
"return",
"."
] | python | train |
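A quick check of the behaviour via the public pyte API (CSI mode 20 is LNM):

import pyte

screen = pyte.Screen(80, 24)
stream = pyte.Stream(screen)
stream.feed("\x1b[20h")            # set LNM: linefeed now implies carriage return
stream.feed("one\ntwo")
print(screen.display[1].rstrip())  # 'two' -- cursor returned to column 0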
googlefonts/ufo2ft | Lib/ufo2ft/__init__.py | https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/__init__.py#L393-L470 | def compileInterpolatableOTFsFromDS(
designSpaceDoc,
preProcessorClass=OTFPreProcessor,
outlineCompilerClass=OutlineOTFCompiler,
featureCompilerClass=None,
featureWriters=None,
glyphOrder=None,
useProductionNames=None,
roundTolerance=None,
inplace=False,
):
"""Create FontTools CFF fonts from the DesignSpaceDocument UFO sources
with interpolatable outlines.
Interpolatable means without subroutinization and specializer optimizations
and no removal of overlaps.
If the Designspace contains a "public.skipExportGlyphs" lib key, these
glyphs will not be exported to the final font. If these glyphs are used as
components in any other glyph, those components get decomposed. If the lib
key doesn't exist in the Designspace, all glyphs are exported (keys in
individual UFOs are ignored). UFO groups and kerning will be pruned of
skipped glyphs.
The DesignSpaceDocument should contain SourceDescriptor objects with 'font'
attribute set to an already loaded defcon.Font object (or compatible UFO
Font class). If 'font' attribute is unset or None, an AttributeError exception
is thrown.
Return a copy of the DesignSpaceDocument object (or the same one if
inplace=True) with the source's 'font' attribute set to the corresponding
TTFont instance.
For sources that have the 'layerName' attribute defined, the corresponding TTFont
object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp",
"vmtx" and "VORG"), and no OpenType layout tables.
"""
for source in designSpaceDoc.sources:
if source.font is None:
raise AttributeError(
"designspace source '%s' is missing required 'font' attribute"
% getattr(source, "name", "<Unknown>")
)
skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", [])
otfs = []
for source in designSpaceDoc.sources:
otfs.append(
compileOTF(
ufo=source.font,
layerName=source.layerName,
preProcessorClass=preProcessorClass,
outlineCompilerClass=outlineCompilerClass,
featureCompilerClass=featureCompilerClass,
featureWriters=featureWriters,
glyphOrder=glyphOrder,
useProductionNames=useProductionNames,
optimizeCFF=CFFOptimization.NONE,
roundTolerance=roundTolerance,
removeOverlaps=False,
overlapsBackend=None,
inplace=inplace,
skipExportGlyphs=skipExportGlyphs,
_tables=SPARSE_OTF_MASTER_TABLES if source.layerName else None,
)
)
if inplace:
result = designSpaceDoc
else:
# TODO try a more efficient copy method that doesn't involve (de)serializing
result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring())
for source, otf in zip(result.sources, otfs):
source.font = otf
return result | [
"def",
"compileInterpolatableOTFsFromDS",
"(",
"designSpaceDoc",
",",
"preProcessorClass",
"=",
"OTFPreProcessor",
",",
"outlineCompilerClass",
"=",
"OutlineOTFCompiler",
",",
"featureCompilerClass",
"=",
"None",
",",
"featureWriters",
"=",
"None",
",",
"glyphOrder",
"=",
"None",
",",
"useProductionNames",
"=",
"None",
",",
"roundTolerance",
"=",
"None",
",",
"inplace",
"=",
"False",
",",
")",
":",
"for",
"source",
"in",
"designSpaceDoc",
".",
"sources",
":",
"if",
"source",
".",
"font",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"designspace source '%s' is missing required 'font' attribute\"",
"%",
"getattr",
"(",
"source",
",",
"\"name\"",
",",
"\"<Unknown>\"",
")",
")",
"skipExportGlyphs",
"=",
"designSpaceDoc",
".",
"lib",
".",
"get",
"(",
"\"public.skipExportGlyphs\"",
",",
"[",
"]",
")",
"otfs",
"=",
"[",
"]",
"for",
"source",
"in",
"designSpaceDoc",
".",
"sources",
":",
"otfs",
".",
"append",
"(",
"compileOTF",
"(",
"ufo",
"=",
"source",
".",
"font",
",",
"layerName",
"=",
"source",
".",
"layerName",
",",
"preProcessorClass",
"=",
"preProcessorClass",
",",
"outlineCompilerClass",
"=",
"outlineCompilerClass",
",",
"featureCompilerClass",
"=",
"featureCompilerClass",
",",
"featureWriters",
"=",
"featureWriters",
",",
"glyphOrder",
"=",
"glyphOrder",
",",
"useProductionNames",
"=",
"useProductionNames",
",",
"optimizeCFF",
"=",
"CFFOptimization",
".",
"NONE",
",",
"roundTolerance",
"=",
"roundTolerance",
",",
"removeOverlaps",
"=",
"False",
",",
"overlapsBackend",
"=",
"None",
",",
"inplace",
"=",
"inplace",
",",
"skipExportGlyphs",
"=",
"skipExportGlyphs",
",",
"_tables",
"=",
"SPARSE_OTF_MASTER_TABLES",
"if",
"source",
".",
"layerName",
"else",
"None",
",",
")",
")",
"if",
"inplace",
":",
"result",
"=",
"designSpaceDoc",
"else",
":",
"# TODO try a more efficient copy method that doesn't involve (de)serializing",
"result",
"=",
"designSpaceDoc",
".",
"__class__",
".",
"fromstring",
"(",
"designSpaceDoc",
".",
"tostring",
"(",
")",
")",
"for",
"source",
",",
"otf",
"in",
"zip",
"(",
"result",
".",
"sources",
",",
"otfs",
")",
":",
"source",
".",
"font",
"=",
"otf",
"return",
"result"
] | Create FontTools CFF fonts from the DesignSpaceDocument UFO sources
with interpolatable outlines.
Interpolatable means without subroutinization and specializer optimizations
and no removal of overlaps.
If the Designspace contains a "public.skipExportGlyphs" lib key, these
glyphs will not be exported to the final font. If these glyphs are used as
components in any other glyph, those components get decomposed. If the lib
key doesn't exist in the Designspace, all glyphs are exported (keys in
individual UFOs are ignored). UFO groups and kerning will be pruned of
skipped glyphs.
The DesignSpaceDocument should contain SourceDescriptor objects with 'font'
attribute set to an already loaded defcon.Font object (or compatible UFO
Font class). If 'font' attribute is unset or None, an AttributeError exception
is thrown.
Return a copy of the DesignSpaceDocument object (or the same one if
inplace=True) with the source's 'font' attribute set to the corresponding
TTFont instance.
For sources that have the 'layerName' attribute defined, the corresponding TTFont
object will contain only a minimum set of tables ("head", "hmtx", "CFF ", "maxp",
"vmtx" and "VORG"), and no OpenType layout tables. | [
"Create",
"FontTools",
"CFF",
"fonts",
"from",
"the",
"DesignSpaceDocument",
"UFO",
"sources",
"with",
"interpolatable",
"outlines",
"."
] | python | train |
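A hedged end-to-end sketch; the designspace filename is illustrative, and each source gets a loaded defcon Font first, as the docstring requires:

import defcon
from fontTools.designspaceLib import DesignSpaceDocument
from ufo2ft import compileInterpolatableOTFsFromDS

ds = DesignSpaceDocument.fromfile("MyFont.designspace")
for source in ds.sources:
    source.font = defcon.Font(source.path)  # satisfy the 'font' requirement

result = compileInterpolatableOTFsFromDS(ds)
for source in result.sources:
    print(source.styleName, type(source.font))  # now a fontTools TTFont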
etingof/pysnmp | pysnmp/smi/mibs/SNMPv2-SMI.py | https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/mibs/SNMPv2-SMI.py#L2552-L2608 | def valueToOid(self, value, impliedFlag=False, parentIndices=None):
"""Turn value object into SMI table instance identifier.
SNMP SMI table objects are identified by OIDs composed of columnar
object ID and instance index. The index part can be composed
from the values of one or more tabular objects.
This method takes an arbitrary value object and turns it into a
sequence of integers representing the tail piece of a tabular
object identifier.
Parameters
----------
value: one of the SNMP data types that can be used in SMI table
indices. Allowed types are: :py:class:`Integer`,
:py:class:`OctetString`, :py:class:`ObjectIdentifier`,
:py:class:`IpAddress` and :py:class:`Bits`.
impliedFlag: :py:class:`bool` - if `False`, the length of the
serialized value is included as the first integer of the sequence.
Otherwise the length is not included (which is frequently the
case for the last index in the series or a fixed-length value).
Returns
-------
:py:class:`tuple` - tuple of integers representing the tail piece
of an OBJECT IDENTIFIER (i.e. tabular object instance ID)
"""
if hasattr(value, 'cloneAsName'):
return value.cloneAsName(impliedFlag, parentRow=self, parentIndices=parentIndices)
baseTag = value.getTagSet().getBaseTag()
if baseTag == Integer.tagSet.getBaseTag():
return int(value),
elif IpAddress.tagSet.isSuperTagSetOf(value.getTagSet()):
return value.asNumbers()
elif baseTag == OctetString.tagSet.getBaseTag():
if impliedFlag or value.isFixedLength():
initial = ()
else:
initial = (len(value),)
return initial + value.asNumbers()
elif baseTag == ObjectIdentifier.tagSet.getBaseTag():
if impliedFlag:
return tuple(value)
else:
return (len(value),) + tuple(value)
# rfc2578, 7.1
elif baseTag == Bits.tagSet.getBaseTag():
return (len(value),) + value.asNumbers()
else:
raise error.SmiError('Unknown value type for index %r' % (value,)) | [
"def",
"valueToOid",
"(",
"self",
",",
"value",
",",
"impliedFlag",
"=",
"False",
",",
"parentIndices",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"'cloneAsName'",
")",
":",
"return",
"value",
".",
"cloneAsName",
"(",
"impliedFlag",
",",
"parentRow",
"=",
"self",
",",
"parentIndices",
"=",
"parentIndices",
")",
"baseTag",
"=",
"value",
".",
"getTagSet",
"(",
")",
".",
"getBaseTag",
"(",
")",
"if",
"baseTag",
"==",
"Integer",
".",
"tagSet",
".",
"getBaseTag",
"(",
")",
":",
"return",
"int",
"(",
"value",
")",
",",
"elif",
"IpAddress",
".",
"tagSet",
".",
"isSuperTagSetOf",
"(",
"value",
".",
"getTagSet",
"(",
")",
")",
":",
"return",
"value",
".",
"asNumbers",
"(",
")",
"elif",
"baseTag",
"==",
"OctetString",
".",
"tagSet",
".",
"getBaseTag",
"(",
")",
":",
"if",
"impliedFlag",
"or",
"value",
".",
"isFixedLength",
"(",
")",
":",
"initial",
"=",
"(",
")",
"else",
":",
"initial",
"=",
"(",
"len",
"(",
"value",
")",
",",
")",
"return",
"initial",
"+",
"value",
".",
"asNumbers",
"(",
")",
"elif",
"baseTag",
"==",
"ObjectIdentifier",
".",
"tagSet",
".",
"getBaseTag",
"(",
")",
":",
"if",
"impliedFlag",
":",
"return",
"tuple",
"(",
"value",
")",
"else",
":",
"return",
"(",
"len",
"(",
"value",
")",
",",
")",
"+",
"tuple",
"(",
"value",
")",
"# rfc2578, 7.1",
"elif",
"baseTag",
"==",
"Bits",
".",
"tagSet",
".",
"getBaseTag",
"(",
")",
":",
"return",
"(",
"len",
"(",
"value",
")",
",",
")",
"+",
"value",
".",
"asNumbers",
"(",
")",
"else",
":",
"raise",
"error",
".",
"SmiError",
"(",
"'Unknown value type for index %r'",
"%",
"(",
"value",
",",
")",
")"
] | Turn value object into SMI table instance identifier.
SNMP SMI table objects are identified by OIDs composed of columnar
object ID and instance index. The index part can be composed
from the values of one or more tabular objects.
This method takes an arbitrary value object and turns it into a
sequence of integers representing the tail piece of a tabular
object identifier.
Parameters
----------
value: one of the SNMP data types that can be used in SMI table
indices. Allowed types are: :py:class:`Integer`,
:py:class:`OctetString`, :py:class:`ObjectIdentifier`,
:py:class:`IpAddress` and :py:class:`Bits`.
impliedFlag: :py:class:`bool` - if `False`, the length of the
serialized value is included as the first integer of the sequence.
Otherwise the length is not included (which is frequently the
case for the last index in the series or a fixed-length value).
Returns
-------
:py:class:`tuple` - tuple of integers representing the tail piece
of an OBJECT IDENTIFIER (i.e. tabular object instance ID) | [
"Turn",
"value",
"object",
"into",
"SMI",
"table",
"instance",
"identifier",
"."
] | python | train |
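The encoding rules can be observed directly on the value types, outside any table row — a sketch of the non-implied OctetString and Integer cases:

from pysnmp.proto.rfc1902 import Integer, OctetString

value = OctetString('ab')
print((len(value),) + value.asNumbers())  # (2, 97, 98): length-prefixed
print((int(Integer(41)),))                # (41,): a single sub-identifier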
GiulioRossetti/dynetx | dynetx/classes/function.py | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/function.py#L503-L535 | def set_node_attributes(G, values, name=None):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : DyNetx Graph
name : string
Attribute name
values: dict
Dictionary of attribute values keyed by node. If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every node in `G`.
"""
# Set node attributes based on type of `values`
if name is not None: # `values` must not be a dict of dict
try: # `values` is a dict
for n, v in values.items():
try:
G.node[n][name] = values[n]
except KeyError:
pass
except AttributeError: # `values` is a constant
for n in G:
G.node[n][name] = values
else: # `values` must be dict of dict
for n, d in values.items():
try:
G.node[n].update(d)
except KeyError:
pass | [
"def",
"set_node_attributes",
"(",
"G",
",",
"values",
",",
"name",
"=",
"None",
")",
":",
"# Set node attributes based on type of `values`",
"if",
"name",
"is",
"not",
"None",
":",
"# `values` must not be a dict of dict",
"try",
":",
"# `values` is a dict",
"for",
"n",
",",
"v",
"in",
"values",
".",
"items",
"(",
")",
":",
"try",
":",
"G",
".",
"node",
"[",
"n",
"]",
"[",
"name",
"]",
"=",
"values",
"[",
"n",
"]",
"except",
"KeyError",
":",
"pass",
"except",
"AttributeError",
":",
"# `values` is a constant",
"for",
"n",
"in",
"G",
":",
"G",
".",
"node",
"[",
"n",
"]",
"[",
"name",
"]",
"=",
"values",
"else",
":",
"# `values` must be dict of dict",
"for",
"n",
",",
"d",
"in",
"values",
".",
"items",
"(",
")",
":",
"try",
":",
"G",
".",
"node",
"[",
"n",
"]",
".",
"update",
"(",
"d",
")",
"except",
"KeyError",
":",
"pass"
] | Set node attributes from dictionary of nodes and values
Parameters
----------
G : DyNetx Graph
name : string
Attribute name
values: dict
Dictionary of attribute values keyed by node. If `values` is not a
dictionary, then it is treated as a single attribute value that is then
applied to every node in `G`. | [
"Set",
"node",
"attributes",
"from",
"dictionary",
"of",
"nodes",
"and",
"values"
] | python | train |
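A small usage sketch; DynGraph and add_interaction come from the same package:

import dynetx as dn
from dynetx.classes.function import set_node_attributes

g = dn.DynGraph()
g.add_interaction(1, 2, t=0)
set_node_attributes(g, {1: 'hub', 2: 'leaf'}, name='role')
print(g.node[1]['role'])  # 'hub'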
exa-analytics/exa | exa/core/editor.py | https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/editor.py#L108-L119 | def append(self, lines):
"""
Args:
lines (list): List of line strings to append to the end of the editor
"""
if isinstance(lines, list):
self._lines = self._lines + lines
elif isinstance(lines, str):
lines = lines.split('\n')
self._lines = self._lines + lines
else:
raise TypeError('Unsupported type {0} for lines.'.format(type(lines))) | [
"def",
"append",
"(",
"self",
",",
"lines",
")",
":",
"if",
"isinstance",
"(",
"lines",
",",
"list",
")",
":",
"self",
".",
"_lines",
"=",
"self",
".",
"_lines",
"+",
"lines",
"elif",
"isinstance",
"(",
"lines",
",",
"str",
")",
":",
"lines",
"=",
"lines",
".",
"split",
"(",
"'\\n'",
")",
"self",
".",
"_lines",
"=",
"self",
".",
"_lines",
"+",
"lines",
"else",
":",
"raise",
"TypeError",
"(",
"'Unsupported type {0} for lines.'",
".",
"format",
"(",
"type",
"(",
"lines",
")",
")",
")"
] | Args:
lines (list): List of line strings to append to the end of the editor | [
"Args",
":",
"lines",
"(",
"list",
")",
":",
"List",
"of",
"line",
"strings",
"to",
"append",
"to",
"the",
"end",
"of",
"the",
"editor"
] | python | train |
jgillick/LendingClub | lendingclub/filters.py | https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/filters.py#L431-L455 | def all_filters(lc):
"""
Get a list of all your saved filters
Parameters
----------
lc : :py:class:`lendingclub.LendingClub`
An instance of the authenticated LendingClub class
Returns
-------
list
A list of lendingclub.filters.SavedFilter objects
"""
filters = []
response = lc.session.get('/browse/getSavedFiltersAj.action')
json_response = response.json()
# Load all filters
if lc.session.json_success(json_response):
for saved in json_response['filters']:
filters.append(SavedFilter(lc, saved['id']))
return filters | [
"def",
"all_filters",
"(",
"lc",
")",
":",
"filters",
"=",
"[",
"]",
"response",
"=",
"lc",
".",
"session",
".",
"get",
"(",
"'/browse/getSavedFiltersAj.action'",
")",
"json_response",
"=",
"response",
".",
"json",
"(",
")",
"# Load all filters",
"if",
"lc",
".",
"session",
".",
"json_success",
"(",
"json_response",
")",
":",
"for",
"saved",
"in",
"json_response",
"[",
"'filters'",
"]",
":",
"filters",
".",
"append",
"(",
"SavedFilter",
"(",
"lc",
",",
"saved",
"[",
"'id'",
"]",
")",
")",
"return",
"filters"
] | Get a list of all your saved filters
Parameters
----------
lc : :py:class:`lendingclub.LendingClub`
An instance of the authenticated LendingClub class
Returns
-------
list
A list of lendingclub.filters.SavedFilter objects | [
"Get",
"a",
"list",
"of",
"all",
"your",
"saved",
"filters"
] | python | train |
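A hedged sketch with illustrative credentials; authentication follows the project's usual pattern:

from lendingclub import LendingClub
from lendingclub.filters import all_filters

lc = LendingClub(email='you@example.com', password='secret123')
lc.authenticate()
for saved in all_filters(lc):
    print(saved)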
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L179-L243 | def get_output_directory(args):
"""
Determination of the directory for output placement involves possibilities
for explicit user instruction (absolute path or relative to execution) and
implicit default configuration (absolute path or relative to input) from
the system global configuration file. This function is responsible for
reliably returning the appropriate output directory which will contain any
log(s), ePub(s), and unzipped output of OpenAccess_EPUB.
It utilizes the parsed args, passed as an object, and is self-sufficient in
accessing the config file.
All paths returned by this function are absolute.
"""
#Import the global config file as a module
import imp
config_path = os.path.join(cache_location(), 'config.py')
try:
config = imp.load_source('config', config_path)
except IOError:
print('Could not find {0}, please run oae-quickstart'.format(config_path))
sys.exit()
#args.output is the explicit user instruction, None if unspecified
if args.output:
#args.output may be an absolute path
if os.path.isabs(args.output):
return args.output # return as is
#or args.output may be a relative path, relative to cwd
else:
return evaluate_relative_path(relative=args.output)
#config.default_output for default behavior without explicit instruction
else:
#config.default_output may be an absolute_path
if os.path.isabs(config.default_output):
return config.default_output
#or config.default_output may be a relative path, relative to input
else:
if args.input: # The case of single input
if 'http://www' in args.input:
#Fetched from internet by URL
raw_name = url_input(args.input, download=False)
abs_input_path = os.path.join(os.getcwd(), raw_name+'.xml')
elif args.input[:4] == 'doi:':
#Fetched from internet by DOI
raw_name = doi_input(args.input, download=False)
abs_input_path = os.path.join(os.getcwd(), raw_name+'.xml')
else:
#Local option, could be anywhere
abs_input_path = get_absolute_path(args.input)
abs_input_parent = os.path.split(abs_input_path)[0]
return evaluate_relative_path(abs_input_parent, config.default_output)
elif args.batch: # The case of Batch Mode
#Batch should only work on a supplied directory
abs_batch_path = get_absolute_path(args.batch)
return abs_batch_path
elif args.zip:
#Zip is a local-only option, behaves just like local xml
abs_input_path = get_absolute_path(args.zip)
abs_input_parent = os.path.split(abs_input_path)[0]
return evaluate_relative_path(abs_input_parent, config.default_output)
elif args.collection:
return os.getcwd()
else: # Un-handled or currently unsupported options
print('The output location could not be determined...')
sys.exit() | [
"def",
"get_output_directory",
"(",
"args",
")",
":",
"#Import the global config file as a module",
"import",
"imp",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cache_location",
"(",
")",
",",
"'config.py'",
")",
"try",
":",
"config",
"=",
"imp",
".",
"load_source",
"(",
"'config'",
",",
"config_path",
")",
"except",
"IOError",
":",
"print",
"(",
"'Could not find {0}, please run oae-quickstart'",
".",
"format",
"(",
"config_path",
")",
")",
"sys",
".",
"exit",
"(",
")",
"#args.output is the explicit user instruction, None if unspecified",
"if",
"args",
".",
"output",
":",
"#args.output may be an absolute path",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"args",
".",
"output",
")",
":",
"return",
"args",
".",
"output",
"# return as is",
"#or args.output may be a relative path, relative to cwd",
"else",
":",
"return",
"evaluate_relative_path",
"(",
"relative",
"=",
"args",
".",
"output",
")",
"#config.default_output for default behavior without explicit instruction",
"else",
":",
"#config.default_output may be an absolute_path",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"config",
".",
"default_output",
")",
":",
"return",
"config",
".",
"default_output",
"#or config.default_output may be a relative path, relative to input",
"else",
":",
"if",
"args",
".",
"input",
":",
"# The case of single input",
"if",
"'http://www'",
"in",
"args",
".",
"input",
":",
"#Fetched from internet by URL",
"raw_name",
"=",
"url_input",
"(",
"args",
".",
"input",
",",
"download",
"=",
"False",
")",
"abs_input_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"raw_name",
"+",
"'.xml'",
")",
"elif",
"args",
".",
"input",
"[",
":",
"4",
"]",
"==",
"'doi:'",
":",
"#Fetched from internet by DOI",
"raw_name",
"=",
"doi_input",
"(",
"args",
".",
"input",
",",
"download",
"=",
"False",
")",
"abs_input_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"raw_name",
"+",
"'.xml'",
")",
"else",
":",
"#Local option, could be anywhere",
"abs_input_path",
"=",
"get_absolute_path",
"(",
"args",
".",
"input",
")",
"abs_input_parent",
"=",
"os",
".",
"path",
".",
"split",
"(",
"abs_input_path",
")",
"[",
"0",
"]",
"return",
"evaluate_relative_path",
"(",
"abs_input_parent",
",",
"config",
".",
"default_output",
")",
"elif",
"args",
".",
"batch",
":",
"# The case of Batch Mode",
"#Batch should only work on a supplied directory",
"abs_batch_path",
"=",
"get_absolute_path",
"(",
"args",
".",
"batch",
")",
"return",
"abs_batch_path",
"elif",
"args",
".",
"zip",
":",
"#Zip is a local-only option, behaves just like local xml",
"abs_input_path",
"=",
"get_absolute_path",
"(",
"args",
".",
"zip",
")",
"abs_input_parent",
"=",
"os",
".",
"path",
".",
"split",
"(",
"abs_input_path",
")",
"[",
"0",
"]",
"return",
"evaluate_relative_path",
"(",
"abs_input_parent",
",",
"config",
".",
"default_output",
")",
"elif",
"args",
".",
"collection",
":",
"return",
"os",
".",
"getcwd",
"(",
")",
"else",
":",
"# Un-handled or currently unsupported options",
"print",
"(",
"'The output location could not be determined...'",
")",
"sys",
".",
"exit",
"(",
")"
] | Determination of the directory for output placement involves possibilities
for explicit user instruction (absolute path or relative to execution) and
implicit default configuration (absolute path or relative to input) from
the system global configuration file. This function is responsible for
reliably returning the appropriate output directory which will contain any
log(s), ePub(s), and unzipped output of OpenAccess_EPUB.
It utilizes the parsed args, passed as an object, and is self-sufficient in
accessing the config file.
All paths returned by this function are absolute. | [
"Determination",
"of",
"the",
"directory",
"for",
"output",
"placement",
"involves",
"possibilities",
"for",
"explicit",
"user",
"instruction",
"(",
"absolute",
"path",
"or",
"relative",
"to",
"execution",
")",
"and",
"implicit",
"default",
"configuration",
"(",
"absolute",
"path",
"or",
"relative",
"to",
"input",
")",
"from",
"the",
"system",
"global",
"configuration",
"file",
".",
"This",
"function",
"is",
"responsible",
"for",
"reliably",
"returning",
"the",
"appropriate",
"output",
"directory",
"which",
"will",
"contain",
"any",
"log",
"(",
"s",
")",
"ePub",
"(",
"s",
")",
"and",
"unzipped",
"output",
"of",
"OpenAccess_EPUB",
"."
] | python | train |
apriha/lineage | src/lineage/resources.py | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L262-L309 | def _load_genetic_map(filename):
""" Load genetic map (e.g. HapMapII).
Parameters
----------
filename : str
path to compressed archive with genetic map data
Returns
-------
genetic_map : dict
dict of pandas.DataFrame genetic maps if loading was successful, else None
Notes
-----
Keys of returned dict are chromosomes and values are the corresponding genetic map.
"""
try:
genetic_map = {}
with tarfile.open(filename, "r") as tar:
# http://stackoverflow.com/a/2018576
for member in tar.getmembers():
if "genetic_map" in member.name:
df = pd.read_csv(tar.extractfile(member), sep="\t")
df = df.rename(
columns={
"Position(bp)": "pos",
"Rate(cM/Mb)": "rate",
"Map(cM)": "map",
}
)
del df["Chromosome"]
start_pos = member.name.index("chr") + 3
end_pos = member.name.index(".")
genetic_map[member.name[start_pos:end_pos]] = df
# X chrom consists of X PAR regions and X non-PAR region
genetic_map["X"] = pd.concat(
[genetic_map["X_par1"], genetic_map["X"], genetic_map["X_par2"]]
)
del genetic_map["X_par1"]
del genetic_map["X_par2"]
return genetic_map
except Exception as err:
print(err)
return None | [
"def",
"_load_genetic_map",
"(",
"filename",
")",
":",
"try",
":",
"genetic_map",
"=",
"{",
"}",
"with",
"tarfile",
".",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"tar",
":",
"# http://stackoverflow.com/a/2018576",
"for",
"member",
"in",
"tar",
".",
"getmembers",
"(",
")",
":",
"if",
"\"genetic_map\"",
"in",
"member",
".",
"name",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"tar",
".",
"extractfile",
"(",
"member",
")",
",",
"sep",
"=",
"\"\\t\"",
")",
"df",
"=",
"df",
".",
"rename",
"(",
"columns",
"=",
"{",
"\"Position(bp)\"",
":",
"\"pos\"",
",",
"\"Rate(cM/Mb)\"",
":",
"\"rate\"",
",",
"\"Map(cM)\"",
":",
"\"map\"",
",",
"}",
")",
"del",
"df",
"[",
"\"Chromosome\"",
"]",
"start_pos",
"=",
"member",
".",
"name",
".",
"index",
"(",
"\"chr\"",
")",
"+",
"3",
"end_pos",
"=",
"member",
".",
"name",
".",
"index",
"(",
"\".\"",
")",
"genetic_map",
"[",
"member",
".",
"name",
"[",
"start_pos",
":",
"end_pos",
"]",
"]",
"=",
"df",
"# X chrom consists of X PAR regions and X non-PAR region",
"genetic_map",
"[",
"\"X\"",
"]",
"=",
"pd",
".",
"concat",
"(",
"[",
"genetic_map",
"[",
"\"X_par1\"",
"]",
",",
"genetic_map",
"[",
"\"X\"",
"]",
",",
"genetic_map",
"[",
"\"X_par2\"",
"]",
"]",
")",
"del",
"genetic_map",
"[",
"\"X_par1\"",
"]",
"del",
"genetic_map",
"[",
"\"X_par2\"",
"]",
"return",
"genetic_map",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"None"
] | Load genetic map (e.g. HapMapII).
Parameters
----------
filename : str
path to compressed archive with genetic map data
Returns
-------
genetic_map : dict
dict of pandas.DataFrame genetic maps if loading was successful, else None
Notes
-----
Keys of returned dict are chromosomes and values are the corresponding genetic map. | [
"Load",
"genetic",
"map",
"(",
"e",
".",
"g",
".",
"HapMapII",
")",
"."
] | python | train |
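Downstream, the loaded map is keyed by chromosome. A hedged sketch through the public getter (the getter name is taken from the same module and may vary by version; it downloads the archive and then calls the loader above):

from lineage.resources import Resources

resources = Resources()
genetic_map = resources.get_genetic_map_HapMapII_GRCh37()
print(sorted(genetic_map)[:5])         # chromosome keys, e.g. ['1', '10', ...]
print(list(genetic_map['1'].columns))  # ['pos', 'rate', 'map']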
grundprinzip/pyxplorer | pyxplorer/types.py | https://github.com/grundprinzip/pyxplorer/blob/34c1d166cfef4a94aeb6d5fcb3cbb726d48146e2/pyxplorer/types.py#L215-L224 | def columns(self):
"""
:return: the list of columns in this table
"""
c = self._connection.cursor()
c.execute("describe `%s`.`%s`" % (self._db, self._name))
self._cols = []
for col in c.fetchall():
self._cols.append(Column.build(col, table=self, con=self._connection))
return self._cols | [
"def",
"columns",
"(",
"self",
")",
":",
"c",
"=",
"self",
".",
"_connection",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"describe `%s`.`%s`\"",
"%",
"(",
"self",
".",
"_db",
",",
"self",
".",
"_name",
")",
")",
"self",
".",
"_cols",
"=",
"[",
"]",
"for",
"col",
"in",
"c",
".",
"fetchall",
"(",
")",
":",
"self",
".",
"_cols",
".",
"append",
"(",
"Column",
".",
"build",
"(",
"col",
",",
"table",
"=",
"self",
",",
"con",
"=",
"self",
".",
"_connection",
")",
")",
"return",
"self",
".",
"_cols"
] | :return: the list of columns in this table | [
":",
"return",
":",
"the",
"list",
"of",
"column",
"in",
"this",
"table"
] | python | train |
aws/aws-xray-sdk-python | aws_xray_sdk/core/sampling/connector.py | https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/connector.py#L136-L149 | def _dt_to_epoch(self, dt):
"""
Convert an offset-aware datetime to POSIX time.
"""
if PY2:
# The input datetime is from botocore unmarshalling and it is
# offset-aware so the timedelta of subtracting this time
# to 01/01/1970 using the same tzinfo gives us
# Unix Time (also known as POSIX Time).
time_delta = dt - datetime(1970, 1, 1).replace(tzinfo=dt.tzinfo)
return int(time_delta.total_seconds())
else:
# Added in python 3.3+ and directly returns POSIX time.
return int(dt.timestamp()) | [
"def",
"_dt_to_epoch",
"(",
"self",
",",
"dt",
")",
":",
"if",
"PY2",
":",
"# The input datetime is from botocore unmarshalling and it is",
"# offset-aware so the timedelta of subtracting this time",
"# to 01/01/1970 using the same tzinfo gives us",
"# Unix Time (also known as POSIX Time).",
"time_delta",
"=",
"dt",
"-",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"dt",
".",
"tzinfo",
")",
"return",
"int",
"(",
"time_delta",
".",
"total_seconds",
"(",
")",
")",
"else",
":",
"# Added in python 3.3+ and directly returns POSIX time.",
"return",
"int",
"(",
"dt",
".",
"timestamp",
"(",
")",
")"
] | Convert an offset-aware datetime to POSIX time. | [
"Convert",
"a",
"offset",
"-",
"aware",
"datetime",
"to",
"POSIX",
"time",
"."
] | python | train |
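Both branches agree on offset-aware input; a standalone check:

from datetime import datetime, timezone

dt = datetime(2020, 1, 1, tzinfo=timezone.utc)
print(int(dt.timestamp()))  # 1577836800 (the Python 3 branch)
delta = dt - datetime(1970, 1, 1).replace(tzinfo=dt.tzinfo)
print(int(delta.total_seconds()))  # 1577836800 (the Python 2 arithmetic)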
roboogle/gtkmvc3 | gtkmvco/gtkmvc3/support/metaclasses.py | https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/support/metaclasses.py#L225-L342 | def __create_log_props(cls, log_props, _getdict, _setdict): # @NoSelf
"""Creates all the logical property.
The list of names of properties to be created is passed
with frozenset log_props. The getter/setter information is
taken from _{get,set}dict.
This method also resolves wildcards in names, and performs
all checks to ensure correctness.
Returns the frozen set of the actually created properties
(as not all
getter is provided, and a warning is issued).
"""
real_log_props = set()
resolved_getdict = {}
resolved_setdict = {}
for _dict_name, _dict, _resolved_dict in (
("getter", _getdict, resolved_getdict),
("setter", _setdict, resolved_setdict)):
# first resolve all wildcards
for pat, ai in ((pat, ai)
for pat, ai in _dict.items()
if frozenset(pat) & WILDCARDS):
matches = fnmatch.filter(log_props, pat)
for match in matches:
if match in _resolved_dict:
raise NameError("In class %s.%s %s property '%s' "
"is matched multiple times"
" by patterns" % \
(cls.__module__, cls.__name__, _dict_name, match))
_resolved_dict[match] = ai
if not matches:
logger.warning("In class %s.%s %s pattern '%s' "
"did not match any existing "
"logical property",
cls.__module__, cls.__name__, _dict_name, pat)
# now adds the exact matches (no wildcards) which override
# the pattern-matches
_resolved_dict.update((name, ai)
for name, ai in _dict.items()
if name in log_props)
# checks that all getter/setter have a corresponding logical
# property
not_found = [name for name in _resolved_dict
if name not in log_props]
if not_found:
logger.warning("In class %s.%s logical %s were declared for "
"non-existent observables: %s",
cls.__module__, cls.__name__, _dict_name,
str(not_found))
# creates the properties
for name in log_props:
# finds the getter
ai_get = resolved_getdict.get(name, None)
if ai_get:
# decorator-based
_getter = type(cls).get_getter(cls, name, ai_get.func,
ai_get.has_args)
_deps = ai_get.deps
else:
# old style
_getter = type(cls).get_getter(cls, name)
if _getter is None:
raise RuntimeError("In class %s.%s "
"logical observable '%s' "
"has no getter method" % \
(cls.__module__, cls.__name__, name))
_deps = type(cls)._get_old_style_getter_deps(cls, name,
_getter)
# finds the setter
ai_set = resolved_setdict.get(name, None)
if ai_set:
# decorator-based
if ai_get:
_setter = type(cls).get_setter(cls, name,
ai_set.func, ai_set.has_args,
ai_get.func, ai_get.has_args)
else:
# the getter is old style. _getter is already
# resolved wrt the name it may take, so
# getter_takes_name is False
_setter = type(cls).get_setter(cls, name,
ai_set.func, ai_set.has_args,
_getter, False)
else:
# old style setter
if ai_get:
_setter = type(cls).get_setter(cls, name,
None, None,
ai_get.func,
ai_get.has_args)
else:
_setter = type(cls).get_setter(cls, name)
# creates the logical property, here _setter can be None
prop = PropertyMeta.LogicalOP(_getter, _setter, frozenset(_deps))
setattr(cls, name, prop)
real_log_props.add(name)
# checks that all setters have a getter
setters_no_getters = (set(resolved_setdict) - real_log_props) & \
log_props
if setters_no_getters:
logger.warning("In class %s.%s logical setters have no "
"getters: %s",
cls.__module__, cls.__name__,
", ".join(setters_no_getters))
return frozenset(real_log_props) | [
"def",
"__create_log_props",
"(",
"cls",
",",
"log_props",
",",
"_getdict",
",",
"_setdict",
")",
":",
"# @NoSelf",
"real_log_props",
"=",
"set",
"(",
")",
"resolved_getdict",
"=",
"{",
"}",
"resolved_setdict",
"=",
"{",
"}",
"for",
"_dict_name",
",",
"_dict",
",",
"_resolved_dict",
"in",
"(",
"(",
"\"getter\"",
",",
"_getdict",
",",
"resolved_getdict",
")",
",",
"(",
"\"setter\"",
",",
"_setdict",
",",
"resolved_setdict",
")",
")",
":",
"# first resolve all wildcards",
"for",
"pat",
",",
"ai",
"in",
"(",
"(",
"pat",
",",
"ai",
")",
"for",
"pat",
",",
"ai",
"in",
"_dict",
".",
"items",
"(",
")",
"if",
"frozenset",
"(",
"pat",
")",
"&",
"WILDCARDS",
")",
":",
"matches",
"=",
"fnmatch",
".",
"filter",
"(",
"log_props",
",",
"pat",
")",
"for",
"match",
"in",
"matches",
":",
"if",
"match",
"in",
"_resolved_dict",
":",
"raise",
"NameError",
"(",
"\"In class %s.%s %s property '%s' \"",
"\"is matched multiple times\"",
"\" by patterns\"",
"%",
"(",
"cls",
".",
"__module__",
",",
"cls",
".",
"__name__",
",",
"_dict_name",
",",
"match",
")",
")",
"_resolved_dict",
"[",
"match",
"]",
"=",
"ai",
"if",
"not",
"matches",
":",
"logger",
".",
"warning",
"(",
"\"In class %s.%s %s pattern '%s' \"",
"\"did not match any existing \"",
"\"logical property\"",
",",
"cls",
".",
"__module__",
",",
"cls",
".",
"__name__",
",",
"_dict_name",
",",
"pat",
")",
"# now adds the exact matches (no wilcards) which override",
"# the pattern-matches",
"_resolved_dict",
".",
"update",
"(",
"(",
"name",
",",
"ai",
")",
"for",
"name",
",",
"ai",
"in",
"_dict",
".",
"items",
"(",
")",
"if",
"name",
"in",
"log_props",
")",
"# checks that all getter/setter have a corresponding logical",
"# property",
"not_found",
"=",
"[",
"name",
"for",
"name",
"in",
"_resolved_dict",
"if",
"name",
"not",
"in",
"log_props",
"]",
"if",
"not_found",
":",
"logger",
".",
"warning",
"(",
"\"In class %s.%s logical %s were declared for \"",
"\"non-existent observables: %s\"",
",",
"cls",
".",
"__module__",
",",
"cls",
".",
"__name__",
",",
"_dict_name",
",",
"str",
"(",
"not_found",
")",
")",
"# creates the properties",
"for",
"name",
"in",
"log_props",
":",
"# finds the getter",
"ai_get",
"=",
"resolved_getdict",
".",
"get",
"(",
"name",
",",
"None",
")",
"if",
"ai_get",
":",
"# decorator-based",
"_getter",
"=",
"type",
"(",
"cls",
")",
".",
"get_getter",
"(",
"cls",
",",
"name",
",",
"ai_get",
".",
"func",
",",
"ai_get",
".",
"has_args",
")",
"_deps",
"=",
"ai_get",
".",
"deps",
"else",
":",
"# old style",
"_getter",
"=",
"type",
"(",
"cls",
")",
".",
"get_getter",
"(",
"cls",
",",
"name",
")",
"if",
"_getter",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"In class %s.%s \"",
"\"logical observable '%s' \"",
"\"has no getter method\"",
"%",
"(",
"cls",
".",
"__module__",
",",
"cls",
".",
"__name__",
",",
"name",
")",
")",
"_deps",
"=",
"type",
"(",
"cls",
")",
".",
"_get_old_style_getter_deps",
"(",
"cls",
",",
"name",
",",
"_getter",
")",
"# finds the setter",
"ai_set",
"=",
"resolved_setdict",
".",
"get",
"(",
"name",
",",
"None",
")",
"if",
"ai_set",
":",
"# decorator-based",
"if",
"ai_get",
":",
"_setter",
"=",
"type",
"(",
"cls",
")",
".",
"get_setter",
"(",
"cls",
",",
"name",
",",
"ai_set",
".",
"func",
",",
"ai_set",
".",
"has_args",
",",
"ai_get",
".",
"func",
",",
"ai_get",
".",
"has_args",
")",
"else",
":",
"# the getter is old style. _getter is already",
"# resolved wrt the name it may take, so",
"# getter_takes_name is False",
"_setter",
"=",
"type",
"(",
"cls",
")",
".",
"get_setter",
"(",
"cls",
",",
"name",
",",
"ai_set",
".",
"func",
",",
"ai_set",
".",
"has_args",
",",
"_getter",
",",
"False",
")",
"else",
":",
"# old style setter",
"if",
"ai_get",
":",
"_setter",
"=",
"type",
"(",
"cls",
")",
".",
"get_setter",
"(",
"cls",
",",
"name",
",",
"None",
",",
"None",
",",
"ai_get",
".",
"func",
",",
"ai_get",
".",
"has_args",
")",
"else",
":",
"_setter",
"=",
"type",
"(",
"cls",
")",
".",
"get_setter",
"(",
"cls",
",",
"name",
")",
"# creates the logical property, here _setter can be None",
"prop",
"=",
"PropertyMeta",
".",
"LogicalOP",
"(",
"_getter",
",",
"_setter",
",",
"frozenset",
"(",
"_deps",
")",
")",
"setattr",
"(",
"cls",
",",
"name",
",",
"prop",
")",
"real_log_props",
".",
"add",
"(",
"name",
")",
"# checks that all setters have a getter",
"setters_no_getters",
"=",
"(",
"set",
"(",
"resolved_setdict",
")",
"-",
"real_log_props",
")",
"&",
"log_props",
"if",
"setters_no_getters",
":",
"logger",
".",
"warning",
"(",
"\"In class %s.%s logical setters have no \"",
"\"getters: %s\"",
",",
"cls",
".",
"__module__",
",",
"cls",
".",
"__name__",
",",
"\", \"",
".",
"join",
"(",
"setters_no_getters",
")",
")",
"return",
"frozenset",
"(",
"real_log_props",
")"
] | Creates all the logical properties.
The list of names of properties to be created is passed
with frozenset log_props. The getter/setter information is
taken from _{get,set}dict.
This method also resolves wildcards in names, and performs
all checks to ensure correctness.
Returns the frozen set of the actually created properties
(as not all log_props may be really created, e.g. when no
getter is provided, and a warning is issued). | [
"Creates",
"all",
"the",
"logical",
"property",
"."
] | python | train |
vinci1it2000/schedula | schedula/utils/alg.py | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L202-L227 | def _get_node(nodes, node_id, fuzzy=True):
"""
Returns a dispatcher node that matches the given node id.
:param nodes:
Dispatcher nodes.
:type nodes: dict
:param node_id:
Node id.
:type node_id: str
:return:
The dispatcher node and its id.
:rtype: (str, dict)
"""
try:
return node_id, nodes[node_id] # Return dispatcher node and its id.
except KeyError as ex:
if fuzzy:
it = sorted(nodes.items())
n = next(((k, v) for k, v in it if node_id in k), EMPTY)
if n is not EMPTY:
return n
raise ex | [
"def",
"_get_node",
"(",
"nodes",
",",
"node_id",
",",
"fuzzy",
"=",
"True",
")",
":",
"try",
":",
"return",
"node_id",
",",
"nodes",
"[",
"node_id",
"]",
"# Return dispatcher node and its id.",
"except",
"KeyError",
"as",
"ex",
":",
"if",
"fuzzy",
":",
"it",
"=",
"sorted",
"(",
"nodes",
".",
"items",
"(",
")",
")",
"n",
"=",
"next",
"(",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"it",
"if",
"node_id",
"in",
"k",
")",
",",
"EMPTY",
")",
"if",
"n",
"is",
"not",
"EMPTY",
":",
"return",
"n",
"raise",
"ex"
] | Returns a dispatcher node that matches the given node id.
:param nodes:
Dispatcher nodes.
:type nodes: dict
:param node_id:
Node id.
:type node_id: str
:return:
The dispatcher node and its id.
:rtype: (str, dict) | [
"Returns",
"a",
"dispatcher",
"node",
"that",
"match",
"the",
"given",
"node",
"id",
"."
] | python | train |
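The helper accepts any mapping; exact hits bypass the fuzzy scan:

from schedula.utils.alg import _get_node  # private helper shown above

nodes = {'a/data': 1, 'a/function': 2}
print(_get_node(nodes, 'a/data'))  # ('a/data', 1) -- exact match
print(_get_node(nodes, 'data'))    # ('a/data', 1) -- first sorted key containing 'data'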
kentik/kentikapi-py | kentikapi/v5/tagging.py | https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L124-L130 | def build_json(self, guid):
"""Build JSON with the input guid"""
upserts = []
for value in self.upserts:
upserts.append({"value": value, "criteria": self.upserts[value]})
return json.dumps({'replace_all': self.replace_all, 'guid': guid,
'complete': self.complete, 'upserts': upserts, 'deletes': self.deletes}) | [
"def",
"build_json",
"(",
"self",
",",
"guid",
")",
":",
"upserts",
"=",
"[",
"]",
"for",
"value",
"in",
"self",
".",
"upserts",
":",
"upserts",
".",
"append",
"(",
"{",
"\"value\"",
":",
"value",
",",
"\"criteria\"",
":",
"self",
".",
"upserts",
"[",
"value",
"]",
"}",
")",
"return",
"json",
".",
"dumps",
"(",
"{",
"'replace_all'",
":",
"self",
".",
"replace_all",
",",
"'guid'",
":",
"guid",
",",
"'complete'",
":",
"self",
".",
"complete",
",",
"'upserts'",
":",
"upserts",
",",
"'deletes'",
":",
"self",
".",
"deletes",
"}",
")"
] | Build JSON with the input guid | [
"Build",
"JSON",
"with",
"the",
"input",
"guid"
] | python | train |
noahbenson/pimms | pimms/calculation.py | https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/calculation.py#L357-L362 | def discard(self, *args):
'''
cplan.discard(...) yields a new calculation plan identical to cplan except without any of
the calculation steps listed in the arguments.
'''
return Plan(reduce(lambda m,k: m.discard(k), args, self.nodes)) | [
"def",
"discard",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"Plan",
"(",
"reduce",
"(",
"lambda",
"m",
",",
"k",
":",
"m",
".",
"discard",
"(",
"k",
")",
",",
"args",
",",
"self",
".",
"nodes",
")",
")"
] | cplan.discard(...) yields a new calculation plan identical to cplan except without any of
the calculation steps listed in the arguments. | [
"cplan",
".",
"discard",
"(",
"...",
")",
"yields",
"a",
"new",
"calculation",
"plan",
"identical",
"to",
"cplan",
"except",
"without",
"any",
"of",
"the",
"calculation",
"steps",
"listed",
"in",
"the",
"arguments",
"."
] | python | train |
ianmiell/shutit | shutit_class.py | https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1673-L1707 | def get_file(self,
target_path,
host_path,
note=None,
loglevel=logging.DEBUG):
"""Copy a file from the target machine to the host machine
@param target_path: path to file in the target
@param host_path: path to file on the host machine (e.g. copy test)
@param note: See send()
@type target_path: string
@type host_path: string
@return: boolean
@rtype: string
"""
shutit_global.shutit_global_object.yield_to_draw()
self.handle_note(note)
# Only handle for docker initially, return false in case we care
if self.build['delivery'] != 'docker':
return False
# on the host, run:
#Usage: docker cp [OPTIONS] CONTAINER:PATH LOCALPATH|-
# Need: host env, container id, path from and path to
shutit_pexpect_child = self.get_shutit_pexpect_session_from_id('host_child').pexpect_child
expect = self.expect_prompts['ORIGIN_ENV']
self.send('docker cp ' + self.target['container_id'] + ':' + target_path + ' ' + host_path,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
check_exit=False,
echo=False,
loglevel=loglevel)
self.handle_note_after(note=note)
return True | [
"def",
"get_file",
"(",
"self",
",",
"target_path",
",",
"host_path",
",",
"note",
"=",
"None",
",",
"loglevel",
"=",
"logging",
".",
"DEBUG",
")",
":",
"shutit_global",
".",
"shutit_global_object",
".",
"yield_to_draw",
"(",
")",
"self",
".",
"handle_note",
"(",
"note",
")",
"# Only handle for docker initially, return false in case we care",
"if",
"self",
".",
"build",
"[",
"'delivery'",
"]",
"!=",
"'docker'",
":",
"return",
"False",
"# on the host, run:",
"#Usage: docker cp [OPTIONS] CONTAINER:PATH LOCALPATH|-",
"# Need: host env, container id, path from and path to",
"shutit_pexpect_child",
"=",
"self",
".",
"get_shutit_pexpect_session_from_id",
"(",
"'host_child'",
")",
".",
"pexpect_child",
"expect",
"=",
"self",
".",
"expect_prompts",
"[",
"'ORIGIN_ENV'",
"]",
"self",
".",
"send",
"(",
"'docker cp '",
"+",
"self",
".",
"target",
"[",
"'container_id'",
"]",
"+",
"':'",
"+",
"target_path",
"+",
"' '",
"+",
"host_path",
",",
"shutit_pexpect_child",
"=",
"shutit_pexpect_child",
",",
"expect",
"=",
"expect",
",",
"check_exit",
"=",
"False",
",",
"echo",
"=",
"False",
",",
"loglevel",
"=",
"loglevel",
")",
"self",
".",
"handle_note_after",
"(",
"note",
"=",
"note",
")",
"return",
"True"
] | Copy a file from the target machine to the host machine
@param target_path: path to file in the target
@param host_path: path to file on the host machine (e.g. copy test)
@param note: See send()
@type target_path: string
@type host_path: string
@return: boolean
@rtype: boolean
"Copy",
"a",
"file",
"from",
"the",
"target",
"machine",
"to",
"the",
"host",
"machine"
] | python | train |
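On the host, the docker branch reduces to one CLI call; a hedged standalone equivalent:

import subprocess

def docker_cp(container_id, target_path, host_path):
    # Same command get_file() sends: docker cp <id>:<target> <host>
    subprocess.check_call(
        ['docker', 'cp', '%s:%s' % (container_id, target_path), host_path])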
RJT1990/pyflux | pyflux/ssm/nllm.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/nllm.py#L300-L308 | def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
self.latent_variables.add_z('Sigma^2 level', fam.Flat(transform='exp'), fam.Normal(0,3)) | [
"def",
"_create_latent_variables",
"(",
"self",
")",
":",
"self",
".",
"latent_variables",
".",
"add_z",
"(",
"'Sigma^2 level'",
",",
"fam",
".",
"Flat",
"(",
"transform",
"=",
"'exp'",
")",
",",
"fam",
".",
"Normal",
"(",
"0",
",",
"3",
")",
")"
] | Creates model latent variables
Returns
----------
None (changes model attributes) | [
"Creates",
"model",
"latent",
"variables"
] | python | train |
vicenteneto/python-cartolafc | cartolafc/api.py | https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L91-L110 | def set_credentials(self, email, password):
""" Realiza a autenticação no sistema do CartolaFC utilizando o email e password informados.
Args:
email (str): O email do usuário
password (str): A senha do usuário
Raises:
cartolafc.CartolaFCError: Se o conjunto (email, password) não conseguiu realizar a autenticação com sucesso.
"""
self._email = email
self._password = password
response = requests.post(self._auth_url,
json=dict(payload=dict(email=self._email, password=self._password, serviceId=4728)))
body = response.json()
if response.status_code == codes.ok:
self._glb_id = body['glbId']
else:
raise CartolaFCError(body['userMessage']) | [
"def",
"set_credentials",
"(",
"self",
",",
"email",
",",
"password",
")",
":",
"self",
".",
"_email",
"=",
"email",
"self",
".",
"_password",
"=",
"password",
"response",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"_auth_url",
",",
"json",
"=",
"dict",
"(",
"payload",
"=",
"dict",
"(",
"email",
"=",
"self",
".",
"_email",
",",
"password",
"=",
"self",
".",
"_password",
",",
"serviceId",
"=",
"4728",
")",
")",
")",
"body",
"=",
"response",
".",
"json",
"(",
")",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"ok",
":",
"self",
".",
"_glb_id",
"=",
"body",
"[",
"'glbId'",
"]",
"else",
":",
"raise",
"CartolaFCError",
"(",
"body",
"[",
"'userMessage'",
"]",
")"
] | Authenticates against the CartolaFC system using the given email and password.
Args:
email (str): The user's email
password (str): The user's password
Raises:
cartolafc.CartolaFCError: If the (email, password) pair could not authenticate successfully. | [
"Realiza",
"a",
"autenticação",
"no",
"sistema",
"do",
"CartolaFC",
"utilizando",
"o",
"email",
"e",
"password",
"informados",
"."
] | python | train |
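Usage sketch with illustrative credentials (the Api entry point follows the project's README):

import cartolafc

api = cartolafc.Api()
api.set_credentials('you@example.com', 's3cr3t')  # raises CartolaFCError on failure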
internetarchive/warc | warc/arc.py | https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L307-L335 | def _read_file_header(self):
"""Reads out the file header for the arc file. If version was
not provided, this will autopopulate it."""
header = self.fileobj.readline()
payload1 = self.fileobj.readline()
payload2 = self.fileobj.readline()
version, reserved, organisation = payload1.split(None, 2)
self.fileobj.readline() # Lose the separator newline
self.header_read = True
# print "--------------------------------------------------"
# print header,"\n", payload1, "\n", payload2,"\n"
# print "--------------------------------------------------"
if self.version and int(self.version) != version:
raise IOError("Version mismatch. Requested version was '%s' but version in file was '%s'"%(self.version, version))
if version == '1':
url, ip_address, date, content_type, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 1
elif version == '2':
url, ip_address, date, content_type, result_code, checksum, location, offset, filename, length = header.split()
self.file_headers = {"ip_address" : ip_address,
"date" : datetime.datetime.strptime(date, "%Y%m%d%H%M%S"),
"org" : organisation}
self.version = 2
else:
raise IOError("Unknown ARC version '%s'"%version) | [
"def",
"_read_file_header",
"(",
"self",
")",
":",
"header",
"=",
"self",
".",
"fileobj",
".",
"readline",
"(",
")",
"payload1",
"=",
"self",
".",
"fileobj",
".",
"readline",
"(",
")",
"payload2",
"=",
"self",
".",
"fileobj",
".",
"readline",
"(",
")",
"version",
",",
"reserved",
",",
"organisation",
"=",
"payload1",
".",
"split",
"(",
"None",
",",
"2",
")",
"self",
".",
"fileobj",
".",
"readline",
"(",
")",
"# Lose the separator newline",
"self",
".",
"header_read",
"=",
"True",
"# print \"--------------------------------------------------\"",
"# print header,\"\\n\", payload1, \"\\n\", payload2,\"\\n\"",
"# print \"--------------------------------------------------\"",
"if",
"self",
".",
"version",
"and",
"int",
"(",
"self",
".",
"version",
")",
"!=",
"version",
":",
"raise",
"IOError",
"(",
"\"Version mismatch. Requested version was '%s' but version in file was '%s'\"",
"%",
"(",
"self",
".",
"version",
",",
"version",
")",
")",
"if",
"version",
"==",
"'1'",
":",
"url",
",",
"ip_address",
",",
"date",
",",
"content_type",
",",
"length",
"=",
"header",
".",
"split",
"(",
")",
"self",
".",
"file_headers",
"=",
"{",
"\"ip_address\"",
":",
"ip_address",
",",
"\"date\"",
":",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"date",
",",
"\"%Y%m%d%H%M%S\"",
")",
",",
"\"org\"",
":",
"organisation",
"}",
"self",
".",
"version",
"=",
"1",
"elif",
"version",
"==",
"'2'",
":",
"url",
",",
"ip_address",
",",
"date",
",",
"content_type",
",",
"result_code",
",",
"checksum",
",",
"location",
",",
"offset",
",",
"filename",
",",
"length",
"=",
"header",
".",
"split",
"(",
")",
"self",
".",
"file_headers",
"=",
"{",
"\"ip_address\"",
":",
"ip_address",
",",
"\"date\"",
":",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"date",
",",
"\"%Y%m%d%H%M%S\"",
")",
",",
"\"org\"",
":",
"organisation",
"}",
"self",
".",
"version",
"=",
"2",
"else",
":",
"raise",
"IOError",
"(",
"\"Unknown ARC version '%s'\"",
"%",
"version",
")"
] | Reads out the file header for the arc file. If version was
not provided, this will autopopulate it. | [
"Reads",
"out",
"the",
"file",
"header",
"for",
"the",
"arc",
"file",
".",
"If",
"version",
"was",
"not",
"provided",
"this",
"will",
"autopopulate",
"it",
"."
] | python | train |
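The version guard in action, sketched with plain PyYAML (the template body is illustrative):

import yaml

tmpl_str = '''
heat_template_version: 2016-10-14
resources: {}
'''
tpl = yaml.safe_load(tmpl_str)
print('heat_template_version' in tpl)  # True: passes the guard above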
saltstack/salt | salt/states/heat.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/heat.py#L80-L99 | def _parse_template(tmpl_str):
'''
Parsing template
'''
tmpl_str = tmpl_str.strip()
if tmpl_str.startswith('{'):
tpl = salt.utils.json.loads(tmpl_str)
else:
try:
tpl = salt.utils.yaml.safe_load(tmpl_str)
except salt.utils.yaml.YAMLError as exc:
raise ValueError(six.text_type(exc))
else:
if tpl is None:
tpl = {}
if not ('HeatTemplateFormatVersion' in tpl
or 'heat_template_version' in tpl
or 'AWSTemplateFormatVersion' in tpl):
raise ValueError(('Template format version not found.'))
return tpl | [
"def",
"_parse_template",
"(",
"tmpl_str",
")",
":",
"tmpl_str",
"=",
"tmpl_str",
".",
"strip",
"(",
")",
"if",
"tmpl_str",
".",
"startswith",
"(",
"'{'",
")",
":",
"tpl",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"tmpl_str",
")",
"else",
":",
"try",
":",
"tpl",
"=",
"salt",
".",
"utils",
".",
"yaml",
".",
"safe_load",
"(",
"tmpl_str",
")",
"except",
"salt",
".",
"utils",
".",
"yaml",
".",
"YAMLError",
"as",
"exc",
":",
"raise",
"ValueError",
"(",
"six",
".",
"text_type",
"(",
"exc",
")",
")",
"else",
":",
"if",
"tpl",
"is",
"None",
":",
"tpl",
"=",
"{",
"}",
"if",
"not",
"(",
"'HeatTemplateFormatVersion'",
"in",
"tpl",
"or",
"'heat_template_version'",
"in",
"tpl",
"or",
"'AWSTemplateFormatVersion'",
"in",
"tpl",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Template format version not found.'",
")",
")",
"return",
"tpl"
] | Parsing template | [
"Parsing",
"template"
] | python | train |
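For the default unit box (the radius keyword comes from the same module):

from holoviews.core.boundingregion import BoundingBox

bb = BoundingBox(radius=0.5)
print(bb.lbrt())  # (-0.5, -0.5, 0.5, 0.5)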
pyviz/holoviews | holoviews/core/boundingregion.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/boundingregion.py#L298-L300 | def lbrt(self):
"""Return (left,bottom,right,top) as a tuple."""
return self._left, self._bottom, self._right, self._top | [
"def",
"lbrt",
"(",
"self",
")",
":",
"return",
"self",
".",
"_left",
",",
"self",
".",
"_bottom",
",",
"self",
".",
"_right",
",",
"self",
".",
"_top"
] | Return (left,bottom,right,top) as a tuple. | [
"Return",
"(",
"left",
"bottom",
"right",
"top",
")",
"as",
"a",
"tuple",
"."
] | python | train |
miyakogi/wdom | wdom/document.py | https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/document.py#L315-L317 | def add_cssfile(self, src: str) -> None:
"""Add CSS file to load at this document's header."""
self.head.appendChild(Link(rel='stylesheet', href=src)) | [
"def",
"add_cssfile",
"(",
"self",
",",
"src",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"head",
".",
"appendChild",
"(",
"Link",
"(",
"rel",
"=",
"'stylesheet'",
",",
"href",
"=",
"src",
")",
")"
] | Add CSS file to load at this document's header. | [
"Add",
"CSS",
"file",
"to",
"load",
"at",
"this",
"document",
"s",
"header",
"."
] | python | train |
amcat/nlpipe | nlpipe/modules/frog.py | https://github.com/amcat/nlpipe/blob/e9dcf0214d5dc6ba3900b8d7359909e1e33f1ce7/nlpipe/modules/frog.py#L48-L63 | def call_frog(text):
"""
Call frog on the text and return (sent, offset, word, lemma, pos, morphofeat, ner, chunk) tuples
"""
host, port = os.environ.get('FROG_HOST', 'localhost:9887').split(":")
frogclient = FrogClient(host, port, returnall=True)
sent = 1
offset = 0
for word, lemma, morph, morphofeat, ner, chunk, _p1, _p2 in frogclient.process(text):
if word is None:
sent += 1
else:
pos = _POSMAP[morphofeat.split("(")[0]]
yield Token(sent, offset, word, lemma, pos, morphofeat, ner, chunk)
offset += len(word) | [
"def",
"call_frog",
"(",
"text",
")",
":",
"host",
",",
"port",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'FROG_HOST'",
",",
"'localhost:9887'",
")",
".",
"split",
"(",
"\":\"",
")",
"frogclient",
"=",
"FrogClient",
"(",
"host",
",",
"port",
",",
"returnall",
"=",
"True",
")",
"sent",
"=",
"1",
"offset",
"=",
"0",
"for",
"word",
",",
"lemma",
",",
"morph",
",",
"morphofeat",
",",
"ner",
",",
"chunk",
",",
"_p1",
",",
"_p2",
"in",
"frogclient",
".",
"process",
"(",
"text",
")",
":",
"if",
"word",
"is",
"None",
":",
"sent",
"+=",
"1",
"else",
":",
"pos",
"=",
"_POSMAP",
"[",
"morphofeat",
".",
"split",
"(",
"\"(\"",
")",
"[",
"0",
"]",
"]",
"yield",
"Token",
"(",
"sent",
",",
"offset",
",",
"word",
",",
"lemma",
",",
"pos",
",",
"morphofeat",
",",
"ner",
",",
"chunk",
")",
"offset",
"+=",
"len",
"(",
"word",
")"
] | Call frog on the text and return (sent, offset, word, lemma, pos, morphofeat) tuples | [
"Call",
"frog",
"on",
"the",
"text",
"and",
"return",
"(",
"sent",
"offset",
"word",
"lemma",
"pos",
"morphofeat",
")",
"tuples"
] | python | train |
chop-dbhi/varify-data-warehouse | vdw/pipeline/checks.py | https://github.com/chop-dbhi/varify-data-warehouse/blob/1600ee1bc5fae6c68fd03b23624467298570cca8/vdw/pipeline/checks.py#L4-L16 | def record_is_valid(record):
"Checks if a record is valid for processing."
# No random contigs
if record.CHROM.startswith('GL'):
return False
# Skip results with a read depth < 5. If no read depth is specified then
# we have no choice but to consider this record as being valid.
if 'DP' in record.INFO and record.INFO['DP'] < 5:
return False
return True | [
"def",
"record_is_valid",
"(",
"record",
")",
":",
"# No random contigs",
"if",
"record",
".",
"CHROM",
".",
"startswith",
"(",
"'GL'",
")",
":",
"return",
"False",
"# Skip results with a read depth < 5. If no read depth is specified then",
"# we have no choice but to consider this record as being valid.",
"if",
"'DP'",
"in",
"record",
".",
"INFO",
"and",
"record",
".",
"INFO",
"[",
"'DP'",
"]",
"<",
"5",
":",
"return",
"False",
"return",
"True"
] | Checks if a record is valid for processing. | [
"Checks",
"if",
"a",
"record",
"is",
"valid",
"for",
"processing",
"."
] | python | train |
chrisspen/burlap | burlap/dj.py | https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/dj.py#L258-L365 | def install_sql(self, site=None, database='default', apps=None, stop_on_error=0, fn=None):
"""
Installs all custom SQL.
"""
#from burlap.db import load_db_set
stop_on_error = int(stop_on_error)
site = site or ALL
name = database
r = self.local_renderer
paths = glob.glob(r.format(r.env.install_sql_path_template))
apps = [_ for _ in (apps or '').split(',') if _.strip()]
if self.verbose:
print('install_sql.apps:', apps)
def cmp_paths(d0, d1):
if d0[1] and d0[1] in d1[2]:
return -1
if d1[1] and d1[1] in d0[2]:
return +1
return cmp(d0[0], d1[0])
def get_paths(t):
"""
Returns SQL file paths in an execution order that respect dependencies.
"""
data = [] # [(path, view_name, content)]
for path in paths:
if fn and fn not in path:
continue
parts = path.split('.')
if len(parts) == 3 and parts[1] != t:
continue
if not path.lower().endswith('.sql'):
continue
content = open(path, 'r').read()
matches = re.findall(r'[\s\t]+VIEW[\s\t]+([a-zA-Z0-9_]{3,})', content, flags=re.IGNORECASE)
view_name = ''
if matches:
view_name = matches[0]
print('Found view %s.' % view_name)
data.append((path, view_name, content))
for d in sorted(data, cmp=cmp_paths):
yield d[0]
def run_paths(paths, cmd_template, max_retries=3):
r = self.local_renderer
paths = list(paths)
error_counts = defaultdict(int) # {path:count}
terminal = set()
if self.verbose:
print('Checking %i paths.' % len(paths))
while paths:
path = paths.pop(0)
if self.verbose:
print('path:', path)
app_name = re.findall(r'/([^/]+)/sql/', path)[0]
if apps and app_name not in apps:
self.vprint('skipping because app_name %s not in apps' % app_name)
continue
with self.settings(warn_only=True):
if self.is_local:
r.env.sql_path = path
else:
r.env.sql_path = '/tmp/%s' % os.path.split(path)[-1]
r.put(local_path=path, remote_path=r.env.sql_path)
ret = r.run_or_local(cmd_template)
if ret and ret.return_code:
if stop_on_error:
raise Exception('Unable to execute file %s' % path)
error_counts[path] += 1
if error_counts[path] < max_retries:
paths.append(path)
else:
terminal.add(path)
if terminal:
print('%i files could not be loaded.' % len(terminal), file=sys.stderr)
for path in sorted(list(terminal)):
print(path, file=sys.stderr)
print(file=sys.stderr)
if self.verbose:
print('install_sql.db_engine:', r.env.db_engine)
for _site, site_data in self.iter_sites(site=site, no_secure=True):
self.set_db(name=name, site=_site)
if 'postgres' in r.env.db_engine or 'postgis' in r.env.db_engine:
paths = list(get_paths('postgresql'))
run_paths(
paths=paths,
cmd_template="psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}")
elif 'mysql' in r.env.db_engine:
paths = list(get_paths('mysql'))
run_paths(
paths=paths,
cmd_template="mysql -v -h {db_host} -u {db_user} -p'{db_password}' {db_name} < {sql_path}")
else:
raise NotImplementedError | [
"def",
"install_sql",
"(",
"self",
",",
"site",
"=",
"None",
",",
"database",
"=",
"'default'",
",",
"apps",
"=",
"None",
",",
"stop_on_error",
"=",
"0",
",",
"fn",
"=",
"None",
")",
":",
"#from burlap.db import load_db_set",
"stop_on_error",
"=",
"int",
"(",
"stop_on_error",
")",
"site",
"=",
"site",
"or",
"ALL",
"name",
"=",
"database",
"r",
"=",
"self",
".",
"local_renderer",
"paths",
"=",
"glob",
".",
"glob",
"(",
"r",
".",
"format",
"(",
"r",
".",
"env",
".",
"install_sql_path_template",
")",
")",
"apps",
"=",
"[",
"_",
"for",
"_",
"in",
"(",
"apps",
"or",
"''",
")",
".",
"split",
"(",
"','",
")",
"if",
"_",
".",
"strip",
"(",
")",
"]",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'install_sql.apps:'",
",",
"apps",
")",
"def",
"cmp_paths",
"(",
"d0",
",",
"d1",
")",
":",
"if",
"d0",
"[",
"1",
"]",
"and",
"d0",
"[",
"1",
"]",
"in",
"d1",
"[",
"2",
"]",
":",
"return",
"-",
"1",
"if",
"d1",
"[",
"1",
"]",
"and",
"d1",
"[",
"1",
"]",
"in",
"d0",
"[",
"2",
"]",
":",
"return",
"+",
"1",
"return",
"cmp",
"(",
"d0",
"[",
"0",
"]",
",",
"d1",
"[",
"0",
"]",
")",
"def",
"get_paths",
"(",
"t",
")",
":",
"\"\"\"\n Returns SQL file paths in an execution order that respect dependencies.\n \"\"\"",
"data",
"=",
"[",
"]",
"# [(path, view_name, content)]",
"for",
"path",
"in",
"paths",
":",
"if",
"fn",
"and",
"fn",
"not",
"in",
"path",
":",
"continue",
"parts",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"3",
"and",
"parts",
"[",
"1",
"]",
"!=",
"t",
":",
"continue",
"if",
"not",
"path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.sql'",
")",
":",
"continue",
"content",
"=",
"open",
"(",
"path",
",",
"'r'",
")",
".",
"read",
"(",
")",
"matches",
"=",
"re",
".",
"findall",
"(",
"r'[\\s\\t]+VIEW[\\s\\t]+([a-zA-Z0-9_]{3,})'",
",",
"content",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"view_name",
"=",
"''",
"if",
"matches",
":",
"view_name",
"=",
"matches",
"[",
"0",
"]",
"print",
"(",
"'Found view %s.'",
"%",
"view_name",
")",
"data",
".",
"append",
"(",
"(",
"path",
",",
"view_name",
",",
"content",
")",
")",
"for",
"d",
"in",
"sorted",
"(",
"data",
",",
"cmp",
"=",
"cmp_paths",
")",
":",
"yield",
"d",
"[",
"0",
"]",
"def",
"run_paths",
"(",
"paths",
",",
"cmd_template",
",",
"max_retries",
"=",
"3",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"paths",
"=",
"list",
"(",
"paths",
")",
"error_counts",
"=",
"defaultdict",
"(",
"int",
")",
"# {path:count}",
"terminal",
"=",
"set",
"(",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'Checking %i paths.'",
"%",
"len",
"(",
"paths",
")",
")",
"while",
"paths",
":",
"path",
"=",
"paths",
".",
"pop",
"(",
"0",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'path:'",
",",
"path",
")",
"app_name",
"=",
"re",
".",
"findall",
"(",
"r'/([^/]+)/sql/'",
",",
"path",
")",
"[",
"0",
"]",
"if",
"apps",
"and",
"app_name",
"not",
"in",
"apps",
":",
"self",
".",
"vprint",
"(",
"'skipping because app_name %s not in apps'",
"%",
"app_name",
")",
"continue",
"with",
"self",
".",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"if",
"self",
".",
"is_local",
":",
"r",
".",
"env",
".",
"sql_path",
"=",
"path",
"else",
":",
"r",
".",
"env",
".",
"sql_path",
"=",
"'/tmp/%s'",
"%",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"[",
"-",
"1",
"]",
"r",
".",
"put",
"(",
"local_path",
"=",
"path",
",",
"remote_path",
"=",
"r",
".",
"env",
".",
"sql_path",
")",
"ret",
"=",
"r",
".",
"run_or_local",
"(",
"cmd_template",
")",
"if",
"ret",
"and",
"ret",
".",
"return_code",
":",
"if",
"stop_on_error",
":",
"raise",
"Exception",
"(",
"'Unable to execute file %s'",
"%",
"path",
")",
"error_counts",
"[",
"path",
"]",
"+=",
"1",
"if",
"error_counts",
"[",
"path",
"]",
"<",
"max_retries",
":",
"paths",
".",
"append",
"(",
"path",
")",
"else",
":",
"terminal",
".",
"add",
"(",
"path",
")",
"if",
"terminal",
":",
"print",
"(",
"'%i files could not be loaded.'",
"%",
"len",
"(",
"terminal",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"for",
"path",
"in",
"sorted",
"(",
"list",
"(",
"terminal",
")",
")",
":",
"print",
"(",
"path",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"'install_sql.db_engine:'",
",",
"r",
".",
"env",
".",
"db_engine",
")",
"for",
"_site",
",",
"site_data",
"in",
"self",
".",
"iter_sites",
"(",
"site",
"=",
"site",
",",
"no_secure",
"=",
"True",
")",
":",
"self",
".",
"set_db",
"(",
"name",
"=",
"name",
",",
"site",
"=",
"_site",
")",
"if",
"'postgres'",
"in",
"r",
".",
"env",
".",
"db_engine",
"or",
"'postgis'",
"in",
"r",
".",
"env",
".",
"db_engine",
":",
"paths",
"=",
"list",
"(",
"get_paths",
"(",
"'postgresql'",
")",
")",
"run_paths",
"(",
"paths",
"=",
"paths",
",",
"cmd_template",
"=",
"\"psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}\"",
")",
"elif",
"'mysql'",
"in",
"r",
".",
"env",
".",
"db_engine",
":",
"paths",
"=",
"list",
"(",
"get_paths",
"(",
"'mysql'",
")",
")",
"run_paths",
"(",
"paths",
"=",
"paths",
",",
"cmd_template",
"=",
"\"mysql -v -h {db_host} -u {db_user} -p'{db_password}' {db_name} < {sql_path}\"",
")",
"else",
":",
"raise",
"NotImplementedError"
] | Installs all custom SQL. | [
"Installs",
"all",
"custom",
"SQL",
"."
] | python | valid |
contentful/contentful.py | contentful/client.py | https://github.com/contentful/contentful.py/blob/73fe01d6ae5a1f8818880da65199107b584681dd/contentful/client.py#L259-L279 | def asset(self, asset_id, query=None):
"""Fetches an Asset by ID.
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/assets/asset/get-a-single-asset
:param asset_id: The ID of the target Asset.
:param query: (optional) Dict with API options.
:return: :class:`Asset <Asset>` object.
:rtype: contentful.asset.Asset
Usage:
>>> nyancat_asset = client.asset('nyancat')
<Asset id='nyancat' url='//images.contentful.com/cfex...'>
"""
return self._get(
self.environment_url(
'/assets/{0}'.format(asset_id)
),
query
) | [
"def",
"asset",
"(",
"self",
",",
"asset_id",
",",
"query",
"=",
"None",
")",
":",
"return",
"self",
".",
"_get",
"(",
"self",
".",
"environment_url",
"(",
"'/assets/{0}'",
".",
"format",
"(",
"asset_id",
")",
")",
",",
"query",
")"
] | Fetches an Asset by ID.
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/assets/asset/get-a-single-asset
:param asset_id: The ID of the target Asset.
:param query: (optional) Dict with API options.
:return: :class:`Asset <Asset>` object.
:rtype: contentful.asset.Asset
Usage:
>>> nyancat_asset = client.asset('nyancat')
<Asset id='nyancat' url='//images.contentful.com/cfex...'> | [
"Fetches",
"an",
"Asset",
"by",
"ID",
"."
] | python | train |
JarryShaw/PyPCAPKit | src/protocols/internet/hip.py | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/hip.py#L1600-L1645 | def _read_para_esp_transform(self, code, cbit, clen, *, desc, length, version):
"""Read HIP ESP_TRANSFORM parameter.
Structure of HIP ESP_TRANSFORM parameter [RFC 7402]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved | Suite ID #1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Suite ID #2 | Suite ID #3 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Suite ID #n | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 esp_transform.type Parameter Type
1 15 esp_transform.critical Critical Bit
2 16 esp_transform.length Length of Contents
4 32 - Reserved
6 48 esp_transform.id Suite ID
............
? ? - Padding
"""
if clen % 2 != 0:
raise ProtocolError(f'HIPv{version}: [ParamNo {code}] invalid format')
_resv = self._read_fileng(2)
_stid = list()
for _ in range((clen - 2) // 2):
_stid.append(_ESP_SUITE_ID.get(self._read_unpack(2), 'Unassigned'))
esp_transform = dict(
type=desc,
critical=cbit,
length=clen,
id=tuple(_stid),
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return esp_transform | [
"def",
"_read_para_esp_transform",
"(",
"self",
",",
"code",
",",
"cbit",
",",
"clen",
",",
"*",
",",
"desc",
",",
"length",
",",
"version",
")",
":",
"if",
"clen",
"%",
"2",
"!=",
"0",
":",
"raise",
"ProtocolError",
"(",
"f'HIPv{version}: [Parano {code}] invalid format'",
")",
"_resv",
"=",
"self",
".",
"_read_fileng",
"(",
"2",
")",
"_stid",
"=",
"list",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"(",
"clen",
"-",
"2",
")",
"//",
"2",
")",
":",
"_stid",
".",
"append",
"(",
"_ESP_SUITE_ID",
".",
"get",
"(",
"self",
".",
"_read_unpack",
"(",
"2",
")",
",",
"'Unassigned'",
")",
")",
"esp_transform",
"=",
"dict",
"(",
"type",
"=",
"desc",
",",
"critical",
"=",
"cbit",
",",
"length",
"=",
"clen",
",",
"id",
"=",
"tuple",
"(",
"_stid",
")",
",",
")",
"_plen",
"=",
"length",
"-",
"clen",
"if",
"_plen",
":",
"self",
".",
"_read_fileng",
"(",
"_plen",
")",
"return",
"esp_transform"
] | Read HIP ESP_TRANSFORM parameter.
Structure of HIP ESP_TRANSFORM parameter [RFC 7402]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved | Suite ID #1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Suite ID #2 | Suite ID #3 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Suite ID #n | Padding |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 esp_transform.type Parameter Type
1 15 esp_transform.critical Critical Bit
2 16 esp_transform.length Length of Contents
4 32 - Reserved
6 48 esp_transform.id Suite ID
............
? ? - Padding | [
"Read",
"HIP",
"ESP_TRANSFORM",
"parameter",
"."
] | python | train |
quantopian/zipline | zipline/finance/metrics/tracker.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L277-L328 | def handle_market_close(self, dt, data_portal):
"""Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet.
"""
completed_session = self._current_session
if self.emission_rate == 'daily':
# this method is called for both minutely and daily emissions, but
# this chunk of code here only applies for daily emissions. (since
# it's done every minute, elsewhere, for minutely emission).
self.sync_last_sale_prices(dt, data_portal)
session_ix = self._session_count
# increment the day counter before we move markers forward.
self._session_count += 1
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'daily_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_session(session_ix)
self.end_of_session(
packet,
ledger,
completed_session,
session_ix,
data_portal,
)
return packet | [
"def",
"handle_market_close",
"(",
"self",
",",
"dt",
",",
"data_portal",
")",
":",
"completed_session",
"=",
"self",
".",
"_current_session",
"if",
"self",
".",
"emission_rate",
"==",
"'daily'",
":",
"# this method is called for both minutely and daily emissions, but",
"# this chunk of code here only applies for daily emissions. (since",
"# it's done every minute, elsewhere, for minutely emission).",
"self",
".",
"sync_last_sale_prices",
"(",
"dt",
",",
"data_portal",
")",
"session_ix",
"=",
"self",
".",
"_session_count",
"# increment the day counter before we move markers forward.",
"self",
".",
"_session_count",
"+=",
"1",
"packet",
"=",
"{",
"'period_start'",
":",
"self",
".",
"_first_session",
",",
"'period_end'",
":",
"self",
".",
"_last_session",
",",
"'capital_base'",
":",
"self",
".",
"_capital_base",
",",
"'daily_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_market_open",
",",
"'period_close'",
":",
"dt",
",",
"}",
",",
"'cumulative_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_first_session",
",",
"'period_close'",
":",
"self",
".",
"_last_session",
",",
"}",
",",
"'progress'",
":",
"self",
".",
"_progress",
"(",
"self",
")",
",",
"'cumulative_risk_metrics'",
":",
"{",
"}",
",",
"}",
"ledger",
"=",
"self",
".",
"_ledger",
"ledger",
".",
"end_of_session",
"(",
"session_ix",
")",
"self",
".",
"end_of_session",
"(",
"packet",
",",
"ledger",
",",
"completed_session",
",",
"session_ix",
",",
"data_portal",
",",
")",
"return",
"packet"
] | Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet. | [
"Handles",
"the",
"close",
"of",
"the",
"given",
"day",
"."
] | python | train |
ArangoDB-Community/pyArango | pyArango/collection.py | https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L664-L675 | def validateField(cls, fieldName, value) :
"""checks if 'value' is valid for field 'fieldName'. If the validation is unsuccessful, raises a SchemaViolation or a ValidationError.
for nested dicts ex: {address : { street: xxx} }, fieldName can take the form address.street
"""
try :
valValue = Collection.validateField(fieldName, value)
except SchemaViolation as e:
if fieldName == "_from" or fieldName == "_to" :
return True
else :
raise e
return valValue | [
"def",
"validateField",
"(",
"cls",
",",
"fieldName",
",",
"value",
")",
":",
"try",
":",
"valValue",
"=",
"Collection",
".",
"validateField",
"(",
"fieldName",
",",
"value",
")",
"except",
"SchemaViolation",
"as",
"e",
":",
"if",
"fieldName",
"==",
"\"_from\"",
"or",
"fieldName",
"==",
"\"_to\"",
":",
"return",
"True",
"else",
":",
"raise",
"e",
"return",
"valValue"
] | checks if 'value' is valid for field 'fieldName'. If the validation is unsuccessful, raises a SchemaViolation or a ValidationError.
for nested dicts ex: {address : { street: xxx} }, fieldName can take the form address.street | [
"checks",
"if",
"value",
"is",
"valid",
"for",
"field",
"fieldName",
".",
"If",
"the",
"validation",
"is",
"unsuccessful",
"raises",
"a",
"SchemaViolation",
"or",
"a",
"ValidationError",
".",
"for",
"nested",
"dicts",
"ex",
":",
"{",
"address",
":",
"{",
"street",
":",
"xxx",
"}",
"}",
"fieldName",
"can",
"take",
"the",
"form",
"address",
".",
"street"
] | python | train |
numenta/nupic | src/nupic/engine/__init__.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/engine/__init__.py#L541-L549 | def setParameter(self, paramName, value):
"""Set parameter value"""
(setter, getter) = self._getParameterMethods(paramName)
if setter is None:
import exceptions
raise exceptions.Exception(
"setParameter -- parameter name '%s' does not exist in region %s of type %s"
% (paramName, self.name, self.type))
setter(paramName, value) | [
"def",
"setParameter",
"(",
"self",
",",
"paramName",
",",
"value",
")",
":",
"(",
"setter",
",",
"getter",
")",
"=",
"self",
".",
"_getParameterMethods",
"(",
"paramName",
")",
"if",
"setter",
"is",
"None",
":",
"import",
"exceptions",
"raise",
"exceptions",
".",
"Exception",
"(",
"\"setParameter -- parameter name '%s' does not exist in region %s of type %s\"",
"%",
"(",
"paramName",
",",
"self",
".",
"name",
",",
"self",
".",
"type",
")",
")",
"setter",
"(",
"paramName",
",",
"value",
")"
] | Set parameter value | [
"Set",
"parameter",
"value"
] | python | valid |
msuozzo/Aduro | aduro/events.py | https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/events.py#L69-L76 | def from_str(string):
"""Generate a `AddEvent` object from a string
"""
match = re.match(r'^ADD (\w+)$', string)
if match:
return AddEvent(match.group(1))
else:
raise EventParseError | [
"def",
"from_str",
"(",
"string",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^ADD (\\w+)$'",
",",
"string",
")",
"if",
"match",
":",
"return",
"AddEvent",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"else",
":",
"raise",
"EventParseError"
] | Generate an `AddEvent` object from a string | [
"Generate",
"a",
"AddEvent",
"object",
"from",
"a",
"string"
] | python | train |
andrewda/frc-livescore | livescore/simpleocr_utils/processor.py | https://github.com/andrewda/frc-livescore/blob/71594cd6d2c8b6c5feb3889bb05552d09b8128b1/livescore/simpleocr_utils/processor.py#L66-L71 | def get_parameters(self):
"""returns a dictionary with the processor's stored parameters"""
parameter_names = self.PARAMETERS.keys()
# the instance ('self') holds the current parameter values
parameter_values = [getattr(self, n) for n in parameter_names]
return dict(zip(parameter_names, parameter_values)) | [
"def",
"get_parameters",
"(",
"self",
")",
":",
"parameter_names",
"=",
"self",
".",
"PARAMETERS",
".",
"keys",
"(",
")",
"# TODO: Unresolved reference for processor",
"parameter_values",
"=",
"[",
"getattr",
"(",
"processor",
",",
"n",
")",
"for",
"n",
"in",
"parameter_names",
"]",
"return",
"dict",
"(",
"zip",
"(",
"parameter_names",
",",
"parameter_values",
")",
")"
] | returns a dictionary with the processor's stored parameters | [
"returns",
"a",
"dictionary",
"with",
"the",
"processor",
"s",
"stored",
"parameters"
] | python | train |
jorgenschaefer/elpy | elpy/server.py | https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/server.py#L109-L114 | def rpc_get_definition(self, filename, source, offset):
"""Get the location of the definition for the symbol at the offset.
"""
return self._call_backend("rpc_get_definition", None, filename,
get_source(source), offset) | [
"def",
"rpc_get_definition",
"(",
"self",
",",
"filename",
",",
"source",
",",
"offset",
")",
":",
"return",
"self",
".",
"_call_backend",
"(",
"\"rpc_get_definition\"",
",",
"None",
",",
"filename",
",",
"get_source",
"(",
"source",
")",
",",
"offset",
")"
] | Get the location of the definition for the symbol at the offset. | [
"Get",
"the",
"location",
"of",
"the",
"definition",
"for",
"the",
"symbol",
"at",
"the",
"offset",
"."
] | python | train |
DataDog/integrations-core | spark/datadog_checks/spark/spark.py | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/spark/datadog_checks/spark/spark.py#L693-L711 | def _rest_request_to_json(self, address, object_path, service_name, requests_config, tags, *args, **kwargs):
"""
Query the given URL and return the JSON response
"""
response = self._rest_request(address, object_path, service_name, requests_config, tags, *args, **kwargs)
try:
response_json = response.json()
except JSONDecodeError as e:
self.service_check(
service_name,
AgentCheck.CRITICAL,
tags=['url:%s' % self._get_url_base(address)] + tags,
message='JSON Parse failed: {0}'.format(e),
)
raise
return response_json | [
"def",
"_rest_request_to_json",
"(",
"self",
",",
"address",
",",
"object_path",
",",
"service_name",
",",
"requests_config",
",",
"tags",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"self",
".",
"_rest_request",
"(",
"address",
",",
"object_path",
",",
"service_name",
",",
"requests_config",
",",
"tags",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"response_json",
"=",
"response",
".",
"json",
"(",
")",
"except",
"JSONDecodeError",
"as",
"e",
":",
"self",
".",
"service_check",
"(",
"service_name",
",",
"AgentCheck",
".",
"CRITICAL",
",",
"tags",
"=",
"[",
"'url:%s'",
"%",
"self",
".",
"_get_url_base",
"(",
"address",
")",
"]",
"+",
"tags",
",",
"message",
"=",
"'JSON Parse failed: {0}'",
".",
"format",
"(",
"e",
")",
",",
")",
"raise",
"return",
"response_json"
] | Query the given URL and return the JSON response | [
"Query",
"the",
"given",
"URL",
"and",
"return",
"the",
"JSON",
"response"
] | python | train |
HttpRunner/HttpRunner | httprunner/report.py | https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/report.py#L218-L245 | def __expand_meta_datas(meta_datas, meta_datas_expanded):
""" expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3]
"""
if isinstance(meta_datas, dict):
meta_datas_expanded.append(meta_datas)
elif isinstance(meta_datas, list):
for meta_data in meta_datas:
__expand_meta_datas(meta_data, meta_datas_expanded) | [
"def",
"__expand_meta_datas",
"(",
"meta_datas",
",",
"meta_datas_expanded",
")",
":",
"if",
"isinstance",
"(",
"meta_datas",
",",
"dict",
")",
":",
"meta_datas_expanded",
".",
"append",
"(",
"meta_datas",
")",
"elif",
"isinstance",
"(",
"meta_datas",
",",
"list",
")",
":",
"for",
"meta_data",
"in",
"meta_datas",
":",
"__expand_meta_datas",
"(",
"meta_data",
",",
"meta_datas_expanded",
")"
] | expand meta_datas to one level
Args:
meta_datas (dict/list): maybe in nested format
Returns:
list: expanded list in one level
Examples:
>>> meta_datas = [
[
dict1,
dict2
],
dict3
]
>>> meta_datas_expanded = []
>>> __expand_meta_datas(meta_datas, meta_datas_expanded)
>>> print(meta_datas_expanded)
[dict1, dict2, dict3] | [
"expand",
"meta_datas",
"to",
"one",
"level"
] | python | train |
siznax/wptools | wptools/page.py | https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/page.py#L491-L540 | def get(self, show=True, proxy=None, timeout=0):
"""
Make Mediawiki, RESTBase, and Wikidata requests for page data
some sequence of:
- get_parse()
- get_query()
- get_restbase()
- get_wikidata()
"""
wikibase = self.params.get('wikibase')
if wikibase:
self.flags['defer_imageinfo'] = True
self.get_wikidata(False, proxy, timeout)
self.get_query(False, proxy, timeout)
self.get_parse(False, proxy, timeout)
self.flags['defer_imageinfo'] = False
self.get_restbase('/page/summary/', False, proxy, timeout)
if show and not self.flags.get('silent'):
self.show()
else:
self.flags['defer_imageinfo'] = True
self.get_query(False, proxy, timeout)
self.get_parse(False, proxy, timeout)
if not self.data.get('wikibase'):
self.skip_action('wikidata')
self.get_wikidata(False, proxy, timeout)
self.flags['defer_imageinfo'] = False
wiki = self.params.get('wiki')
if wiki and 'wikipedia.org' not in wiki:
self.skip_action('restbase')
self.get_restbase('/page/summary/', False, proxy, timeout)
if show and not self.flags.get('silent'):
self.show()
return self | [
"def",
"get",
"(",
"self",
",",
"show",
"=",
"True",
",",
"proxy",
"=",
"None",
",",
"timeout",
"=",
"0",
")",
":",
"wikibase",
"=",
"self",
".",
"params",
".",
"get",
"(",
"'wikibase'",
")",
"if",
"wikibase",
":",
"self",
".",
"flags",
"[",
"'defer_imageinfo'",
"]",
"=",
"True",
"self",
".",
"get_wikidata",
"(",
"False",
",",
"proxy",
",",
"timeout",
")",
"self",
".",
"get_query",
"(",
"False",
",",
"proxy",
",",
"timeout",
")",
"self",
".",
"get_parse",
"(",
"False",
",",
"proxy",
",",
"timeout",
")",
"self",
".",
"flags",
"[",
"'defer_imageinfo'",
"]",
"=",
"False",
"self",
".",
"get_restbase",
"(",
"'/page/summary/'",
",",
"False",
",",
"proxy",
",",
"timeout",
")",
"if",
"show",
"and",
"not",
"self",
".",
"flags",
".",
"get",
"(",
"'silent'",
")",
":",
"self",
".",
"show",
"(",
")",
"else",
":",
"self",
".",
"flags",
"[",
"'defer_imageinfo'",
"]",
"=",
"True",
"self",
".",
"get_query",
"(",
"False",
",",
"proxy",
",",
"timeout",
")",
"self",
".",
"get_parse",
"(",
"False",
",",
"proxy",
",",
"timeout",
")",
"if",
"not",
"self",
".",
"data",
".",
"get",
"(",
"'wikibase'",
")",
":",
"self",
".",
"skip_action",
"(",
"'wikidata'",
")",
"self",
".",
"get_wikidata",
"(",
"False",
",",
"proxy",
",",
"timeout",
")",
"self",
".",
"flags",
"[",
"'defer_imageinfo'",
"]",
"=",
"False",
"wiki",
"=",
"self",
".",
"params",
".",
"get",
"(",
"'wiki'",
")",
"if",
"wiki",
"and",
"'wikipedia.org'",
"not",
"in",
"wiki",
":",
"self",
".",
"skip_action",
"(",
"'restbase'",
")",
"self",
".",
"get_restbase",
"(",
"'/page/summary/'",
",",
"False",
",",
"proxy",
",",
"timeout",
")",
"if",
"show",
"and",
"not",
"self",
".",
"flags",
".",
"get",
"(",
"'silent'",
")",
":",
"self",
".",
"show",
"(",
")",
"return",
"self"
] | Make Mediawiki, RESTBase, and Wikidata requests for page data
some sequence of:
- get_parse()
- get_query()
- get_restbase()
- get_wikidata() | [
"Make",
"Mediawiki",
"RESTBase",
"and",
"Wikidata",
"requests",
"for",
"page",
"data",
"some",
"sequence",
"of",
":",
"-",
"get_parse",
"()",
"-",
"get_query",
"()",
"-",
"get_restbase",
"()",
"-",
"get_wikidata",
"()"
] | python | train |
openstack/horizon | openstack_auth/user.py | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/user.py#L344-L357 | def has_a_matching_perm(self, perm_list, obj=None):
"""Returns True if the user has one of the specified permissions.
If object is passed, it checks if the user has any of the required
perms for this object.
"""
# If there are no permissions to check, just return true
if not perm_list:
return True
# Check that user has at least one of the required permissions.
for perm in perm_list:
if self.has_perm(perm, obj):
return True
return False | [
"def",
"has_a_matching_perm",
"(",
"self",
",",
"perm_list",
",",
"obj",
"=",
"None",
")",
":",
"# If there are no permissions to check, just return true",
"if",
"not",
"perm_list",
":",
"return",
"True",
"# Check that user has at least one of the required permissions.",
"for",
"perm",
"in",
"perm_list",
":",
"if",
"self",
".",
"has_perm",
"(",
"perm",
",",
"obj",
")",
":",
"return",
"True",
"return",
"False"
] | Returns True if the user has one of the specified permissions.
If object is passed, it checks if the user has any of the required
perms for this object. | [
"Returns",
"True",
"if",
"the",
"user",
"has",
"one",
"of",
"the",
"specified",
"permissions",
"."
] | python | train |
mdorn/pyinstapaper | pyinstapaper/instapaper.py | https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L34-L52 | def login(self, username, password):
'''Authenticate using XAuth variant of OAuth.
:param str username: Username or email address for the relevant account
:param str password: Password for the account
'''
response = self.request(
ACCESS_TOKEN,
{
'x_auth_mode': 'client_auth',
'x_auth_username': username,
'x_auth_password': password
},
returns_json=False
)
token = dict(parse_qsl(response['data'].decode()))
self.token = oauth.Token(
token['oauth_token'], token['oauth_token_secret'])
self.oauth_client = oauth.Client(self.consumer, self.token) | [
"def",
"login",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"response",
"=",
"self",
".",
"request",
"(",
"ACCESS_TOKEN",
",",
"{",
"'x_auth_mode'",
":",
"'client_auth'",
",",
"'x_auth_username'",
":",
"username",
",",
"'x_auth_password'",
":",
"password",
"}",
",",
"returns_json",
"=",
"False",
")",
"token",
"=",
"dict",
"(",
"parse_qsl",
"(",
"response",
"[",
"'data'",
"]",
".",
"decode",
"(",
")",
")",
")",
"self",
".",
"token",
"=",
"oauth",
".",
"Token",
"(",
"token",
"[",
"'oauth_token'",
"]",
",",
"token",
"[",
"'oauth_token_secret'",
"]",
")",
"self",
".",
"oauth_client",
"=",
"oauth",
".",
"Client",
"(",
"self",
".",
"consumer",
",",
"self",
".",
"token",
")"
] | Authenticate using XAuth variant of OAuth.
:param str username: Username or email address for the relevant account
:param str password: Password for the account | [
"Authenticate",
"using",
"XAuth",
"variant",
"of",
"OAuth",
"."
] | python | train |
planetlabs/planet-client-python | planet/api/client.py | https://github.com/planetlabs/planet-client-python/blob/1c62ce7d416819951dddee0c22068fef6d40b027/planet/api/client.py#L283-L292 | def get_mosaic_by_name(self, name):
'''Get the API representation of a mosaic by name.
:param name str: The name of the mosaic
:returns: :py:Class:`planet.api.models.Mosaics`
:raises planet.api.exceptions.APIException: On API error.
'''
params = {'name__is': name}
url = self._url('basemaps/v1/mosaics')
return self._get(url, models.Mosaics, params=params).get_body() | [
"def",
"get_mosaic_by_name",
"(",
"self",
",",
"name",
")",
":",
"params",
"=",
"{",
"'name__is'",
":",
"name",
"}",
"url",
"=",
"self",
".",
"_url",
"(",
"'basemaps/v1/mosaics'",
")",
"return",
"self",
".",
"_get",
"(",
"url",
",",
"models",
".",
"Mosaics",
",",
"params",
"=",
"params",
")",
".",
"get_body",
"(",
")"
] | Get the API representation of a mosaic by name.
:param name str: The name of the mosaic
:returns: :py:Class:`planet.api.models.Mosaics`
:raises planet.api.exceptions.APIException: On API error. | [
"Get",
"the",
"API",
"representation",
"of",
"a",
"mosaic",
"by",
"name",
"."
] | python | train |
rbuffat/pyepw | pyepw/epw.py | https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L2791-L2811 | def ws050(self, value=None):
""" Corresponds to IDD Field `ws050`
Wind speed corresponding to 5.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `ws050`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `ws050`'.format(value))
self._ws050 = value | [
"def",
"ws050",
"(",
"self",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'value {} need to be of type float '",
"'for field `ws050`'",
".",
"format",
"(",
"value",
")",
")",
"self",
".",
"_ws050",
"=",
"value"
] | Corresponds to IDD Field `ws050`
Wind speed corresponding to 5.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `ws050`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | [
"Corresponds",
"to",
"IDD",
"Field",
"ws050",
"Wind",
"speed",
"corresponding",
"5",
".",
"0%",
"annual",
"cumulative",
"frequency",
"of",
"occurrence"
] | python | train |
NYUCCL/psiTurk | psiturk/experiment.py | https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L128-L176 | def get_random_condcount(mode):
"""
HITs can be in one of three states:
- jobs that are finished
- jobs that are started but not finished
- jobs that are never going to finish (user decided not to do it)
Our count should be based on the first two, so we count any tasks finished
or any tasks not finished that were started in the last cutoff_time
minutes, as specified in the cutoff_time variable in the config file.
Returns a tuple: (cond, condition)
"""
cutofftime = datetime.timedelta(minutes=-CONFIG.getint('Server Parameters',
'cutoff_time'))
starttime = datetime.datetime.now() + cutofftime
try:
conditions = json.load(open(os.path.join(app.root_path, 'conditions.json')))
numconds = len(conditions.keys())
numcounts = 1
except IOError as e:
numconds = CONFIG.getint('Task Parameters', 'num_conds')
numcounts = CONFIG.getint('Task Parameters', 'num_counters')
participants = Participant.query.\
filter(Participant.codeversion == \
CONFIG.get('Task Parameters', 'experiment_code_version')).\
filter(Participant.mode == mode).\
filter(or_(Participant.status == COMPLETED,
Participant.status == CREDITED,
Participant.status == SUBMITTED,
Participant.status == BONUSED,
Participant.beginhit > starttime)).all()
counts = Counter()
for cond in range(numconds):
for counter in range(numcounts):
counts[(cond, counter)] = 0
for participant in participants:
condcount = (participant.cond, participant.counterbalance)
if condcount in counts:
counts[condcount] += 1
mincount = min(counts.values())
minima = [hsh for hsh, count in counts.iteritems() if count == mincount]
chosen = choice(minima)
#conds += [ 0 for _ in range(1000) ]
#conds += [ 1 for _ in range(1000) ]
app.logger.info("given %(a)s chose %(b)s" % {'a': counts, 'b': chosen})
return chosen | [
"def",
"get_random_condcount",
"(",
"mode",
")",
":",
"cutofftime",
"=",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"-",
"CONFIG",
".",
"getint",
"(",
"'Server Parameters'",
",",
"'cutoff_time'",
")",
")",
"starttime",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"+",
"cutofftime",
"try",
":",
"conditions",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"root_path",
",",
"'conditions.json'",
")",
")",
")",
"numconds",
"=",
"len",
"(",
"conditions",
".",
"keys",
"(",
")",
")",
"numcounts",
"=",
"1",
"except",
"IOError",
"as",
"e",
":",
"numconds",
"=",
"CONFIG",
".",
"getint",
"(",
"'Task Parameters'",
",",
"'num_conds'",
")",
"numcounts",
"=",
"CONFIG",
".",
"getint",
"(",
"'Task Parameters'",
",",
"'num_counters'",
")",
"participants",
"=",
"Participant",
".",
"query",
".",
"filter",
"(",
"Participant",
".",
"codeversion",
"==",
"CONFIG",
".",
"get",
"(",
"'Task Parameters'",
",",
"'experiment_code_version'",
")",
")",
".",
"filter",
"(",
"Participant",
".",
"mode",
"==",
"mode",
")",
".",
"filter",
"(",
"or_",
"(",
"Participant",
".",
"status",
"==",
"COMPLETED",
",",
"Participant",
".",
"status",
"==",
"CREDITED",
",",
"Participant",
".",
"status",
"==",
"SUBMITTED",
",",
"Participant",
".",
"status",
"==",
"BONUSED",
",",
"Participant",
".",
"beginhit",
">",
"starttime",
")",
")",
".",
"all",
"(",
")",
"counts",
"=",
"Counter",
"(",
")",
"for",
"cond",
"in",
"range",
"(",
"numconds",
")",
":",
"for",
"counter",
"in",
"range",
"(",
"numcounts",
")",
":",
"counts",
"[",
"(",
"cond",
",",
"counter",
")",
"]",
"=",
"0",
"for",
"participant",
"in",
"participants",
":",
"condcount",
"=",
"(",
"participant",
".",
"cond",
",",
"participant",
".",
"counterbalance",
")",
"if",
"condcount",
"in",
"counts",
":",
"counts",
"[",
"condcount",
"]",
"+=",
"1",
"mincount",
"=",
"min",
"(",
"counts",
".",
"values",
"(",
")",
")",
"minima",
"=",
"[",
"hsh",
"for",
"hsh",
",",
"count",
"in",
"counts",
".",
"iteritems",
"(",
")",
"if",
"count",
"==",
"mincount",
"]",
"chosen",
"=",
"choice",
"(",
"minima",
")",
"#conds += [ 0 for _ in range(1000) ]",
"#conds += [ 1 for _ in range(1000) ]",
"app",
".",
"logger",
".",
"info",
"(",
"\"given %(a)s chose %(b)s\"",
"%",
"{",
"'a'",
":",
"counts",
",",
"'b'",
":",
"chosen",
"}",
")",
"return",
"chosen"
] | HITs can be in one of three states:
- jobs that are finished
- jobs that are started but not finished
- jobs that are never going to finish (user decided not to do it)
Our count should be based on the first two, so we count any tasks finished
or any tasks not finished that were started in the last cutoff_time
minutes, as specified in the cutoff_time variable in the config file.
Returns a tuple: (cond, condition) | [
"HITs",
"can",
"be",
"in",
"one",
"of",
"three",
"states",
":",
"-",
"jobs",
"that",
"are",
"finished",
"-",
"jobs",
"that",
"are",
"started",
"but",
"not",
"finished",
"-",
"jobs",
"that",
"are",
"never",
"going",
"to",
"finish",
"(",
"user",
"decided",
"not",
"to",
"do",
"it",
")",
"Our",
"count",
"should",
"be",
"based",
"on",
"the",
"first",
"two",
"so",
"we",
"count",
"any",
"tasks",
"finished",
"or",
"any",
"tasks",
"not",
"finished",
"that",
"were",
"started",
"in",
"the",
"last",
"cutoff_time",
"minutes",
"as",
"specified",
"in",
"the",
"cutoff_time",
"variable",
"in",
"the",
"config",
"file",
"."
] | python | train |
nicolargo/glances | glances/amps/glances_amp.py | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/amps/glances_amp.py#L179-L186 | def set_result(self, result, separator=''):
"""Store the result (string) into the result key of the AMP
if one_line is true then replace \n by separator
"""
if self.one_line():
self.configs['result'] = str(result).replace('\n', separator)
else:
self.configs['result'] = str(result) | [
"def",
"set_result",
"(",
"self",
",",
"result",
",",
"separator",
"=",
"''",
")",
":",
"if",
"self",
".",
"one_line",
"(",
")",
":",
"self",
".",
"configs",
"[",
"'result'",
"]",
"=",
"str",
"(",
"result",
")",
".",
"replace",
"(",
"'\\n'",
",",
"separator",
")",
"else",
":",
"self",
".",
"configs",
"[",
"'result'",
"]",
"=",
"str",
"(",
"result",
")"
] | Store the result (string) into the result key of the AMP
if one_line is true then replace \n by separator | [
"Store",
"the",
"result",
"(",
"string",
")",
"into",
"the",
"result",
"key",
"of",
"the",
"AMP",
"if",
"one_line",
"is",
"true",
"then",
"replace",
"\\",
"n",
"by",
"separator"
] | python | train |
ARMmbed/autoversion | src/auto_version/auto_version_tool.py | https://github.com/ARMmbed/autoversion/blob/c5b127d2059c8219f5637fe45bf9e1be3a0af2aa/src/auto_version/auto_version_tool.py#L98-L108 | def detect_file_triggers(trigger_patterns):
"""The existence of files matching configured globs will trigger a version bump"""
triggers = set()
for trigger, pattern in trigger_patterns.items():
matches = glob.glob(pattern)
if matches:
_LOG.debug("trigger: %s bump from %r\n\t%s", trigger, pattern, matches)
triggers.add(trigger)
else:
_LOG.debug("trigger: no match on %r", pattern)
return triggers | [
"def",
"detect_file_triggers",
"(",
"trigger_patterns",
")",
":",
"triggers",
"=",
"set",
"(",
")",
"for",
"trigger",
",",
"pattern",
"in",
"trigger_patterns",
".",
"items",
"(",
")",
":",
"matches",
"=",
"glob",
".",
"glob",
"(",
"pattern",
")",
"if",
"matches",
":",
"_LOG",
".",
"debug",
"(",
"\"trigger: %s bump from %r\\n\\t%s\"",
",",
"trigger",
",",
"pattern",
",",
"matches",
")",
"triggers",
".",
"add",
"(",
"trigger",
")",
"else",
":",
"_LOG",
".",
"debug",
"(",
"\"trigger: no match on %r\"",
",",
"pattern",
")",
"return",
"triggers"
] | The existence of files matching configured globs will trigger a version bump | [
"The",
"existence",
"of",
"files",
"matching",
"configured",
"globs",
"will",
"trigger",
"a",
"version",
"bump"
] | python | train |
ValvePython/steam | steam/client/user.py | https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/user.py#L85-L107 | def get_avatar_url(self, size=2):
"""Get URL to avatar picture
:param size: possible values are ``0``, ``1``, or ``2`` corresponding to small, medium, large
:type size: :class:`int`
:return: url to avatar
:rtype: :class:`str`
"""
hashbytes = self.get_ps('avatar_hash')
if hashbytes != "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000":
ahash = hexlify(hashbytes).decode('ascii')
else:
ahash = 'fef49e7fa7e1997310d705b2a6158ff8dc1cdfeb'
sizes = {
0: '',
1: '_medium',
2: '_full',
}
url = "http://cdn.akamai.steamstatic.com/steamcommunity/public/images/avatars/%s/%s%s.jpg"
return url % (ahash[:2], ahash, sizes[size]) | [
"def",
"get_avatar_url",
"(",
"self",
",",
"size",
"=",
"2",
")",
":",
"hashbytes",
"=",
"self",
".",
"get_ps",
"(",
"'avatar_hash'",
")",
"if",
"hashbytes",
"!=",
"\"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"",
":",
"ahash",
"=",
"hexlify",
"(",
"hashbytes",
")",
".",
"decode",
"(",
"'ascii'",
")",
"else",
":",
"ahash",
"=",
"'fef49e7fa7e1997310d705b2a6158ff8dc1cdfeb'",
"sizes",
"=",
"{",
"0",
":",
"''",
",",
"1",
":",
"'_medium'",
",",
"2",
":",
"'_full'",
",",
"}",
"url",
"=",
"\"http://cdn.akamai.steamstatic.com/steamcommunity/public/images/avatars/%s/%s%s.jpg\"",
"return",
"url",
"%",
"(",
"ahash",
"[",
":",
"2",
"]",
",",
"ahash",
",",
"sizes",
"[",
"size",
"]",
")"
] | Get URL to avatar picture
:param size: possible values are ``0``, ``1``, or ``2`` corresponding to small, medium, large
:type size: :class:`int`
:return: url to avatar
:rtype: :class:`str` | [
"Get",
"URL",
"to",
"avatar",
"picture"
] | python | train |
Contraz/demosys-py | demosys/loaders/data/text.py | https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/loaders/data/text.py#L8-L18 | def load(self):
"""Load a file in text mode"""
self.meta.resolved_path = self.find_data(self.meta.path)
if not self.meta.resolved_path:
raise ImproperlyConfigured("Data file '{}' not found".format(self.meta.path))
print("Loading:", self.meta.path)
with open(self.meta.resolved_path, 'r') as fd:
return fd.read() | [
"def",
"load",
"(",
"self",
")",
":",
"self",
".",
"meta",
".",
"resolved_path",
"=",
"self",
".",
"find_data",
"(",
"self",
".",
"meta",
".",
"path",
")",
"if",
"not",
"self",
".",
"meta",
".",
"resolved_path",
":",
"raise",
"ImproperlyConfigured",
"(",
"\"Data file '{}' not found\"",
".",
"format",
"(",
"self",
".",
"meta",
".",
"path",
")",
")",
"print",
"(",
"\"Loading:\"",
",",
"self",
".",
"meta",
".",
"path",
")",
"with",
"open",
"(",
"self",
".",
"meta",
".",
"resolved_path",
",",
"'r'",
")",
"as",
"fd",
":",
"return",
"fd",
".",
"read",
"(",
")"
] | Load a file in text mode | [
"Load",
"a",
"file",
"in",
"text",
"mode"
] | python | valid |
ejeschke/ginga | ginga/misc/Bunch.py | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Bunch.py#L497-L506 | def get(self, key, alt=None):
"""If dictionary contains _key_ return the associated value,
otherwise return _alt_.
"""
with self.lock:
if key in self:
return self.getitem(key)
else:
return alt | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"alt",
"=",
"None",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"key",
"in",
"self",
":",
"return",
"self",
".",
"getitem",
"(",
"key",
")",
"else",
":",
"return",
"alt"
] | If dictionary contains _key_ return the associated value,
otherwise return _alt_. | [
"If",
"dictionary",
"contains",
"_key_",
"return",
"the",
"associated",
"value",
"otherwise",
"return",
"_alt_",
"."
] | python | train |
spyder-ide/conda-manager | conda_manager/api/conda_api.py | https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L440-L448 | def info(self, abspath=True):
"""
Return a dictionary with configuration information.
No guarantee is made about which keys exist. Therefore this function
should only be used for testing and debugging.
"""
logger.debug(str(''))
return self._call_and_parse(['info', '--json'], abspath=abspath) | [
"def",
"info",
"(",
"self",
",",
"abspath",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"str",
"(",
"''",
")",
")",
"return",
"self",
".",
"_call_and_parse",
"(",
"[",
"'info'",
",",
"'--json'",
"]",
",",
"abspath",
"=",
"abspath",
")"
] | Return a dictionary with configuration information.
No guarantee is made about which keys exist. Therefore this function
should only be used for testing and debugging. | [
"Return",
"a",
"dictionary",
"with",
"configuration",
"information",
"."
] | python | train |
nicolargo/glances | glances/password.py | https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/password.py#L49-L51 | def get_hash(self, salt, plain_password):
"""Return the hashed password, salt + SHA-256."""
return hashlib.sha256(salt.encode() + plain_password.encode()).hexdigest() | [
"def",
"get_hash",
"(",
"self",
",",
"salt",
",",
"plain_password",
")",
":",
"return",
"hashlib",
".",
"sha256",
"(",
"salt",
".",
"encode",
"(",
")",
"+",
"plain_password",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")"
] | Return the hashed password, salt + SHA-256. | [
"Return",
"the",
"hashed",
"password",
"salt",
"+",
"SHA",
"-",
"256",
"."
] | python | train |
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/images.py | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L19-L34 | def move_images_to_cache(source, destination):
"""
Handles the movement of images to the cache. Must be helpful if it finds
that the folder for this article already exists.
"""
if os.path.isdir(destination):
log.debug('Cached images for this article already exist')
return
else:
log.debug('Cache location: {0}'.format(destination))
try:
shutil.copytree(source, destination)
except:
log.exception('Images could not be moved to cache')
else:
log.info('Moved images to cache: {0}'.format(destination))
"def",
"move_images_to_cache",
"(",
"source",
",",
"destination",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"destination",
")",
":",
"log",
".",
"debug",
"(",
"'Cached images for this article already exist'",
")",
"return",
"else",
":",
"log",
".",
"debug",
"(",
"'Cache location: {0}'",
".",
"format",
"(",
"destination",
")",
")",
"try",
":",
"shutil",
".",
"copytree",
"(",
"source",
",",
"destination",
")",
"except",
":",
"log",
".",
"exception",
"(",
"'Images could not be moved to cache'",
")",
"else",
":",
"log",
".",
"info",
"(",
"'Moved images to cache'",
".",
"format",
"(",
"destination",
")",
")"
] | Handles the movement of images to the cache. Must be helpful if it finds
that the folder for this article already exists. | [
"Handles",
"the",
"movement",
"of",
"images",
"to",
"the",
"cache",
".",
"Must",
"be",
"helpful",
"if",
"it",
"finds",
"that",
"the",
"folder",
"for",
"this",
"article",
"already",
"exists",
"."
] | python | train |
josiah-wolf-oberholtzer/uqbar | uqbar/cli/CLIAggregator.py | https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/cli/CLIAggregator.py#L160-L213 | def cli_aliases(self):
r"""Developer script aliases.
"""
scripting_groups = []
aliases = {}
for cli_class in self.cli_classes:
instance = cli_class()
if getattr(instance, "alias", None):
scripting_group = getattr(instance, "scripting_group", None)
if scripting_group:
scripting_groups.append(scripting_group)
entry = (scripting_group, instance.alias)
if (scripting_group,) in aliases:
message = "alias conflict between scripting group"
message += " {!r} and {}"
message = message.format(
scripting_group, aliases[(scripting_group,)].__name__
)
raise Exception(message)
if entry in aliases:
message = "alias conflict between {} and {}"
message = message.format(
aliases[entry].__name__, cli_class.__name__
)
raise Exception(message)
aliases[entry] = cli_class
else:
entry = (instance.alias,)
if entry in scripting_groups:
message = "alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.alias)
raise Exception(message)
if entry in aliases:
message = "alias conflict be {} and {}"
message = message.format(cli_class.__name__, aliases[entry])
raise Exception(message)
aliases[(instance.alias,)] = cli_class
else:
if instance.program_name in scripting_groups:
message = "Alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.program_name)
raise Exception(message)
aliases[(instance.program_name,)] = cli_class
alias_map = {}
for key, value in aliases.items():
if len(key) == 1:
alias_map[key[0]] = value
else:
if key[0] not in alias_map:
alias_map[key[0]] = {}
alias_map[key[0]][key[1]] = value
return alias_map | [
"def",
"cli_aliases",
"(",
"self",
")",
":",
"scripting_groups",
"=",
"[",
"]",
"aliases",
"=",
"{",
"}",
"for",
"cli_class",
"in",
"self",
".",
"cli_classes",
":",
"instance",
"=",
"cli_class",
"(",
")",
"if",
"getattr",
"(",
"instance",
",",
"\"alias\"",
",",
"None",
")",
":",
"scripting_group",
"=",
"getattr",
"(",
"instance",
",",
"\"scripting_group\"",
",",
"None",
")",
"if",
"scripting_group",
":",
"scripting_groups",
".",
"append",
"(",
"scripting_group",
")",
"entry",
"=",
"(",
"scripting_group",
",",
"instance",
".",
"alias",
")",
"if",
"(",
"scripting_group",
",",
")",
"in",
"aliases",
":",
"message",
"=",
"\"alias conflict between scripting group\"",
"message",
"+=",
"\" {!r} and {}\"",
"message",
"=",
"message",
".",
"format",
"(",
"scripting_group",
",",
"aliases",
"[",
"(",
"scripting_group",
",",
")",
"]",
".",
"__name__",
")",
"raise",
"Exception",
"(",
"message",
")",
"if",
"entry",
"in",
"aliases",
":",
"message",
"=",
"\"alias conflict between {} and {}\"",
"message",
"=",
"message",
".",
"format",
"(",
"aliases",
"[",
"entry",
"]",
".",
"__name__",
",",
"cli_class",
".",
"__name__",
")",
"raise",
"Exception",
"(",
"message",
")",
"aliases",
"[",
"entry",
"]",
"=",
"cli_class",
"else",
":",
"entry",
"=",
"(",
"instance",
".",
"alias",
",",
")",
"if",
"entry",
"in",
"scripting_groups",
":",
"message",
"=",
"\"alias conflict between {}\"",
"message",
"+=",
"\" and scripting group {!r}\"",
"message",
"=",
"message",
".",
"format",
"(",
"cli_class",
".",
"__name__",
",",
"instance",
".",
"alias",
")",
"raise",
"Exception",
"(",
"message",
")",
"if",
"entry",
"in",
"aliases",
":",
"message",
"=",
"\"alias conflict be {} and {}\"",
"message",
"=",
"message",
".",
"format",
"(",
"cli_class",
".",
"__name__",
",",
"aliases",
"[",
"entry",
"]",
")",
"raise",
"Exception",
"(",
"message",
")",
"aliases",
"[",
"(",
"instance",
".",
"alias",
",",
")",
"]",
"=",
"cli_class",
"else",
":",
"if",
"instance",
".",
"program_name",
"in",
"scripting_groups",
":",
"message",
"=",
"\"Alias conflict between {}\"",
"message",
"+=",
"\" and scripting group {!r}\"",
"message",
"=",
"message",
".",
"format",
"(",
"cli_class",
".",
"__name__",
",",
"instance",
".",
"program_name",
")",
"raise",
"Exception",
"(",
"message",
")",
"aliases",
"[",
"(",
"instance",
".",
"program_name",
",",
")",
"]",
"=",
"cli_class",
"alias_map",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"aliases",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"key",
")",
"==",
"1",
":",
"alias_map",
"[",
"key",
"[",
"0",
"]",
"]",
"=",
"value",
"else",
":",
"if",
"key",
"[",
"0",
"]",
"not",
"in",
"alias_map",
":",
"alias_map",
"[",
"key",
"[",
"0",
"]",
"]",
"=",
"{",
"}",
"alias_map",
"[",
"key",
"[",
"0",
"]",
"]",
"[",
"key",
"[",
"1",
"]",
"]",
"=",
"value",
"return",
"alias_map"
] | r"""Developer script aliases. | [
"r",
"Developer",
"script",
"aliases",
"."
] | python | train |
d0c-s4vage/pfp | pfp/interp.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L1771-L1819 | def _handle_assignment(self, node, scope, ctxt, stream):
"""Handle assignment nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
def add_op(x,y): x += y
def sub_op(x,y): x -= y
def div_op(x,y): x /= y
def mod_op(x,y): x %= y
def mul_op(x,y): x *= y
def xor_op(x,y): x ^= y
def and_op(x,y): x &= y
def or_op(x,y): x |= y
def lshift_op(x,y): x <<= y
def rshift_op(x,y): x >>= y
def assign_op(x,y): x._pfp__set_value(y)
switch = {
"+=" : add_op,
"-=" : sub_op,
"/=" : div_op,
"%=" : mod_op,
"*=" : mul_op,
"^=" : xor_op,
"&=" : and_op,
"|=" : or_op,
"<<=" : lshift_op,
">>=" : rshift_op,
"=" : assign_op
}
self._dlog("handling assignment")
field = self._handle_node(node.lvalue, scope, ctxt, stream)
self._dlog("field = {}".format(field))
value = self._handle_node(node.rvalue, scope, ctxt, stream)
if node.op is None:
self._dlog("value = {}".format(value))
field._pfp__set_value(value)
else:
self._dlog("value {}= {}".format(node.op, value))
if node.op not in switch:
raise errors.UnsupportedAssignmentOperator(node.coord, node.op)
switch[node.op](field, value) | [
"def",
"_handle_assignment",
"(",
"self",
",",
"node",
",",
"scope",
",",
"ctxt",
",",
"stream",
")",
":",
"def",
"add_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"+=",
"y",
"def",
"sub_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"-=",
"y",
"def",
"div_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"/=",
"y",
"def",
"mod_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"%=",
"y",
"def",
"mul_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"*=",
"y",
"def",
"xor_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"^=",
"y",
"def",
"and_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"&=",
"y",
"def",
"or_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"|=",
"y",
"def",
"lshift_op",
"(",
"x",
",",
"y",
")",
":",
"x",
"<<=",
"y",
"def",
"rshift_op",
"(",
"x",
",",
"y",
")",
":",
"x",
">>=",
"y",
"def",
"assign_op",
"(",
"x",
",",
"y",
")",
":",
"x",
".",
"_pfp__set_value",
"(",
"y",
")",
"switch",
"=",
"{",
"\"+=\"",
":",
"add_op",
",",
"\"-=\"",
":",
"sub_op",
",",
"\"/=\"",
":",
"div_op",
",",
"\"%=\"",
":",
"mod_op",
",",
"\"*=\"",
":",
"mul_op",
",",
"\"^=\"",
":",
"xor_op",
",",
"\"&=\"",
":",
"and_op",
",",
"\"|=\"",
":",
"or_op",
",",
"\"<<=\"",
":",
"lshift_op",
",",
"\">>=\"",
":",
"rshift_op",
",",
"\"=\"",
":",
"assign_op",
"}",
"self",
".",
"_dlog",
"(",
"\"handling assignment\"",
")",
"field",
"=",
"self",
".",
"_handle_node",
"(",
"node",
".",
"lvalue",
",",
"scope",
",",
"ctxt",
",",
"stream",
")",
"self",
".",
"_dlog",
"(",
"\"field = {}\"",
".",
"format",
"(",
"field",
")",
")",
"value",
"=",
"self",
".",
"_handle_node",
"(",
"node",
".",
"rvalue",
",",
"scope",
",",
"ctxt",
",",
"stream",
")",
"if",
"node",
".",
"op",
"is",
"None",
":",
"self",
".",
"_dlog",
"(",
"\"value = {}\"",
".",
"format",
"(",
"value",
")",
")",
"field",
".",
"_pfp__set_value",
"(",
"value",
")",
"else",
":",
"self",
".",
"_dlog",
"(",
"\"value {}= {}\"",
".",
"format",
"(",
"node",
".",
"op",
",",
"value",
")",
")",
"if",
"node",
".",
"op",
"not",
"in",
"switch",
":",
"raise",
"errors",
".",
"UnsupportedAssignmentOperator",
"(",
"node",
".",
"coord",
",",
"node",
".",
"op",
")",
"switch",
"[",
"node",
".",
"op",
"]",
"(",
"field",
",",
"value",
")"
] | Handle assignment nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | [
"Handle",
"assignment",
"nodes"
] | python | train |
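
A self-contained sketch of the dispatch-table pattern used by `_handle_assignment`. The pattern only mutates the target because pfp's field objects implement the augmented operators in place; the `Field` class below merely fakes that behavior and is not part of pfp.

```python
class Field:
    """Stand-in for a pfp field whose augmented ops mutate in place."""
    def __init__(self, value):
        self.value = value
    def __iadd__(self, other):
        self.value += other
        return self
    def _pfp__set_value(self, other):
        self.value = other

def add_op(x, y): x += y                    # mutates via Field.__iadd__
def assign_op(x, y): x._pfp__set_value(y)

switch = {"+=": add_op, "=": assign_op}
f = Field(10)
switch["+="](f, 5)   # f.value == 15
switch["="](f, 99)   # f.value == 99
assert f.value == 99
```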
serkanyersen/underscore.py | src/underscore.py | https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L300-L312 | def invoke(self, method, *args):
""" Invoke a method (with arguments) on every item in a collection.
"""
def inv(value, *ar):
if (
_(method).isFunction() or
_(method).isLambda() or
_(method).isMethod()
):
return method(value, *args)
else:
return getattr(value, method)(*args)
return self._wrap(self._clean.map(inv)) | [
"def",
"invoke",
"(",
"self",
",",
"method",
",",
"*",
"args",
")",
":",
"def",
"inv",
"(",
"value",
",",
"*",
"ar",
")",
":",
"if",
"(",
"_",
"(",
"method",
")",
".",
"isFunction",
"(",
")",
"or",
"_",
"(",
"method",
")",
".",
"isLambda",
"(",
")",
"or",
"_",
"(",
"method",
")",
".",
"isMethod",
"(",
")",
")",
":",
"return",
"method",
"(",
"value",
",",
"*",
"args",
")",
"else",
":",
"return",
"getattr",
"(",
"value",
",",
"method",
")",
"(",
"*",
"args",
")",
"return",
"self",
".",
"_wrap",
"(",
"self",
".",
"_clean",
".",
"map",
"(",
"inv",
")",
")"
] | Invoke a method (with arguments) on every item in a collection. | [
"Invoke",
"a",
"method",
"(",
"with",
"arguments",
")",
"on",
"every",
"item",
"in",
"a",
"collection",
"."
] | python | train |
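
A hedged usage sketch for `invoke`; the import path and the unwrapped return value are assumptions based on this record's path and the `_wrap` call above. Note the callable branch forwards the closed-over `*args`, not `inv`'s own `*ar`.

```python
from underscore import _  # import path assumed from src/underscore.py

# Method given by name: looked up on each item with getattr.
print(_(["ab", "cd"]).invoke("upper"))        # expected: ['AB', 'CD']

# Method given as a callable (or lambda): called directly on each item.
print(_([1, 2, 3]).invoke(lambda x: x * 10))  # expected: [10, 20, 30]
```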
internetarchive/brozzler | brozzler/__init__.py | https://github.com/internetarchive/brozzler/blob/411b3f266a38b9bb942021c0121ebd8e5ca66447/brozzler/__init__.py#L174-L202 | def thread_exception_gate(thread=None):
'''
Returns a `ThreadExceptionGate` for `thread` (current thread by default).
`ThreadExceptionGate` is a context manager which allows exceptions to be
raised from threads other than the current one, by way of `thread_raise`.
Example:
try:
with thread_exception_gate():
# do something
except:
# handle exception....
If `thread_raise` is called on a thread that is not currently inside the
`ThreadExceptionGate` context (pep340 "runtime environment"), the exception
is queued, and raised immediately if and when the thread enters the
context. Only one exception will be queued this way at a time, others are
discarded.
'''
if not thread:
thread = threading.current_thread()
with _thread_exception_gates_lock:
if not thread in _thread_exception_gates:
_thread_exception_gates[thread] = ThreadExceptionGate(thread)
return _thread_exception_gates[thread] | [
"def",
"thread_exception_gate",
"(",
"thread",
"=",
"None",
")",
":",
"if",
"not",
"thread",
":",
"thread",
"=",
"threading",
".",
"current_thread",
"(",
")",
"with",
"_thread_exception_gates_lock",
":",
"if",
"not",
"thread",
"in",
"_thread_exception_gates",
":",
"_thread_exception_gates",
"[",
"thread",
"]",
"=",
"ThreadExceptionGate",
"(",
"thread",
")",
"return",
"_thread_exception_gates",
"[",
"thread",
"]"
] | Returns a `ThreadExceptionGate` for `thread` (current thread by default).
`ThreadExceptionGate` is a context manager which allows exceptions to be
raised from threads other than the current one, by way of `thread_raise`.
Example:
try:
with thread_exception_gate():
# do something
except:
# handle exception....
If `thread_raise` is called on a thread that is not currently inside the
`ThreadExceptionGate` context (pep340 "runtime environment"), the exception
is queued, and raised immediately if and when the thread enters the
context. Only one exception will be queued this way at a time, others are
discarded. | [
"Returns",
"a",
"ThreadExceptionGate",
"for",
"thread",
"(",
"current",
"thread",
"by",
"default",
")",
"."
] | python | train |
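
A hedged sketch of the pattern the docstring describes; it assumes the companion `thread_raise` helper from the same module accepts `(thread, exception_class)` — the exact signature is not shown in this record.

```python
import threading
import brozzler  # thread_exception_gate lives in brozzler/__init__.py

def worker():
    try:
        with brozzler.thread_exception_gate():
            while True:
                pass  # interruptible long-running work
    except Exception as exc:
        print("interrupted:", type(exc).__name__)

t = threading.Thread(target=worker)
t.start()
brozzler.thread_raise(t, Exception)  # signature assumed
t.join()
```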
senaite/senaite.core | bika/lims/browser/reports/productivity_analysesperclient.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/reports/productivity_analysesperclient.py#L128-L137 | def add_filter_by_date(self, query, out_params):
"""Applies the filter by Requested date to the search query
"""
date_query = formatDateQuery(self.context, 'Requested')
if date_query:
query['created'] = date_query
requested = formatDateParms(self.context, 'Requested')
out_params.append({'title': _('Requested'),
'value': requested,
'type': 'text'}) | [
"def",
"add_filter_by_date",
"(",
"self",
",",
"query",
",",
"out_params",
")",
":",
"date_query",
"=",
"formatDateQuery",
"(",
"self",
".",
"context",
",",
"'Requested'",
")",
"if",
"date_query",
":",
"query",
"[",
"'created'",
"]",
"=",
"date_query",
"requested",
"=",
"formatDateParms",
"(",
"self",
".",
"context",
",",
"'Requested'",
")",
"out_params",
".",
"append",
"(",
"{",
"'title'",
":",
"_",
"(",
"'Requested'",
")",
",",
"'value'",
":",
"requested",
",",
"'type'",
":",
"'text'",
"}",
")"
] | Applies the filter by Requested date to the search query | [
"Applies",
"the",
"filter",
"by",
"Requested",
"date",
"to",
"the",
"search",
"query"
] | python | train |
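
An illustrative sketch of the method's in-place contract: it may add a `created` date query and appends a Requested out-parameter. The stub below fakes `formatDateQuery`/`formatDateParms` (the real helpers live in bika.lims and read the request), and the date shapes are assumptions.

```python
class FakeReportView:
    """Minimal stand-in so the call pattern runs outside Plone."""
    def add_filter_by_date(self, query, out_params):
        date_query = {"query": ["2019-01-01", "2019-01-31"],
                      "range": "min:max"}  # assumed formatDateQuery shape
        if date_query:
            query["created"] = date_query
        out_params.append({"title": "Requested",
                           "value": "2019-01-01 to 2019-01-31",
                           "type": "text"})

query, out_params = {"portal_type": "AnalysisRequest"}, []
FakeReportView().add_filter_by_date(query, out_params)
print(query["created"], out_params)
```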
ngmarchant/oasis | oasis/kad.py | https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/kad.py#L105-L131 | def _sample_item(self, **kwargs):
"""Sample an item from the pool according to the instrumental
distribution
"""
t = self.t_
if 'fixed_stratum' in kwargs:
stratum_idx = kwargs['fixed_stratum']
else:
stratum_idx = None
if stratum_idx is not None:
# Sample in given stratum
loc = self.strata._sample_in_stratum(stratum_idx, replace=False)
# Record instrumental distribution
if self.record_inst_hist:
self.inst_pmf_[stratum_idx,t] = 1
else:
# Choose stratum based on instrumental distribution
self._calc_inst_pmf()
if self.record_inst_hist:
inst_pmf = self.inst_pmf_[:,t]
else:
inst_pmf = self.inst_pmf_
loc, stratum_idx = self.strata.sample(pmf = inst_pmf, replace=False)
return loc, 1, {'stratum': stratum_idx} | [
"def",
"_sample_item",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"t",
"=",
"self",
".",
"t_",
"if",
"'fixed_stratum'",
"in",
"kwargs",
":",
"stratum_idx",
"=",
"kwargs",
"[",
"'fixed_stratum'",
"]",
"else",
":",
"stratum_idx",
"=",
"None",
"if",
"stratum_idx",
"is",
"not",
"None",
":",
"# Sample in given stratum",
"loc",
"=",
"self",
".",
"strata",
".",
"_sample_in_stratum",
"(",
"stratum_idx",
",",
"replace",
"=",
"False",
")",
"# Record instrumental distribution",
"if",
"self",
".",
"record_inst_hist",
":",
"self",
".",
"inst_pmf_",
"[",
"stratum_idx",
",",
"t",
"]",
"=",
"1",
"else",
":",
"# Choose stratum based on instrumental distribution",
"self",
".",
"_calc_inst_pmf",
"(",
")",
"if",
"self",
".",
"record_inst_hist",
":",
"inst_pmf",
"=",
"self",
".",
"inst_pmf_",
"[",
":",
",",
"t",
"]",
"else",
":",
"inst_pmf",
"=",
"self",
".",
"inst_pmf_",
"loc",
",",
"stratum_idx",
"=",
"self",
".",
"strata",
".",
"sample",
"(",
"pmf",
"=",
"inst_pmf",
",",
"replace",
"=",
"False",
")",
"return",
"loc",
",",
"1",
",",
"{",
"'stratum'",
":",
"stratum_idx",
"}"
] | Sample an item from the pool according to the instrumental
distribution | [
"Sample",
"an",
"item",
"from",
"the",
"pool",
"according",
"to",
"the",
"instrumental",
"distribution"
] | python | train |
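
A runnable paraphrase of the core idea in `_sample_item`: when no stratum is fixed, the next stratum is drawn from the instrumental pmf. The real `strata.sample` also handles without-replacement bookkeeping, which this numpy sketch omits.

```python
import numpy as np

rng = np.random.RandomState(0)
inst_pmf = np.array([0.2, 0.5, 0.3])                  # instrumental distribution
stratum_idx = rng.choice(len(inst_pmf), p=inst_pmf)   # spirit of strata.sample
print("sample the next item from stratum", stratum_idx)
```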
mcieslik-mctp/papy | src/papy/core.py | https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/core.py#L925-L974 | def start(self, stages=None):
"""
Makes the ``Piper`` ready to return results. This involves starting
the provided ``NuMap`` instance. If multiple ``Pipers`` share a
``NuMap`` instance the order in which these ``Pipers`` are started is
important. The valid order is upstream before downstream. The ``NuMap``
instance can only be started once, but the process can be done in 2
stages. This method's "stages" argument is a ``tuple`` which can contain
any of the numbers ``0`` and/or ``1`` and/or ``2`` specifying which stage
of the start routine should be carried out:
- stage 0 - creates the needed ``itertools.tee`` objects.
- stage 1 - activates ``NuMap`` pool. A call to ``next`` will block.
- stage 2 - activates ``NuMap`` pool managers.
If this ``Piper`` shares a ``NuMap`` with other ``Pipers`` the proper
way to start them is to start them in a valid postorder with stages
``(0, 1)`` and ``(2,)`` separately.
Arguments:
- stages(tuple) [default: ``(0,)`` if linear; ``(0,1,2)`` if parallel]
Performs the specified stages of the start of a ``Piper`` instance.
Stage ``0`` is necessary and sufficient to start a linear ``Piper``
which uses an ``itertools.imap``. Stages ``1`` and ``2`` are
required to start any parallel ``Piper`` instance.
"""
# defaults differ linear vs. parallel
stages = stages or ((0,) if self.imap is imap else (0, 1, 2))
if not self.connected:
self.log.error('Piper %s is not connected.' % self)
raise PiperError('Piper %s is not connected.' % self)
if not self.started:
if 0 in stages:
self.tees.extend(tee(self, self.tee_num))
if hasattr(self.imap, 'start'):
# parallel piper
self.imap.start(stages)
if 2 in stages:
self.log.debug('Piper %s has been started using %s' % \
(self, self.imap))
self.started = True
else:
# linear piper
self.log.debug('Piper %s has been started using %s' % \
(self, self.imap))
self.started = True | [
"def",
"start",
"(",
"self",
",",
"stages",
"=",
"None",
")",
":",
"# defaults differ linear vs. parallel",
"stages",
"=",
"stages",
"or",
"(",
"(",
"0",
",",
")",
"if",
"self",
".",
"imap",
"is",
"imap",
"else",
"(",
"0",
",",
"1",
",",
"2",
")",
")",
"if",
"not",
"self",
".",
"connected",
":",
"self",
".",
"log",
".",
"error",
"(",
"'Piper %s is not connected.'",
"%",
"self",
")",
"raise",
"PiperError",
"(",
"'Piper %s is not connected.'",
"%",
"self",
")",
"if",
"not",
"self",
".",
"started",
":",
"if",
"0",
"in",
"stages",
":",
"self",
".",
"tees",
".",
"extend",
"(",
"tee",
"(",
"self",
",",
"self",
".",
"tee_num",
")",
")",
"if",
"hasattr",
"(",
"self",
".",
"imap",
",",
"'start'",
")",
":",
"# parallel piper",
"self",
".",
"imap",
".",
"start",
"(",
"stages",
")",
"if",
"2",
"in",
"stages",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Piper %s has been started using %s'",
"%",
"(",
"self",
",",
"self",
".",
"imap",
")",
")",
"self",
".",
"started",
"=",
"True",
"else",
":",
"# linear piper",
"self",
".",
"log",
".",
"debug",
"(",
"'Piper %s has been started using %s'",
"%",
"(",
"self",
",",
"self",
".",
"imap",
")",
")",
"self",
".",
"started",
"=",
"True"
] | Makes the ``Piper`` ready to return results. This involves starting
the provided ``NuMap`` instance. If multiple ``Pipers`` share a
``NuMap`` instance the order in which these ``Pipers`` are started is
important. The valid order is upstream before downstream. The ``NuMap``
instance can only be started once, but the process can be done in 2
stages. This method's "stages" argument is a ``tuple`` which can contain
any of the numbers ``0`` and/or ``1`` and/or ``2`` specifying which stage
of the start routine should be carried out:
- stage 0 - creates the needed ``itertools.tee`` objects.
- stage 1 - activates ``NuMap`` pool. A call to ``next`` will block.
- stage 2 - activates ``NuMap`` pool managers.
If this ``Piper`` shares a ``NuMap`` with other ``Pipers`` the proper
way to start them is to start them in a valid postorder with stages
``(0, 1)`` and ``(2,)`` separately.
Arguments:
- stages(tuple) [default: ``(0,)`` if linear; ``(0,1,2)`` if parallel]
Performs the specified stages of the start of a ``Piper`` instance.
Stage ``0`` is necessary and sufficient to start a linear ``Piper``
which uses an ``itertools.imap``. Stages ``1`` and ``2`` are
required to start any parallel ``Piper`` instance. | [
"Makes",
"the",
"Piper",
"ready",
"to",
"return",
"results",
".",
"This",
"involves",
"starting",
"the",
"the",
"provided",
"NuMap",
"instance",
".",
"If",
"multiple",
"Pipers",
"share",
"a",
"NuMap",
"instance",
"the",
"order",
"in",
"which",
"these",
"Pipers",
"are",
"started",
"is",
"important",
".",
"The",
"valid",
"order",
"is",
"upstream",
"before",
"downstream",
".",
"The",
"NuMap",
"instance",
"can",
"only",
"be",
"started",
"once",
"but",
"the",
"process",
"can",
"be",
"done",
"in",
"2",
"stages",
".",
"This",
"methods",
"stages",
"argument",
"is",
"a",
"tuple",
"which",
"can",
"contain",
"any",
"the",
"numbers",
"0",
"and",
"/",
"or",
"1",
"and",
"/",
"or",
"2",
"specifying",
"which",
"stage",
"of",
"the",
"start",
"routine",
"should",
"be",
"carried",
"out",
":",
"-",
"stage",
"0",
"-",
"creates",
"the",
"needed",
"itertools",
".",
"tee",
"objects",
".",
"-",
"stage",
"1",
"-",
"activates",
"NuMap",
"pool",
".",
"A",
"call",
"to",
"next",
"will",
"block",
"..",
"-",
"stage",
"2",
"-",
"activates",
"NuMap",
"pool",
"managers",
".",
"If",
"this",
"Piper",
"shares",
"a",
"NuMap",
"with",
"other",
"Pipers",
"the",
"proper",
"way",
"to",
"start",
"them",
"is",
"to",
"start",
"them",
"in",
"a",
"valid",
"postorder",
"with",
"stages",
"(",
"0",
"1",
")",
"and",
"(",
"2",
")",
"separately",
".",
"Arguments",
":",
"-",
"stages",
"(",
"tuple",
")",
"[",
"default",
":",
"(",
"0",
")",
"if",
"linear",
";",
"(",
"0",
"1",
"2",
")",
"if",
"parallel",
"]",
"Performs",
"the",
"specified",
"stages",
"of",
"the",
"start",
"of",
"a",
"Piper",
"instance",
".",
"Stage",
"0",
"is",
"necessary",
"and",
"sufficient",
"to",
"start",
"a",
"linear",
"Piper",
"which",
"uses",
"an",
"itertools",
".",
"imap",
".",
"Stages",
"1",
"and",
"2",
"are",
"required",
"to",
"start",
"any",
"parallel",
"Piper",
"instance",
"."
] | python | train |
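
A hedged sketch of the staged start order the docstring prescribes for pipers sharing one `NuMap`; piper construction and connection are omitted, and the call at the bottom is commented out because the piper objects are hypothetical.

```python
def start_shared_numap(pipers_in_postorder):
    """Start pipers that share one NuMap: stages (0, 1) for all, then (2,)."""
    for piper in pipers_in_postorder:
        piper.start(stages=(0, 1))  # create tees, activate the pool
    for piper in pipers_in_postorder:
        piper.start(stages=(2,))    # activate pool managers last

# start_shared_numap([upstream, downstream])  # valid (upstream-first) postorder
```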
avanwyk/cipy | cipy/algorithms/pso/functions.py | https://github.com/avanwyk/cipy/blob/98450dd01767b3615c113e50dc396f135e177b29/cipy/algorithms/pso/functions.py#L123-L138 | def initialize_particle(rng, domain, fitness_function):
""" Initializes a particle within a domain.
Args:
rng: numpy.random.RandomState: The random number generator.
domain: cipy.problems.core.Domain: The domain of the problem.
Returns:
cipy.algorithms.pso.Particle: A new, fully initialized particle.
"""
position = rng.uniform(domain.lower, domain.upper, domain.dimension)
fitness = fitness_function(position)
return Particle(position=position,
velocity=np.zeros(domain.dimension),
fitness=fitness,
best_fitness=fitness,
best_position=position) | [
"def",
"initialize_particle",
"(",
"rng",
",",
"domain",
",",
"fitness_function",
")",
":",
"position",
"=",
"rng",
".",
"uniform",
"(",
"domain",
".",
"lower",
",",
"domain",
".",
"upper",
",",
"domain",
".",
"dimension",
")",
"fitness",
"=",
"fitness_function",
"(",
"position",
")",
"return",
"Particle",
"(",
"position",
"=",
"position",
",",
"velocity",
"=",
"np",
".",
"zeros",
"(",
"domain",
".",
"dimension",
")",
",",
"fitness",
"=",
"fitness",
",",
"best_fitness",
"=",
"fitness",
",",
"best_position",
"=",
"position",
")"
] | Initializes a particle within a domain.
Args:
rng: numpy.random.RandomState: The random number generator.
domain: cipy.problems.core.Domain: The domain of the problem.
Returns:
cipy.algorithms.pso.Particle: A new, fully initialized particle. | [
"Initializes",
"a",
"particle",
"within",
"a",
"domain",
".",
"Args",
":",
"rng",
":",
"numpy",
".",
"random",
".",
"RandomState",
":",
"The",
"random",
"number",
"generator",
".",
"domain",
":",
"cipy",
".",
"problems",
".",
"core",
".",
"Domain",
":",
"The",
"domain",
"of",
"the",
"problem",
"."
] | python | train |
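
A hedged usage sketch; the import locations and the `Domain` constructor signature are inferred from this record's path and the attributes the function reads (`lower`, `upper`, `dimension`), and may differ in cipy.

```python
import numpy as np
from cipy.problems.core import Domain                      # path assumed
from cipy.algorithms.pso.functions import initialize_particle

rng = np.random.RandomState(42)
domain = Domain(lower=-5.12, upper=5.12, dimension=30)     # signature assumed
sphere = lambda x: float(np.sum(x ** 2))                   # fitness to minimize
p = initialize_particle(rng, domain, sphere)
print(p.fitness, p.best_position.shape, p.velocity.sum())  # velocity starts at 0
```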