Column schema (types and value ranges as reported for this dump):

    column            type           values
    ----------------  -------------  -------------------
    repo              string         7-54 chars
    path              string         4-192 chars
    url               string         87-284 chars
    code              string         78-104k chars
    code_tokens       list           -
    docstring         string         1-46.9k chars
    docstring_tokens  list           -
    language          string class   1 distinct value
    partition         string class   3 distinct values
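The schema matches a CodeSearchNet-style corpus of Python functions paired with their docstrings. As a minimal sketch of how rows with this shape are typically consumed (the file name and split layout below are assumptions for illustration, not taken from this dump), the Hugging Face `datasets` loader works directly on a JSON-lines export:

    from datasets import load_dataset

    # Hypothetical local export of the records shown below.
    ds = load_dataset("json", data_files={"train": "rows.jsonl"})
    example = ds["train"][0]
    print(example["repo"], example["path"])
    print(example["code"][:80])          # flattened source string
    print(len(example["code_tokens"]))   # pre-tokenized variant of the same code

The records themselves follow.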
CivicSpleen/ambry
ambry/orm/config.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/config.py#L260-L267
def new_datetime(self):
    """Return the time the bundle was created as a datetime object"""
    from datetime import datetime
    try:
        return datetime.fromtimestamp(self.state.new)
    except TypeError:
        return None
[ "def", "new_datetime", "(", "self", ")", ":", "from", "datetime", "import", "datetime", "try", ":", "return", "datetime", ".", "fromtimestamp", "(", "self", ".", "state", ".", "new", ")", "except", "TypeError", ":", "return", "None" ]
Return the time the bundle was created as a datetime object
[ "Return", "the", "time", "the", "bundle", "was", "created", "as", "a", "datetime", "object" ]
python
train
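As a side note on the pattern above: `datetime.fromtimestamp` raises `TypeError` for `None` (and other non-numbers), which is what lets the method report "no recorded creation time" as `None`. A self-contained sketch using only the standard library (the timestamp values are invented):

    from datetime import datetime

    def to_datetime(ts):
        # Mirrors new_datetime: unset timestamp -> None
        try:
            return datetime.fromtimestamp(ts)
        except TypeError:
            return None

    print(to_datetime(0))     # 1970-01-01 shifted by the local UTC offset
    print(to_datetime(None))  # None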
pypa/pipenv
pipenv/vendor/passa/internals/_pip_shims.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/internals/_pip_shims.py#L24-L38
def _build_wheel_modern(ireq, output_dir, finder, wheel_cache, kwargs):
    """Build a wheel.

    * ireq: The InstallRequirement object to build
    * output_dir: The directory to build the wheel in.
    * finder: pip's internal Finder object to find the source out of ireq.
    * kwargs: Various keyword arguments from `_prepare_wheel_building_kwargs`.
    """
    kwargs.update({"progress_bar": "off", "build_isolation": False})
    with pip_shims.RequirementTracker() as req_tracker:
        if req_tracker:
            kwargs["req_tracker"] = req_tracker
        preparer = pip_shims.RequirementPreparer(**kwargs)
        builder = pip_shims.WheelBuilder(finder, preparer, wheel_cache)
        return builder._build_one(ireq, output_dir)
[ "def", "_build_wheel_modern", "(", "ireq", ",", "output_dir", ",", "finder", ",", "wheel_cache", ",", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "\"progress_bar\"", ":", "\"off\"", ",", "\"build_isolation\"", ":", "False", "}", ")", "with", "pip_shims", ".", "RequirementTracker", "(", ")", "as", "req_tracker", ":", "if", "req_tracker", ":", "kwargs", "[", "\"req_tracker\"", "]", "=", "req_tracker", "preparer", "=", "pip_shims", ".", "RequirementPreparer", "(", "*", "*", "kwargs", ")", "builder", "=", "pip_shims", ".", "WheelBuilder", "(", "finder", ",", "preparer", ",", "wheel_cache", ")", "return", "builder", ".", "_build_one", "(", "ireq", ",", "output_dir", ")" ]
Build a wheel. * ireq: The InstallRequirement object to build * output_dir: The directory to build the wheel in. * finder: pip's internal Finder object to find the source out of ireq. * kwargs: Various keyword arguments from `_prepare_wheel_building_kwargs`.
[ "Build", "a", "wheel", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L490-L514
def __fixup_labels(self, disasm):
    """
    Private method used when disassembling from process memory.

    It has no return value because the list is modified in place. On return
    all raw memory addresses are replaced by labels when possible.

    @type  disasm: list of tuple(int, int, str, str)
    @param disasm: Output of one of the disassembly functions.
    """
    for index in compat.xrange(len(disasm)):
        (address, size, text, dump) = disasm[index]
        m = self.__hexa_parameter.search(text)
        while m:
            s, e = m.span()
            value = text[s:e]
            try:
                label = self.get_label_at_address(int(value, 0x10))
            except Exception:
                label = None
            if label:
                text = text[:s] + label + text[e:]
                e = s + len(value)
            m = self.__hexa_parameter.search(text, e)
        disasm[index] = (address, size, text, dump)
[ "def", "__fixup_labels", "(", "self", ",", "disasm", ")", ":", "for", "index", "in", "compat", ".", "xrange", "(", "len", "(", "disasm", ")", ")", ":", "(", "address", ",", "size", ",", "text", ",", "dump", ")", "=", "disasm", "[", "index", "]", "m", "=", "self", ".", "__hexa_parameter", ".", "search", "(", "text", ")", "while", "m", ":", "s", ",", "e", "=", "m", ".", "span", "(", ")", "value", "=", "text", "[", "s", ":", "e", "]", "try", ":", "label", "=", "self", ".", "get_label_at_address", "(", "int", "(", "value", ",", "0x10", ")", ")", "except", "Exception", ":", "label", "=", "None", "if", "label", ":", "text", "=", "text", "[", ":", "s", "]", "+", "label", "+", "text", "[", "e", ":", "]", "e", "=", "s", "+", "len", "(", "value", ")", "m", "=", "self", ".", "__hexa_parameter", ".", "search", "(", "text", ",", "e", ")", "disasm", "[", "index", "]", "=", "(", "address", ",", "size", ",", "text", ",", "dump", ")" ]
Private method used when disassembling from process memory. It has no return value because the list is modified in place. On return all raw memory addresses are replaced by labels when possible. @type disasm: list of tuple(int, int, str, str) @param disasm: Output of one of the disassembly functions.
[ "Private", "method", "used", "when", "disassembling", "from", "process", "memory", "." ]
python
train
adrn/gala
gala/dynamics/_genfunc/genfunc_3d.py
https://github.com/adrn/gala/blob/ea95575a0df1581bb4b0986aebd6eea8438ab7eb/gala/dynamics/_genfunc/genfunc_3d.py#L30-L44
def check_angle_solution(ang, n_vec, toy_aa, timeseries):
    """ Plots the toy angle solution against the toy angles ---
        Takes true angles and frequencies ang, the Fourier vectors n_vec,
        the toy action-angles toy_aa and the timeseries """
    f, a = plt.subplots(3, 1)
    for i in range(3):
        a[i].plot(toy_aa.T[i + 3], '.')
        # Integer division: size is used as a slice index below
        # (a plain / would produce a float and break on Python 3).
        size = len(ang[6:]) // 3
        AA = np.array([np.sum(ang[6 + i * size:6 + (i + 1) * size] *
                              np.sin(np.sum(n_vec * K, axis=1)))
                       for K in toy_aa.T[3:].T])
        a[i].plot((ang[i] + ang[i + 3] * timeseries - 2. * AA) % (2. * np.pi), '.')
        a[i].set_ylabel(r'$\theta$' + str(i + 1))
    a[2].set_xlabel(r'$t$')
    plt.show()
[ "def", "check_angle_solution", "(", "ang", ",", "n_vec", ",", "toy_aa", ",", "timeseries", ")", ":", "f", ",", "a", "=", "plt", ".", "subplots", "(", "3", ",", "1", ")", "for", "i", "in", "range", "(", "3", ")", ":", "a", "[", "i", "]", ".", "plot", "(", "toy_aa", ".", "T", "[", "i", "+", "3", "]", ",", "'.'", ")", "size", "=", "len", "(", "ang", "[", "6", ":", "]", ")", "/", "3", "AA", "=", "np", ".", "array", "(", "[", "np", ".", "sum", "(", "ang", "[", "6", "+", "i", "*", "size", ":", "6", "+", "(", "i", "+", "1", ")", "*", "size", "]", "*", "np", ".", "sin", "(", "np", ".", "sum", "(", "n_vec", "*", "K", ",", "axis", "=", "1", ")", ")", ")", "for", "K", "in", "toy_aa", ".", "T", "[", "3", ":", "]", ".", "T", "]", ")", "a", "[", "i", "]", ".", "plot", "(", "(", "ang", "[", "i", "]", "+", "ang", "[", "i", "+", "3", "]", "*", "timeseries", "-", "2.", "*", "AA", ")", "%", "(", "2.", "*", "np", ".", "pi", ")", ",", "'.'", ")", "a", "[", "i", "]", ".", "set_ylabel", "(", "r'$\\theta$'", "+", "str", "(", "i", "+", "1", ")", ")", "a", "[", "2", "]", ".", "set_xlabel", "(", "r'$t$'", ")", "plt", ".", "show", "(", ")" ]
Plots the toy angle solution against the toy angles --- Takes true angles and frequencies ang, the Fourier vectors n_vec, the toy action-angles toy_aa and the timeseries
[ "Plots", "the", "toy", "angle", "solution", "against", "the", "toy", "angles", "---", "Takes", "true", "angles", "and", "frequencies", "ang", "the", "Fourier", "vectors", "n_vec", "the", "toy", "action", "-", "angles", "toy_aa", "and", "the", "timeseries" ]
python
train
pandas-dev/pandas
pandas/core/arrays/categorical.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1126-L1206
def map(self, mapper):
    """
    Map categories using input correspondence (dict, Series, or function).

    Maps the categories to new categories. If the mapping correspondence is
    one-to-one the result is a :class:`~pandas.Categorical` which has the
    same order property as the original, otherwise a :class:`~pandas.Index`
    is returned. NaN values are unaffected.

    If a `dict` or :class:`~pandas.Series` is used any unmapped category is
    mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
    will be returned.

    Parameters
    ----------
    mapper : function, dict, or Series
        Mapping correspondence.

    Returns
    -------
    pandas.Categorical or pandas.Index
        Mapped categorical.

    See Also
    --------
    CategoricalIndex.map : Apply a mapping correspondence on a
        :class:`~pandas.CategoricalIndex`.
    Index.map : Apply a mapping correspondence on an
        :class:`~pandas.Index`.
    Series.map : Apply a mapping correspondence on a
        :class:`~pandas.Series`.
    Series.apply : Apply more complex functions on a
        :class:`~pandas.Series`.

    Examples
    --------
    >>> cat = pd.Categorical(['a', 'b', 'c'])
    >>> cat
    [a, b, c]
    Categories (3, object): [a, b, c]
    >>> cat.map(lambda x: x.upper())
    [A, B, C]
    Categories (3, object): [A, B, C]
    >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
    [first, second, third]
    Categories (3, object): [first, second, third]

    If the mapping is one-to-one the ordering of the categories is
    preserved:

    >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
    >>> cat
    [a, b, c]
    Categories (3, object): [a < b < c]
    >>> cat.map({'a': 3, 'b': 2, 'c': 1})
    [3, 2, 1]
    Categories (3, int64): [3 < 2 < 1]

    If the mapping is not one-to-one an :class:`~pandas.Index` is returned:

    >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
    Index(['first', 'second', 'first'], dtype='object')

    If a `dict` is used, all unmapped categories are mapped to `NaN` and
    the result is an :class:`~pandas.Index`:

    >>> cat.map({'a': 'first', 'b': 'second'})
    Index(['first', 'second', nan], dtype='object')
    """
    new_categories = self.categories.map(mapper)
    try:
        return self.from_codes(self._codes.copy(),
                               categories=new_categories,
                               ordered=self.ordered)
    except ValueError:
        # NA values are represented in self._codes with -1
        # np.take causes NA values to take final element in new_categories
        if np.any(self._codes == -1):
            new_categories = new_categories.insert(len(new_categories),
                                                   np.nan)
        return np.take(new_categories, self._codes)
[ "def", "map", "(", "self", ",", "mapper", ")", ":", "new_categories", "=", "self", ".", "categories", ".", "map", "(", "mapper", ")", "try", ":", "return", "self", ".", "from_codes", "(", "self", ".", "_codes", ".", "copy", "(", ")", ",", "categories", "=", "new_categories", ",", "ordered", "=", "self", ".", "ordered", ")", "except", "ValueError", ":", "# NA values are represented in self._codes with -1", "# np.take causes NA values to take final element in new_categories", "if", "np", ".", "any", "(", "self", ".", "_codes", "==", "-", "1", ")", ":", "new_categories", "=", "new_categories", ".", "insert", "(", "len", "(", "new_categories", ")", ",", "np", ".", "nan", ")", "return", "np", ".", "take", "(", "new_categories", ",", "self", ".", "_codes", ")" ]
Map categories using input correspondence (dict, Series, or function). Maps the categories to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.Categorical` which has the same order property as the original, otherwise a :class:`~pandas.Index` is returned. NaN values are unaffected. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.Categorical or pandas.Index Mapped categorical. See Also -------- CategoricalIndex.map : Apply a mapping correspondence on a :class:`~pandas.CategoricalIndex`. Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> cat = pd.Categorical(['a', 'b', 'c']) >>> cat [a, b, c] Categories (3, object): [a, b, c] >>> cat.map(lambda x: x.upper()) [A, B, C] Categories (3, object): [A, B, C] >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}) [first, second, third] Categories (3, object): [first, second, third] If the mapping is one-to-one the ordering of the categories is preserved: >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) >>> cat [a, b, c] Categories (3, object): [a < b < c] >>> cat.map({'a': 3, 'b': 2, 'c': 1}) [3, 2, 1] Categories (3, int64): [3 < 2 < 1] If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> cat.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object')
[ "Map", "categories", "using", "input", "correspondence", "(", "dict", "Series", "or", "function", ")", "." ]
python
train
summa-tx/riemann
riemann/tx/shared.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/shared.py#L180-L212
def from_bytes(VarInt, byte_string):
    '''
    byte-like -> VarInt
    accepts arbitrary length input, gets a VarInt off the front
    '''
    num = byte_string
    if num[0] <= 0xfc:
        num = num[0:1]
        non_compact = False
    elif num[0] == 0xfd:
        num = num[1:3]
        non_compact = (num[-1:] == b'\x00')
    elif num[0] == 0xfe:
        num = num[1:5]
        non_compact = (num[-2:] == b'\x00\x00')
    elif num[0] == 0xff:
        num = num[1:9]
        non_compact = (num[-4:] == b'\x00\x00\x00\x00')
    if len(num) not in [1, 2, 4, 8]:
        raise ValueError('Malformed VarInt. Got: {}'
                         .format(byte_string.hex()))
    if (non_compact
            and ('overwinter' in riemann.get_current_network_name()
                 or 'sapling' in riemann.get_current_network_name())):
        raise ValueError('VarInt must be compact. Got: {}'
                         .format(byte_string.hex()))
    ret = VarInt(utils.le2i(num),
                 length=len(num) + 1 if non_compact else 0)
    return ret
[ "def", "from_bytes", "(", "VarInt", ",", "byte_string", ")", ":", "num", "=", "byte_string", "if", "num", "[", "0", "]", "<=", "0xfc", ":", "num", "=", "num", "[", "0", ":", "1", "]", "non_compact", "=", "False", "elif", "num", "[", "0", "]", "==", "0xfd", ":", "num", "=", "num", "[", "1", ":", "3", "]", "non_compact", "=", "(", "num", "[", "-", "1", ":", "]", "==", "b'\\x00'", ")", "elif", "num", "[", "0", "]", "==", "0xfe", ":", "num", "=", "num", "[", "1", ":", "5", "]", "non_compact", "=", "(", "num", "[", "-", "2", ":", "]", "==", "b'\\x00\\x00'", ")", "elif", "num", "[", "0", "]", "==", "0xff", ":", "num", "=", "num", "[", "1", ":", "9", "]", "non_compact", "=", "(", "num", "[", "-", "4", ":", "]", "==", "b'\\x00\\x00\\x00\\x00'", ")", "if", "len", "(", "num", ")", "not", "in", "[", "1", ",", "2", ",", "4", ",", "8", "]", ":", "raise", "ValueError", "(", "'Malformed VarInt. Got: {}'", ".", "format", "(", "byte_string", ".", "hex", "(", ")", ")", ")", "if", "(", "non_compact", "and", "(", "'overwinter'", "in", "riemann", ".", "get_current_network_name", "(", ")", "or", "'sapling'", "in", "riemann", ".", "get_current_network_name", "(", ")", ")", ")", ":", "raise", "ValueError", "(", "'VarInt must be compact. Got: {}'", ".", "format", "(", "byte_string", ".", "hex", "(", ")", ")", ")", "ret", "=", "VarInt", "(", "utils", ".", "le2i", "(", "num", ")", ",", "length", "=", "len", "(", "num", ")", "+", "1", "if", "non_compact", "else", "0", ")", "return", "ret" ]
byte-like -> VarInt accepts arbitrary length input, gets a VarInt off the front
[ "byte", "-", "like", "-", ">", "VarInt", "accepts", "arbitrary", "length", "input", "gets", "a", "VarInt", "off", "the", "front" ]
python
train
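For readers unfamiliar with the encoding, the branches above follow the standard Bitcoin CompactSize prefix rules. A hedged standalone restatement (generic decoder only; no riemann networks or compactness checks):

    def read_varint(byte_string):
        # Returns (value, bytes consumed).
        prefix = byte_string[0]
        if prefix <= 0xfc:
            return prefix, 1                      # value is the prefix itself
        width = {0xfd: 2, 0xfe: 4, 0xff: 8}[prefix]
        return int.from_bytes(byte_string[1:1 + width], 'little'), 1 + width

    assert read_varint(b'\x05') == (5, 1)
    assert read_varint(b'\xfd\x00\x01') == (256, 3)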
nephila/djangocms-installer
djangocms_installer/django/__init__.py
https://github.com/nephila/djangocms-installer/blob/9fec66d5f8b1e9a0f3c0ec66dd777db578fab07e/djangocms_installer/django/__init__.py#L448-L465
def create_user(config_data):
    """
    Create admin user without user input

    :param config_data: configuration data
    """
    with chdir(os.path.abspath(config_data.project_directory)):
        env = deepcopy(dict(os.environ))
        env[str('DJANGO_SETTINGS_MODULE')] = str('{0}.settings'.format(config_data.project_name))
        env[str('PYTHONPATH')] = str(os.pathsep.join(map(shlex_quote, sys.path)))
        subprocess.check_call(
            [sys.executable, 'create_user.py'],
            env=env, stderr=subprocess.STDOUT
        )
        for ext in ['py', 'pyc']:
            try:
                os.remove('create_user.{0}'.format(ext))
            except OSError:
                pass
[ "def", "create_user", "(", "config_data", ")", ":", "with", "chdir", "(", "os", ".", "path", ".", "abspath", "(", "config_data", ".", "project_directory", ")", ")", ":", "env", "=", "deepcopy", "(", "dict", "(", "os", ".", "environ", ")", ")", "env", "[", "str", "(", "'DJANGO_SETTINGS_MODULE'", ")", "]", "=", "str", "(", "'{0}.settings'", ".", "format", "(", "config_data", ".", "project_name", ")", ")", "env", "[", "str", "(", "'PYTHONPATH'", ")", "]", "=", "str", "(", "os", ".", "pathsep", ".", "join", "(", "map", "(", "shlex_quote", ",", "sys", ".", "path", ")", ")", ")", "subprocess", ".", "check_call", "(", "[", "sys", ".", "executable", ",", "'create_user.py'", "]", ",", "env", "=", "env", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "for", "ext", "in", "[", "'py'", ",", "'pyc'", "]", ":", "try", ":", "os", ".", "remove", "(", "'create_user.{0}'", ".", "format", "(", "ext", ")", ")", "except", "OSError", ":", "pass" ]
Create admin user without user input :param config_data: configuration data
[ "Create", "admin", "user", "without", "user", "input" ]
python
valid
sparklingpandas/sparklingpandas
sparklingpandas/custom_functions.py
https://github.com/sparklingpandas/sparklingpandas/blob/7d549df4348c979042b683c355aa778fc6d3a768/sparklingpandas/custom_functions.py#L10-L20
def _create_function(name, doc=""):
    """ Create a function for aggregator by name"""
    def _(col):
        spark_ctx = SparkContext._active_spark_context
        java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions, name)
                    (col._java_ctx if isinstance(col, Column) else col))
        return Column(java_ctx)
    _.__name__ = name
    _.__doc__ = doc
    return _
[ "def", "_create_function", "(", "name", ",", "doc", "=", "\"\"", ")", ":", "def", "_", "(", "col", ")", ":", "spark_ctx", "=", "SparkContext", ".", "_active_spark_context", "java_ctx", "=", "(", "getattr", "(", "spark_ctx", ".", "_jvm", ".", "com", ".", "sparklingpandas", ".", "functions", ",", "name", ")", "(", "col", ".", "_java_ctx", "if", "isinstance", "(", "col", ",", "Column", ")", "else", "col", ")", ")", "return", "Column", "(", "java_ctx", ")", "_", ".", "__name__", "=", "name", "_", ".", "__doc__", "=", "doc", "return", "_" ]
Create a function for aggregator by name
[ "Create", "a", "function", "for", "aggregator", "by", "name" ]
python
train
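The row above is a closure-factory idiom: stamp out many near-identical wrappers from a list of names, then patch `__name__` and `__doc__` so the generated functions introspect cleanly. A Spark-free toy version (all names here are invented for illustration):

    def _make_tagger(name, doc=""):
        def _(value):
            # Stand-in for the JVM-backed call in _create_function.
            return "{}: {}".format(name, value)
        _.__name__ = name
        _.__doc__ = doc
        return _

    shout = _make_tagger("shout", doc="Toy stand-in for an aggregator.")
    print(shout.__name__)  # 'shout'
    print(shout("col_a"))  # 'shout: col_a'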
maas/python-libmaas
maas/client/bones/__init__.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/bones/__init__.py#L85-L97
async def connect(cls, url, *, apikey=None, insecure=False):
    """Make a `SessionAPI` by connecting with an apikey.

    :return: A tuple of ``profile`` and ``session``, where the former is
        an unsaved `Profile` instance, and the latter is a `SessionAPI`
        instance made using the profile.
    """
    profile = await helpers.connect(url=url, apikey=apikey, insecure=insecure)
    session = cls(profile.description, profile.credentials)
    session.insecure = insecure
    return profile, session
[ "async", "def", "connect", "(", "cls", ",", "url", ",", "*", ",", "apikey", "=", "None", ",", "insecure", "=", "False", ")", ":", "profile", "=", "await", "helpers", ".", "connect", "(", "url", "=", "url", ",", "apikey", "=", "apikey", ",", "insecure", "=", "insecure", ")", "session", "=", "cls", "(", "profile", ".", "description", ",", "profile", ".", "credentials", ")", "session", ".", "insecure", "=", "insecure", "return", "profile", ",", "session" ]
Make a `SessionAPI` by connecting with an apikey. :return: A tuple of ``profile`` and ``session``, where the former is an unsaved `Profile` instance, and the latter is a `SessionAPI` instance made using the profile.
[ "Make", "a", "SessionAPI", "by", "connecting", "with", "an", "apikey", "." ]
python
train
bapakode/OmMongo
ommongo/query.py
https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/query.py#L594-L598
def nin(self, qfield, *values):
    ''' Works the same as the query expression method ``nin_`` '''
    self.__query_obj.nin(qfield, *values)
    return self
[ "def", "nin", "(", "self", ",", "qfield", ",", "*", "values", ")", ":", "self", ".", "__query_obj", ".", "nin", "(", "qfield", ",", "*", "values", ")", "return", "self" ]
Works the same as the query expression method ``nin_``
[ "Works", "the", "same", "as", "the", "query", "expression", "method", "nin_" ]
python
train
BlackEarth/bxml
bxml/xml.py
https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xml.py#L751-L758
def wrap_content(cls, container, wrapper):
    "wrap the content of container element with wrapper element"
    wrapper.text = (container.text or '') + (wrapper.text or '')
    container.text = ''
    for ch in container:
        wrapper.append(ch)
    container.insert(0, wrapper)
    return container
[ "def", "wrap_content", "(", "cls", ",", "container", ",", "wrapper", ")", ":", "wrapper", ".", "text", "=", "(", "container", ".", "text", "or", "''", ")", "+", "(", "wrapper", ".", "text", "or", "''", ")", "container", ".", "text", "=", "''", "for", "ch", "in", "container", ":", "wrapper", ".", "append", "(", "ch", ")", "container", ".", "insert", "(", "0", ",", "wrapper", ")", "return", "container" ]
wrap the content of container element with wrapper element
[ "wrap", "the", "content", "of", "container", "element", "with", "wrapper", "element" ]
python
train
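A runnable sketch of the same steps against lxml (one caveat, not from the source: `append` reparents each child, so iterating a live element while moving its children can skip siblings; copying with `list(container)` first is safer than the bare loop above):

    from lxml import etree

    container = etree.fromstring('<p>hello <b>world</b>!</p>')
    wrapper = etree.Element('em')
    wrapper.text = (container.text or '') + (wrapper.text or '')
    container.text = ''
    for ch in list(container):   # copy first; append() moves each child
        wrapper.append(ch)
    container.insert(0, wrapper)
    print(etree.tostring(container))  # b'<p><em>hello <b>world</b>!</em></p>'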
IBMStreams/pypi.streamsx
streamsx/rest_primitives.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L1223-L1245
def retrieve_console_log(self, filename=None, dir=None):
    """Retrieves the application console log (standard out and error)
    files for this PE and saves them as a plain text file.

    An existing file with the same name will be overwritten.

    Args:
        filename (str): name of the created file. Defaults to
            `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier
            and `timestamp` is the number of seconds since the Unix epoch,
            for example ``pe_83_1511995995.trace``.
        dir (str): a valid directory in which to save the file. Defaults
            to the current directory.

    Returns:
        str: the path to the created file, or None if retrieving a job's
            logs is not supported in the version of streams to which the
            job is submitted.

    .. versionadded:: 1.9
    """
    if hasattr(self, "consoleLog") and self.consoleLog is not None:
        logger.debug("Retrieving PE console log: " + self.consoleLog)
        if not filename:
            filename = _file_name('pe', self.id, '.stdouterr')
        return self.rest_client._retrieve_file(self.consoleLog, filename, dir, 'text/plain')
    else:
        return None
[ "def", "retrieve_console_log", "(", "self", ",", "filename", "=", "None", ",", "dir", "=", "None", ")", ":", "if", "hasattr", "(", "self", ",", "\"consoleLog\"", ")", "and", "self", ".", "consoleLog", "is", "not", "None", ":", "logger", ".", "debug", "(", "\"Retrieving PE console log: \"", "+", "self", ".", "consoleLog", ")", "if", "not", "filename", ":", "filename", "=", "_file_name", "(", "'pe'", ",", "self", ".", "id", ",", "'.stdouterr'", ")", "return", "self", ".", "rest_client", ".", "_retrieve_file", "(", "self", ".", "consoleLog", ",", "filename", ",", "dir", ",", "'text/plain'", ")", "else", ":", "return", "None" ]
Retrieves the application console log (standard out and error) files for this PE and saves them as a plain text file. An existing file with the same name will be overwritten. Args: filename (str): name of the created file. Defaults to `pe_<id>_<timestamp>.stdouterr` where `id` is the PE identifier and `timestamp` is the number of seconds since the Unix epoch, for example ``pe_83_1511995995.trace``. dir (str): a valid directory in which to save the file. Defaults to the current directory. Returns: str: the path to the created file, or None if retrieving a job's logs is not supported in the version of streams to which the job is submitted. .. versionadded:: 1.9
[ "Retrieves", "the", "application", "console", "log", "(", "standard", "out", "and", "error", ")", "files", "for", "this", "PE", "and", "saves", "them", "as", "a", "plain", "text", "file", "." ]
python
train
jaraco/jaraco.functools
jaraco/functools.py
https://github.com/jaraco/jaraco.functools/blob/cc972095e5aa2ae80d1d69d7ca84ee94178e869a/jaraco/functools.py#L386-L423
def assign_params(func, namespace):
    """
    Assign parameters from namespace where func solicits.

    >>> def func(x, y=3):
    ...     print(x, y)
    >>> assigned = assign_params(func, dict(x=2, z=4))
    >>> assigned()
    2 3

    The usual errors are raised if a function doesn't receive
    its required parameters:

    >>> assigned = assign_params(func, dict(y=3, z=4))
    >>> assigned()
    Traceback (most recent call last):
    TypeError: func() ...argument...

    It even works on methods:

    >>> class Handler:
    ...     def meth(self, arg):
    ...         print(arg)
    >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
    crystal
    """
    try:
        sig = inspect.signature(func)
        params = sig.parameters.keys()
    except AttributeError:
        spec = inspect.getargspec(func)
        params = spec.args
    call_ns = {
        k: namespace[k]
        for k in params
        if k in namespace
    }
    return functools.partial(func, **call_ns)
[ "def", "assign_params", "(", "func", ",", "namespace", ")", ":", "try", ":", "sig", "=", "inspect", ".", "signature", "(", "func", ")", "params", "=", "sig", ".", "parameters", ".", "keys", "(", ")", "except", "AttributeError", ":", "spec", "=", "inspect", ".", "getargspec", "(", "func", ")", "params", "=", "spec", ".", "args", "call_ns", "=", "{", "k", ":", "namespace", "[", "k", "]", "for", "k", "in", "params", "if", "k", "in", "namespace", "}", "return", "functools", ".", "partial", "(", "func", ",", "*", "*", "call_ns", ")" ]
Assign parameters from namespace where func solicits. >>> def func(x, y=3): ... print(x, y) >>> assigned = assign_params(func, dict(x=2, z=4)) >>> assigned() 2 3 The usual errors are raised if a function doesn't receive its required parameters: >>> assigned = assign_params(func, dict(y=3, z=4)) >>> assigned() Traceback (most recent call last): TypeError: func() ...argument... It even works on methods: >>> class Handler: ... def meth(self, arg): ... print(arg) >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() crystal
[ "Assign", "parameters", "from", "namespace", "where", "func", "solicits", "." ]
python
train
janpipek/physt
physt/plotting/matplotlib.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/matplotlib.py#L415-L450
def image(h2: Histogram2D, ax: Axes, *, show_colorbar: bool = True,
          interpolation: str = "nearest", **kwargs):
    """Plot of 2D histograms based on pixmaps.

    Similar to map, but it:
    - has fewer options
    - is much more effective (enables thousands)
    - does not support irregular bins

    Parameters
    ----------
    interpolation: interpolation parameter passed to imshow,
        default: "nearest" (creates rectangles)
    """
    cmap = _get_cmap(kwargs)   # h2 as well?
    data = get_data(h2, cumulative=False, density=kwargs.pop("density", False))
    norm, cmap_data = _get_cmap_data(data, kwargs)
    # zorder = kwargs.pop("zorder", None)

    for binning in h2._binnings:
        if not binning.is_regular():
            raise RuntimeError(
                "Histograms with irregular bins cannot be plotted using image method.")

    kwargs["interpolation"] = interpolation
    if kwargs.get("xscale") == "log" or kwargs.get("yscale") == "log":
        raise RuntimeError("Cannot use logarithmic axes with image plots.")

    _apply_xy_lims(ax, h2, data=data, kwargs=kwargs)
    _add_labels(ax, h2, kwargs)

    ax.imshow(data.T[::-1, :], cmap=cmap, norm=norm,
              extent=(h2.bins[0][0, 0], h2.bins[0][-1, 1],
                      h2.bins[1][0, 0], h2.bins[1][-1, 1]),
              aspect="auto", **kwargs)

    if show_colorbar:
        _add_colorbar(ax, cmap, cmap_data, norm)
[ "def", "image", "(", "h2", ":", "Histogram2D", ",", "ax", ":", "Axes", ",", "*", ",", "show_colorbar", ":", "bool", "=", "True", ",", "interpolation", ":", "str", "=", "\"nearest\"", ",", "*", "*", "kwargs", ")", ":", "cmap", "=", "_get_cmap", "(", "kwargs", ")", "# h2 as well?", "data", "=", "get_data", "(", "h2", ",", "cumulative", "=", "False", ",", "density", "=", "kwargs", ".", "pop", "(", "\"density\"", ",", "False", ")", ")", "norm", ",", "cmap_data", "=", "_get_cmap_data", "(", "data", ",", "kwargs", ")", "# zorder = kwargs.pop(\"zorder\", None)", "for", "binning", "in", "h2", ".", "_binnings", ":", "if", "not", "binning", ".", "is_regular", "(", ")", ":", "raise", "RuntimeError", "(", "\"Histograms with irregular bins cannot be plotted using image method.\"", ")", "kwargs", "[", "\"interpolation\"", "]", "=", "interpolation", "if", "kwargs", ".", "get", "(", "\"xscale\"", ")", "==", "\"log\"", "or", "kwargs", ".", "get", "(", "\"yscale\"", ")", "==", "\"log\"", ":", "raise", "RuntimeError", "(", "\"Cannot use logarithmic axes with image plots.\"", ")", "_apply_xy_lims", "(", "ax", ",", "h2", ",", "data", "=", "data", ",", "kwargs", "=", "kwargs", ")", "_add_labels", "(", "ax", ",", "h2", ",", "kwargs", ")", "ax", ".", "imshow", "(", "data", ".", "T", "[", ":", ":", "-", "1", ",", ":", "]", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "extent", "=", "(", "h2", ".", "bins", "[", "0", "]", "[", "0", ",", "0", "]", ",", "h2", ".", "bins", "[", "0", "]", "[", "-", "1", ",", "1", "]", ",", "h2", ".", "bins", "[", "1", "]", "[", "0", ",", "0", "]", ",", "h2", ".", "bins", "[", "1", "]", "[", "-", "1", ",", "1", "]", ")", ",", "aspect", "=", "\"auto\"", ",", "*", "*", "kwargs", ")", "if", "show_colorbar", ":", "_add_colorbar", "(", "ax", ",", "cmap", ",", "cmap_data", ",", "norm", ")" ]
Plot of 2D histograms based on pixmaps. Similar to map, but it: - has fewer options - is much more effective (enables thousands) - does not support irregular bins Parameters ---------- interpolation: interpolation parameter passed to imshow, default: "nearest" (creates rectangles)
[ "Plot", "of", "2D", "histograms", "based", "on", "pixmaps", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/lstm.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/lstm.py#L206-L225
def lstm_seq2seq_internal_attention(inputs, targets, hparams, train,
                                    inputs_length, targets_length):
    """LSTM seq2seq model with attention, main step used for training."""
    with tf.variable_scope("lstm_seq2seq_attention"):
        # Flatten inputs.
        inputs = common_layers.flatten4d3d(inputs)

        # LSTM encoder.
        inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
        encoder_outputs, final_encoder_state = lstm(
            inputs, inputs_length, hparams, train, "encoder")

        # LSTM decoder with attention.
        shifted_targets = common_layers.shift_right(targets)
        # Add 1 to account for the padding added to the left from shift_right
        targets_length = targets_length + 1
        decoder_outputs = lstm_attention_decoder(
            common_layers.flatten4d3d(shifted_targets), hparams, train,
            "decoder", final_encoder_state, encoder_outputs,
            inputs_length, targets_length)
        return tf.expand_dims(decoder_outputs, axis=2)
[ "def", "lstm_seq2seq_internal_attention", "(", "inputs", ",", "targets", ",", "hparams", ",", "train", ",", "inputs_length", ",", "targets_length", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"lstm_seq2seq_attention\"", ")", ":", "# Flatten inputs.", "inputs", "=", "common_layers", ".", "flatten4d3d", "(", "inputs", ")", "# LSTM encoder.", "inputs", "=", "tf", ".", "reverse_sequence", "(", "inputs", ",", "inputs_length", ",", "seq_axis", "=", "1", ")", "encoder_outputs", ",", "final_encoder_state", "=", "lstm", "(", "inputs", ",", "inputs_length", ",", "hparams", ",", "train", ",", "\"encoder\"", ")", "# LSTM decoder with attention.", "shifted_targets", "=", "common_layers", ".", "shift_right", "(", "targets", ")", "# Add 1 to account for the padding added to the left from shift_right", "targets_length", "=", "targets_length", "+", "1", "decoder_outputs", "=", "lstm_attention_decoder", "(", "common_layers", ".", "flatten4d3d", "(", "shifted_targets", ")", ",", "hparams", ",", "train", ",", "\"decoder\"", ",", "final_encoder_state", ",", "encoder_outputs", ",", "inputs_length", ",", "targets_length", ")", "return", "tf", ".", "expand_dims", "(", "decoder_outputs", ",", "axis", "=", "2", ")" ]
LSTM seq2seq model with attention, main step used for training.
[ "LSTM", "seq2seq", "model", "with", "attention", "main", "step", "used", "for", "training", "." ]
python
train
keras-rl/keras-rl
rl/policy.py
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L348-L356
def get_config(self):
    """Return configurations of BoltzmannGumbelQPolicy

    # Returns
        Dict of config
    """
    config = super(BoltzmannGumbelQPolicy, self).get_config()
    config['C'] = self.C
    return config
[ "def", "get_config", "(", "self", ")", ":", "config", "=", "super", "(", "BoltzmannGumbelQPolicy", ",", "self", ")", ".", "get_config", "(", ")", "config", "[", "'C'", "]", "=", "self", ".", "C", "return", "config" ]
Return configurations of BoltzmannGumbelQPolicy # Returns Dict of config
[ "Return", "configurations", "of", "BoltzmannGumbelQPolicy" ]
python
train
jtwhite79/pyemu
pyemu/sc.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/sc.py#L420-L499
def get_par_contribution(self, parlist_dict=None, include_prior_results=False):
    """get a dataframe of the prior and posterior uncertainty
    reduction as a result of some parameter becoming perfectly known

    Parameters
    ----------
    parlist_dict : dict
        a nested dictionary-list of groups of parameters
        that are to be treated as perfectly known.  key values become
        row labels in returned dataframe.  If None, each adjustable
        parameter is sequentially treated as known and the returned
        dataframe has row labels for each adjustable parameter
    include_prior_results : bool
        flag to return a multi-indexed dataframe with both conditional
        prior and posterior forecast uncertainty estimates.  Default is False

    Returns
    -------
    pandas.DataFrame : pandas.DataFrame
        a dataframe that summarizes the parameter contribution analysis.
        The dataframe has index (row labels) of the keys in parlist_dict
        and column labels of forecast names.  The values in the dataframe
        are the posterior variance of the forecast conditional on perfect
        knowledge of the parameters in the values of parlist_dict.  Varies
        depending on `include_prior_results`.

    Example
    -------
    ``>>>import pyemu``

    ``>>>sc = pyemu.Schur(jco="pest.jcb")``

    ``>>>df = sc.get_par_contribution()``
    """
    self.log("calculating contribution from parameters")
    if parlist_dict is None:
        parlist_dict = {}  # dict(zip(self.pst.adj_par_names,self.pst.adj_par_names))
        # make sure all of the adjustable pars are in the jco
        for pname in self.pst.adj_par_names:
            if pname in self.jco.col_names:
                parlist_dict[pname] = pname
    else:
        if type(parlist_dict) == list:
            parlist_dict = dict(zip(parlist_dict, parlist_dict))

    results = {}
    names = ["base"]
    for forecast in self.prior_forecast.keys():
        pr = self.prior_forecast[forecast]
        pt = self.posterior_forecast[forecast]
        # reduce = 100.0 * ((pr - pt) / pr)
        results[(forecast, "prior")] = [pr]
        results[(forecast, "post")] = [pt]
        # results[(forecast,"percent_reduce")] = [reduce]
    for case_name, par_list in parlist_dict.items():
        if len(par_list) == 0:
            continue
        names.append(case_name)
        self.log("calculating contribution from: " + str(par_list))
        case_prior, case_post = self.__contribution_from_parameters(par_list)
        self.log("calculating contribution from: " + str(par_list))
        for forecast in case_prior.keys():
            pr = case_prior[forecast]
            pt = case_post[forecast]
            # reduce = 100.0 * ((pr - pt) / pr)
            results[(forecast, "prior")].append(pr)
            results[(forecast, "post")].append(pt)
            # results[(forecast, "percent_reduce")].append(reduce)

    df = pd.DataFrame(results, index=names)
    # base = df.loc["base",df.columns.get_level_values(1)=="post"]
    # df = 1.0 - (df.loc[:,df.columns.get_level_values(1)=="post"] / base)
    self.log("calculating contribution from parameters")
    if include_prior_results:
        return df
    else:
        df = df.xs("post", level=1, drop_level=True, axis=1)
        return df
[ "def", "get_par_contribution", "(", "self", ",", "parlist_dict", "=", "None", ",", "include_prior_results", "=", "False", ")", ":", "self", ".", "log", "(", "\"calculating contribution from parameters\"", ")", "if", "parlist_dict", "is", "None", ":", "parlist_dict", "=", "{", "}", "#dict(zip(self.pst.adj_par_names,self.pst.adj_par_names))", "# make sure all of the adjustable pars are in the jco", "for", "pname", "in", "self", ".", "pst", ".", "adj_par_names", ":", "if", "pname", "in", "self", ".", "jco", ".", "col_names", ":", "parlist_dict", "[", "pname", "]", "=", "pname", "else", ":", "if", "type", "(", "parlist_dict", ")", "==", "list", ":", "parlist_dict", "=", "dict", "(", "zip", "(", "parlist_dict", ",", "parlist_dict", ")", ")", "results", "=", "{", "}", "names", "=", "[", "\"base\"", "]", "for", "forecast", "in", "self", ".", "prior_forecast", ".", "keys", "(", ")", ":", "pr", "=", "self", ".", "prior_forecast", "[", "forecast", "]", "pt", "=", "self", ".", "posterior_forecast", "[", "forecast", "]", "#reduce = 100.0 * ((pr - pt) / pr)", "results", "[", "(", "forecast", ",", "\"prior\"", ")", "]", "=", "[", "pr", "]", "results", "[", "(", "forecast", ",", "\"post\"", ")", "]", "=", "[", "pt", "]", "#results[(forecast,\"percent_reduce\")] = [reduce]", "for", "case_name", ",", "par_list", "in", "parlist_dict", ".", "items", "(", ")", ":", "if", "len", "(", "par_list", ")", "==", "0", ":", "continue", "names", ".", "append", "(", "case_name", ")", "self", ".", "log", "(", "\"calculating contribution from: \"", "+", "str", "(", "par_list", ")", ")", "case_prior", ",", "case_post", "=", "self", ".", "__contribution_from_parameters", "(", "par_list", ")", "self", ".", "log", "(", "\"calculating contribution from: \"", "+", "str", "(", "par_list", ")", ")", "for", "forecast", "in", "case_prior", ".", "keys", "(", ")", ":", "pr", "=", "case_prior", "[", "forecast", "]", "pt", "=", "case_post", "[", "forecast", "]", "#reduce = 100.0 * ((pr - pt) / pr)", "results", "[", "(", "forecast", ",", "\"prior\"", ")", "]", ".", "append", "(", "pr", ")", "results", "[", "(", "forecast", ",", "\"post\"", ")", "]", ".", "append", "(", "pt", ")", "#results[(forecast, \"percent_reduce\")].append(reduce)", "df", "=", "pd", ".", "DataFrame", "(", "results", ",", "index", "=", "names", ")", "#base = df.loc[\"base\",df.columns.get_level_values(1)==\"post\"]", "#df = 1.0 - (df.loc[:,df.columns.get_level_values(1)==\"post\"] / base)", "self", ".", "log", "(", "\"calculating contribution from parameters\"", ")", "if", "include_prior_results", ":", "return", "df", "else", ":", "df", "=", "df", ".", "xs", "(", "\"post\"", ",", "level", "=", "1", ",", "drop_level", "=", "True", ",", "axis", "=", "1", ")", "return", "df" ]
get a dataframe of the prior and posterior uncertainty reduction as a result of some parameter becoming perfectly known Parameters ---------- parlist_dict : dict a nested dictionary-list of groups of parameters that are to be treated as perfectly known. key values become row labels in returned dataframe. If None, each adjustable parameter is sequentially treated as known and the returned dataframe has row labels for each adjustable parameter include_prior_results : bool flag to return a multi-indexed dataframe with both conditional prior and posterior forecast uncertainty estimates. Default is False Returns ------- pandas.DataFrame : pandas.DataFrame a dataframe that summarizes the parameter contribution analysis. The dataframe has index (row labels) of the keys in parlist_dict and column labels of forecast names. The values in the dataframe are the posterior variance of the forecast conditional on perfect knowledge of the parameters in the values of parlist_dict. Varies depending on `include_prior_results`. Example ------- ``>>>import pyemu`` ``>>>sc = pyemu.Schur(jco="pest.jcb")`` ``>>>df = sc.get_par_contribution()``
[ "get", "a", "dataframe", "the", "prior", "and", "posterior", "uncertainty", "reduction", "as", "a", "result", "of", "some", "parameter", "becoming", "perfectly", "known" ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/billing/billing.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/billing/billing.py#L114-L136
def get_report_overview(self, month, file_path):
    """Downloads a report overview

    :param month: month as datetime instance, or string in YYYY-MM format
    :type month: str or datetime
    :param str file_path: location to store output file
    :return: outcome
    :rtype: True or None
    """
    api = self._get_api(billing.DefaultApi)
    month = self._month_converter(month)
    response = api.get_billing_report(month=month)
    if file_path and response:
        content = api.api_client.sanitize_for_serialization(response.to_dict())
        with open(file_path, 'w') as fh:
            fh.write(
                json.dumps(
                    content,
                    sort_keys=True,
                    indent=2,
                )
            )
    return response
[ "def", "get_report_overview", "(", "self", ",", "month", ",", "file_path", ")", ":", "api", "=", "self", ".", "_get_api", "(", "billing", ".", "DefaultApi", ")", "month", "=", "self", ".", "_month_converter", "(", "month", ")", "response", "=", "api", ".", "get_billing_report", "(", "month", "=", "month", ")", "if", "file_path", "and", "response", ":", "content", "=", "api", ".", "api_client", ".", "sanitize_for_serialization", "(", "response", ".", "to_dict", "(", ")", ")", "with", "open", "(", "file_path", ",", "'w'", ")", "as", "fh", ":", "fh", ".", "write", "(", "json", ".", "dumps", "(", "content", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ",", ")", ")", "return", "response" ]
Downloads a report overview :param month: month as datetime instance, or string in YYYY-MM format :type month: str or datetime :param str file_path: location to store output file :return: outcome :rtype: True or None
[ "Downloads", "a", "report", "overview" ]
python
train
tamasgal/km3pipe
km3pipe/math.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/math.py#L403-L410
def g_parameter(time_residual):
    """stolen from thomas"""
    mean = np.mean(time_residual)
    time_residual_prime = (time_residual - np.ones(time_residual.shape) * mean)
    time_residual_prime *= time_residual_prime / (-2 * 1.5 * 1.5)
    time_residual_prime = np.exp(time_residual_prime)
    g = np.sum(time_residual_prime) / len(time_residual)
    return g
[ "def", "g_parameter", "(", "time_residual", ")", ":", "mean", "=", "np", ".", "mean", "(", "time_residual", ")", "time_residual_prime", "=", "(", "time_residual", "-", "np", ".", "ones", "(", "time_residual", ".", "shape", ")", "*", "mean", ")", "time_residual_prime", "*=", "time_residual_prime", "/", "(", "-", "2", "*", "1.5", "*", "1.5", ")", "time_residual_prime", "=", "np", ".", "exp", "(", "time_residual_prime", ")", "g", "=", "np", ".", "sum", "(", "time_residual_prime", ")", "/", "len", "(", "time_residual", ")", "return", "g" ]
stolen from thomas
[ "stolen", "from", "thomas" ]
python
train
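Written out, the function above evaluates a Gaussian-weighted mean of the time residuals with a hard-coded width (sigma = 1.5, in the same units as the residuals):

    g = (1/N) * sum_i exp( -(t_i - t_mean)^2 / (2 * sigma^2) )

so g approaches 1 when all residuals sit at the mean and falls toward 0 as they spread out.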
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L1531-L1563
def is_used(self, regs, i, top=None):
    """ Checks whether any of the given regs are required from the given point
    to the end or not.
    """
    if i < 0:
        i = 0

    if self.lock:
        return True

    regs = list(regs)  # make a copy
    if top is None:
        top = len(self)
    else:
        top -= 1

    for ii in range(i, top):
        for r in self.mem[ii].requires:
            if r in regs:
                return True

        for r in self.mem[ii].destroys:
            if r in regs:
                regs.remove(r)

        if not regs:
            return False

    self.lock = True
    result = self.goes_requires(regs)
    self.lock = False

    return result
[ "def", "is_used", "(", "self", ",", "regs", ",", "i", ",", "top", "=", "None", ")", ":", "if", "i", "<", "0", ":", "i", "=", "0", "if", "self", ".", "lock", ":", "return", "True", "regs", "=", "list", "(", "regs", ")", "# make a copy", "if", "top", "is", "None", ":", "top", "=", "len", "(", "self", ")", "else", ":", "top", "-=", "1", "for", "ii", "in", "range", "(", "i", ",", "top", ")", ":", "for", "r", "in", "self", ".", "mem", "[", "ii", "]", ".", "requires", ":", "if", "r", "in", "regs", ":", "return", "True", "for", "r", "in", "self", ".", "mem", "[", "ii", "]", ".", "destroys", ":", "if", "r", "in", "regs", ":", "regs", ".", "remove", "(", "r", ")", "if", "not", "regs", ":", "return", "False", "self", ".", "lock", "=", "True", "result", "=", "self", ".", "goes_requires", "(", "regs", ")", "self", ".", "lock", "=", "False", "return", "result" ]
Checks whether any of the given regs are required from the given point to the end or not.
[ "Checks", "whether", "any", "of", "the", "given", "regs", "are", "required", "from", "the", "given", "point", "to", "the", "end", "or", "not", "." ]
python
train
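The method above is a small forward liveness scan: a register counts as used if some later instruction requires it before an instruction overwrites it. Stripped of the optimizer's memcell and locking machinery, the core loop looks roughly like this (the requires/destroys sequences are invented stand-ins):

    def any_live(window, regs):
        # window: iterable of (requires, destroys) register-name sets
        live = list(regs)
        for requires, destroys in window:
            if any(r in live for r in requires):
                return True           # something still reads one of our regs
            for r in destroys:
                if r in live:
                    live.remove(r)    # overwritten before being read
            if not live:
                return False
        return None  # undecided in this window; is_used asks goes_requires()

    print(any_live([(set(), {'a'}), ({'a'}, set())], ['a']))  # False
    print(any_live([({'a'}, set())], ['a']))                  # True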
horazont/aioxmpp
aioxmpp/private_xml/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/private_xml/service.py#L43-L60
def get_private_xml(self, query_xso):
    """
    Get the private XML data for the element `query_xso` from the
    server.

    :param query_xso: the object to retrieve.
    :returns: the stored private XML data.

    `query_xso` *must* serialize to an empty XML node of the
    wanted namespace and type and *must* be registered as private
    XML :class:`~private_xml_xso.Query` payload.
    """
    iq = aioxmpp.IQ(
        type_=aioxmpp.IQType.GET,
        payload=private_xml_xso.Query(query_xso)
    )

    return (yield from self.client.send(iq))
[ "def", "get_private_xml", "(", "self", ",", "query_xso", ")", ":", "iq", "=", "aioxmpp", ".", "IQ", "(", "type_", "=", "aioxmpp", ".", "IQType", ".", "GET", ",", "payload", "=", "private_xml_xso", ".", "Query", "(", "query_xso", ")", ")", "return", "(", "yield", "from", "self", ".", "client", ".", "send", "(", "iq", ")", ")" ]
Get the private XML data for the element `query_xso` from the server. :param query_xso: the object to retrieve. :returns: the stored private XML data. `query_xso` *must* serialize to an empty XML node of the wanted namespace and type and *must* be registered as private XML :class:`~private_xml_xso.Query` payload.
[ "Get", "the", "private", "XML", "data", "for", "the", "element", "query_xso", "from", "the", "server", "." ]
python
train
theislab/anndata
anndata/readwrite/read.py
https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/readwrite/read.py#L42-L65
def read_excel(
    filename: PathLike,
    sheet: Union[str, int],
    dtype: str = 'float32',
) -> AnnData:
    """Read ``.xlsx`` (Excel) file.

    Assumes that the first column stores the row names and the first row
    the column names.

    Parameters
    ----------
    filename
        File name to read from.
    sheet
        Name of sheet in Excel file.
    """
    # rely on pandas for reading an excel file
    from pandas import read_excel
    df = read_excel(fspath(filename), sheet)
    X = df.values[:, 1:]
    row = {'row_names': df.iloc[:, 0].values.astype(str)}
    col = {'col_names': np.array(df.columns[1:], dtype=str)}
    return AnnData(X, row, col, dtype=dtype)
[ "def", "read_excel", "(", "filename", ":", "PathLike", ",", "sheet", ":", "Union", "[", "str", ",", "int", "]", ",", "dtype", ":", "str", "=", "'float32'", ",", ")", "->", "AnnData", ":", "# rely on pandas for reading an excel file", "from", "pandas", "import", "read_excel", "df", "=", "read_excel", "(", "fspath", "(", "filename", ")", ",", "sheet", ")", "X", "=", "df", ".", "values", "[", ":", ",", "1", ":", "]", "row", "=", "{", "'row_names'", ":", "df", ".", "iloc", "[", ":", ",", "0", "]", ".", "values", ".", "astype", "(", "str", ")", "}", "col", "=", "{", "'col_names'", ":", "np", ".", "array", "(", "df", ".", "columns", "[", "1", ":", "]", ",", "dtype", "=", "str", ")", "}", "return", "AnnData", "(", "X", ",", "row", ",", "col", ",", "dtype", "=", "dtype", ")" ]
Read ``.xlsx`` (Excel) file. Assumes that the first column stores the row names and the first row the column names. Parameters ---------- filename File name to read from. sheet Name of sheet in Excel file.
[ "Read", ".", "xlsx", "(", "Excel", ")", "file", "." ]
python
train
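A hedged usage sketch for the function above (the workbook name is invented; per the docstring, the sheet's first column holds row names and its first row holds column names):

    adata = read_excel('counts.xlsx', sheet=0)
    print(adata.shape)  # (n_rows, n_cols - 1): the name column is split off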
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exportxml.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exportxml.py#L355-L394
def add_edurange(self, edurange):
    """
    Parameters
    ----------
    edurange : etree.Element
        etree representation of a <edurange> element
        (annotation that groups a number of EDUs)

    <edu-range> seems to glue together a number of <edu> elements,
    which may be scattered over a number of sentences

    <edu-range> may or may not contain a span attribute
    (it seems that the span attribute is present, when <edu-range>
    is a descendant of <sentence>)

    Example
    -------
    <edu-range xml:id="edus9_3_1-5_0" span="s128_4..s130_7">
     <node xml:id="s128_525" cat="SIMPX" func="--">
      <edu xml:id="edu_9_3_1">
       <discRel relation="Continuation" marking="-" arg2="edu_9_3_2"/>
       <node xml:id="s128_506" cat="VF" func="-" parent="s128_525">
        <node xml:id="s128_505" cat="NX" func="ON" parent="s128_506">
         <relation type="expletive"/>
         <word xml:id="s128_4" form="Es" pos="PPER" morph="nsn3" lemma="es"
               func="HD" parent="s128_505" dephead="s128_5" deprel="SUBJ"/>
        </node>
       </node>
    ...

    <edu-range xml:id="edus37_8_0-8_1">
     <discRel relation="Restatement" marking="-" arg2="edu_37_9_0"/>
     <sentence xml:id="s660">
    """
    edurange_id = self.get_element_id(edurange)
    edurange_attribs = self.element_attribs_to_dict(edurange)  # contains 'span' or nothing
    self.add_node(edurange_id, layers={self.ns, self.ns+':edu:range'},
                  attr_dict=edurange_attribs)
    for edu in edurange.iterdescendants('edu'):
        edu_id = self.get_element_id(edu)
        self.add_edge(edurange_id, edu_id,
                      layers={self.ns, self.ns+':edu:range'},
                      edge_type=dg.EdgeTypes.spanning_relation)
[ "def", "add_edurange", "(", "self", ",", "edurange", ")", ":", "edurange_id", "=", "self", ".", "get_element_id", "(", "edurange", ")", "edurange_attribs", "=", "self", ".", "element_attribs_to_dict", "(", "edurange", ")", "# contains 'span' or nothing", "self", ".", "add_node", "(", "edurange_id", ",", "layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':edu:range'", "}", ",", "attr_dict", "=", "edurange_attribs", ")", "for", "edu", "in", "edurange", ".", "iterdescendants", "(", "'edu'", ")", ":", "edu_id", "=", "self", ".", "get_element_id", "(", "edu", ")", "self", ".", "add_edge", "(", "edurange_id", ",", "edu_id", ",", "layers", "=", "{", "self", ".", "ns", ",", "self", ".", "ns", "+", "':edu:range'", "}", ",", "edge_type", "=", "dg", ".", "EdgeTypes", ".", "spanning_relation", ")" ]
Parameters ---------- edurange : etree.Element etree representation of a <edurange> element (annotation that groups a number of EDUs) <edu-range> seems to glue together a number of `<edu> elements, which may be scattered over a number of sentences <edu-range> may or may not contain a span attribute (it seems that the span attribute is present, when <edu-range> is a descendent of <sentence>) Example ------- <edu-range xml:id="edus9_3_1-5_0" span="s128_4..s130_7"> <node xml:id="s128_525" cat="SIMPX" func="--"> <edu xml:id="edu_9_3_1"> <discRel relation="Continuation" marking="-" arg2="edu_9_3_2"/> <node xml:id="s128_506" cat="VF" func="-" parent="s128_525"> <node xml:id="s128_505" cat="NX" func="ON" parent="s128_506"> <relation type="expletive"/> <word xml:id="s128_4" form="Es" pos="PPER" morph="nsn3" lemma="es" func="HD" parent="s128_505" dephead="s128_5" deprel="SUBJ"/> </node> </node> ... <edu-range xml:id="edus37_8_0-8_1"> <discRel relation="Restatement" marking="-" arg2="edu_37_9_0"/> <sentence xml:id="s660">
[ "Parameters", "----------", "edurange", ":", "etree", ".", "Element", "etree", "representation", "of", "a", "<edurange", ">", "element", "(", "annotation", "that", "groups", "a", "number", "of", "EDUs", ")", "<edu", "-", "range", ">", "seems", "to", "glue", "together", "a", "number", "of", "<edu", ">", "elements", "which", "may", "be", "scattered", "over", "a", "number", "of", "sentences", "<edu", "-", "range", ">", "may", "or", "may", "not", "contain", "a", "span", "attribute", "(", "it", "seems", "that", "the", "span", "attribute", "is", "present", "when", "<edu", "-", "range", ">", "is", "a", "descendent", "of", "<sentence", ">", ")" ]
python
train
twilio/twilio-python
twilio/rest/preview/sync/service/document/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/sync/service/document/__init__.py#L356-L370
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: DocumentContext for this DocumentInstance
    :rtype: twilio.rest.preview.sync.service.document.DocumentContext
    """
    if self._context is None:
        self._context = DocumentContext(
            self._version,
            service_sid=self._solution['service_sid'],
            sid=self._solution['sid'],
        )
    return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "DocumentContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_context" ]
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: DocumentContext for this DocumentInstance :rtype: twilio.rest.preview.sync.service.document.DocumentContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
vertexproject/synapse
synapse/lib/types.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/types.py#L1467-L1490
def _ctorCmprRange(self, vals):
    '''
    Override default *range= handler to account for relative computation.
    '''
    if not isinstance(vals, (list, tuple)):
        raise s_exc.BadCmprValu(valu=vals, cmpr='*range=')

    if len(vals) != 2:
        raise s_exc.BadCmprValu(valu=vals, cmpr='*range=')

    tick, tock = self.getTickTock(vals)

    if tick > tock:
        # User input has requested a nullset
        def cmpr(valu):
            return False
        return cmpr

    def cmpr(valu):
        return tick <= valu <= tock
    return cmpr
[ "def", "_ctorCmprRange", "(", "self", ",", "vals", ")", ":", "if", "not", "isinstance", "(", "vals", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "s_exc", ".", "BadCmprValu", "(", "valu", "=", "vals", ",", "cmpr", "=", "'*range='", ")", "if", "len", "(", "vals", ")", "!=", "2", ":", "raise", "s_exc", ".", "BadCmprValu", "(", "valu", "=", "vals", ",", "cmpr", "=", "'*range='", ")", "tick", ",", "tock", "=", "self", ".", "getTickTock", "(", "vals", ")", "if", "tick", ">", "tock", ":", "# User input has requested a nullset", "def", "cmpr", "(", "valu", ")", ":", "return", "False", "return", "cmpr", "def", "cmpr", "(", "valu", ")", ":", "return", "tick", "<=", "valu", "<=", "tock", "return", "cmpr" ]
Override default *range= handler to account for relative computation.
[ "Override", "default", "*", "range", "=", "handler", "to", "account", "for", "relative", "computation", "." ]
python
train
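The constructor above returns a closure over the parsed bounds, and the degenerate tick > tock case deliberately yields a predicate that matches nothing. The same shape, minus synapse's tick/tock parsing (names invented for illustration):

    def make_range_cmpr(tick, tock):
        if tick > tock:
            return lambda valu: False     # null set: matches nothing
        return lambda valu: tick <= valu <= tock

    in_2020s = make_range_cmpr(2020, 2029)
    print(in_2020s(2024), in_2020s(1999))  # True False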
ga4gh/ga4gh-server
ga4gh/server/datamodel/variants.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/variants.py#L130-L136
def addVariantAnnotationSet(self, variantAnnotationSet):
    """
    Adds the specified variantAnnotationSet to this dataset.
    """
    id_ = variantAnnotationSet.getId()
    self._variantAnnotationSetIdMap[id_] = variantAnnotationSet
    self._variantAnnotationSetIds.append(id_)
[ "def", "addVariantAnnotationSet", "(", "self", ",", "variantAnnotationSet", ")", ":", "id_", "=", "variantAnnotationSet", ".", "getId", "(", ")", "self", ".", "_variantAnnotationSetIdMap", "[", "id_", "]", "=", "variantAnnotationSet", "self", ".", "_variantAnnotationSetIds", ".", "append", "(", "id_", ")" ]
Adds the specified variantAnnotationSet to this dataset.
[ "Adds", "the", "specified", "variantAnnotationSet", "to", "this", "dataset", "." ]
python
train
mitsei/dlkit
dlkit/handcar/learning/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/managers.py#L805-L827
def get_objective_search_session(self):
    """Gets the OsidSession associated with the objective search service.

    return: (osid.learning.ObjectiveSearchSession) - an
            ObjectiveSearchSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_objective_search() is false
    compliance: optional - This method must be implemented if
                supports_objective_search() is true.
    """
    if not self.supports_objective_search():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        session = sessions.ObjectiveSearchSession(runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
[ "def", "get_objective_search_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_objective_search", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "OperationFailed", "(", ")", "try", ":", "session", "=", "sessions", ".", "ObjectiveSearchSession", "(", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "OperationFailed", "(", ")", "return", "session" ]
Gets the OsidSession associated with the objective search service. return: (osid.learning.ObjectiveSearchSession) - an ObjectiveSearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_objective_search() is false compliance: optional - This method must be implemented if supports_objective_search() is true.
[ "Gets", "the", "OsidSession", "associated", "with", "the", "objective", "search", "service", "." ]
python
train
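A minimal sketch of guarding the session lookup, assuming `manager` is a learning manager obtained from the runtime:

if manager.supports_objective_search():
    session = manager.get_objective_search_session()
else:
    session = None  # the service is optional, so callers should degrade gracefully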
opencobra/cobrapy
cobra/io/mat.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/io/mat.py#L268-L302
def model_to_pymatbridge(model, variable_name="model", matlab=None): """send the model to a MATLAB workspace through pymatbridge This model can then be manipulated through the COBRA toolbox Parameters ---------- variable_name : str The variable name to which the model will be assigned in the MATLAB workspace matlab : None or pymatbridge.Matlab instance The MATLAB workspace to which the variable will be sent. If this is None, then this will be sent to the same environment used in IPython magics. """ if scipy_sparse is None: raise ImportError("`model_to_pymatbridge` requires scipy!") if matlab is None: # assumed to be running an IPython magic from IPython import get_ipython matlab = get_ipython().magics_manager.registry["MatlabMagics"].Matlab model_info = create_mat_dict(model) S = model_info["S"].todok() model_info["S"] = 0 temp_S_name = "cobra_pymatbridge_temp_" + uuid4().hex _check(matlab.set_variable(variable_name, model_info)) _check(matlab.set_variable(temp_S_name, S)) _check(matlab.run_code("%s.S = %s;" % (variable_name, temp_S_name))) # all vectors need to be transposed for i in model_info.keys(): if i == "S": continue _check(matlab.run_code("{0}.{1} = {0}.{1}';".format(variable_name, i))) _check(matlab.run_code("clear %s;" % temp_S_name))
[ "def", "model_to_pymatbridge", "(", "model", ",", "variable_name", "=", "\"model\"", ",", "matlab", "=", "None", ")", ":", "if", "scipy_sparse", "is", "None", ":", "raise", "ImportError", "(", "\"`model_to_pymatbridge` requires scipy!\"", ")", "if", "matlab", "is", "None", ":", "# assumed to be running an IPython magic", "from", "IPython", "import", "get_ipython", "matlab", "=", "get_ipython", "(", ")", ".", "magics_manager", ".", "registry", "[", "\"MatlabMagics\"", "]", ".", "Matlab", "model_info", "=", "create_mat_dict", "(", "model", ")", "S", "=", "model_info", "[", "\"S\"", "]", ".", "todok", "(", ")", "model_info", "[", "\"S\"", "]", "=", "0", "temp_S_name", "=", "\"cobra_pymatbridge_temp_\"", "+", "uuid4", "(", ")", ".", "hex", "_check", "(", "matlab", ".", "set_variable", "(", "variable_name", ",", "model_info", ")", ")", "_check", "(", "matlab", ".", "set_variable", "(", "temp_S_name", ",", "S", ")", ")", "_check", "(", "matlab", ".", "run_code", "(", "\"%s.S = %s;\"", "%", "(", "variable_name", ",", "temp_S_name", ")", ")", ")", "# all vectors need to be transposed", "for", "i", "in", "model_info", ".", "keys", "(", ")", ":", "if", "i", "==", "\"S\"", ":", "continue", "_check", "(", "matlab", ".", "run_code", "(", "\"{0}.{1} = {0}.{1}';\"", ".", "format", "(", "variable_name", ",", "i", ")", ")", ")", "_check", "(", "matlab", ".", "run_code", "(", "\"clear %s;\"", "%", "temp_S_name", ")", ")" ]
send the model to a MATLAB workspace through pymatbridge This model can then be manipulated through the COBRA toolbox Parameters ---------- variable_name : str The variable name to which the model will be assigned in the MATLAB workspace matlab : None or pymatbridge.Matlab instance The MATLAB workspace to which the variable will be sent. If this is None, then this will be sent to the same environment used in IPython magics.
[ "send", "the", "model", "to", "a", "MATLAB", "workspace", "through", "pymatbridge" ]
python
valid
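A usage sketch, assuming pymatbridge is installed, MATLAB is on the path, and `model` is an existing cobra model:

from pymatbridge import Matlab
mlab = Matlab()
mlab.start()
model_to_pymatbridge(model, variable_name="m", matlab=mlab)
# MATLAB now holds a struct `m` with the sparse S matrix and transposed vectors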
welbornprod/colr
examples/walk_dir.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/examples/walk_dir.py#L91-L121
def walk_dir_animated(path, maxdircnt=1000): """ Walk a directory, printing status updates along the way. """ p = AnimatedProgress( 'Walking {}...'.format(path), frames=Frames.dots_orbit.as_rainbow(), show_time=True, ) rootcnt = 0 print('\nStarting animated progress.') with p: for root, dirs, files in os.walk(path): rootcnt += 1 if rootcnt % 50 == 0: p.text = 'Walking {}...'.format(C(root, 'cyan')) if rootcnt > maxdircnt: # Stop is called because we are printing before the # AnimatedProgress is finished running. p.stop() print('\nFinished walking {} directories.'.format( C(maxdircnt, 'blue', style='bright') )) break else: # AnimatedProgress still running, `stop` it before printing. p.stop() print_err('\nNever made it to {} directories ({}).'.format( C(maxdircnt, 'blue', style='bright'), C(rootcnt, 'red', style='bright'), )) print('\nFinished with animated progress.') return 0
[ "def", "walk_dir_animated", "(", "path", ",", "maxdircnt", "=", "1000", ")", ":", "p", "=", "AnimatedProgress", "(", "'Walking {}...'", ".", "format", "(", "path", ")", ",", "frames", "=", "Frames", ".", "dots_orbit", ".", "as_rainbow", "(", ")", ",", "show_time", "=", "True", ",", ")", "rootcnt", "=", "0", "print", "(", "'\\nStarting animated progress.'", ")", "with", "p", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "rootcnt", "+=", "1", "if", "rootcnt", "%", "50", "==", "0", ":", "p", ".", "text", "=", "'Walking {}...'", ".", "format", "(", "C", "(", "root", ",", "'cyan'", ")", ")", "if", "rootcnt", ">", "maxdircnt", ":", "# Stop is called because we are printing before the", "# AnimatedProgress is finished running.", "p", ".", "stop", "(", ")", "print", "(", "'\\nFinished walking {} directories.'", ".", "format", "(", "C", "(", "maxdircnt", ",", "'blue'", ",", "style", "=", "'bright'", ")", ")", ")", "break", "else", ":", "# AnimatedProgress still running, `stop` it before printing.", "p", ".", "stop", "(", ")", "print_err", "(", "'\\nNever made it to {} directories ({}).'", ".", "format", "(", "C", "(", "maxdircnt", ",", "'blue'", ",", "style", "=", "'bright'", ")", ",", "C", "(", "rootcnt", ",", "'red'", ",", "style", "=", "'bright'", ")", ",", ")", ")", "print", "(", "'\\nFinished with animated progress.'", ")", "return", "0" ]
Walk a directory, printing status updates along the way.
[ "Walk", "a", "directory", "printing", "status", "updates", "along", "the", "way", "." ]
python
train
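Invoking the walker as a script entry point might look like this; '/usr' is only an example path:

import sys
sys.exit(walk_dir_animated('/usr', maxdircnt=500))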
vertexproject/synapse
synapse/daemon.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/daemon.py#L175-L192
def share(self, name, item): ''' Share an object via the telepath protocol. Args: name (str): Name of the shared object item (object): The object to share over telepath. ''' try: if isinstance(item, s_telepath.Aware): item.onTeleShare(self, name) self.shared[name] = item except Exception: logger.exception(f'onTeleShare() error for: {name}')
[ "def", "share", "(", "self", ",", "name", ",", "item", ")", ":", "try", ":", "if", "isinstance", "(", "item", ",", "s_telepath", ".", "Aware", ")", ":", "item", ".", "onTeleShare", "(", "self", ",", "name", ")", "self", ".", "shared", "[", "name", "]", "=", "item", "except", "Exception", ":", "logger", ".", "exception", "(", "f'onTeleShare() error for: {name}'", ")" ]
Share an object via the telepath protocol. Args: name (str): Name of the shared object item (object): The object to share over telepath.
[ "Share", "an", "object", "via", "the", "telepath", "protocol", "." ]
python
train
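A sketch of sharing an object, where `EchoApi` is a hypothetical Telepath-aware class:

dmon.share('echo', EchoApi())
# a client could then connect with s_telepath.openurl('tcp://host:port/echo')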
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/pkg/rolex/__init__.py
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/pkg/rolex/__init__.py#L217-L240
def to_utctimestamp(self, dt): """Calculate number of seconds from UTC 1970-01-01 00:00:00. When: - dt doesn't have tzinfo: assume it's a UTC time - dt has tzinfo: use tzinfo WARNING, if your datetime object doesn't have ``tzinfo``, make sure it's a UTC time, but **NOT a LOCAL TIME**. **Chinese documentation (translated)** Computes the timestamp. If dt has no tzinfo, it is assumed to be a UTC time; if dt has tzinfo, the tzinfo is used. """ if dt.tzinfo is None: dt = dt.replace(tzinfo=utc) delta = dt - datetime(1970, 1, 1, tzinfo=utc) return delta.total_seconds()
[ "def", "to_utctimestamp", "(", "self", ",", "dt", ")", ":", "if", "dt", ".", "tzinfo", "is", "None", ":", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "utc", ")", "delta", "=", "dt", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "utc", ")", "return", "delta", ".", "total_seconds", "(", ")" ]
Calculate number of seconds from UTC 1970-01-01 00:00:00. When: - dt doesn't have tzinfo: assume it's a UTC time - dt has tzinfo: use tzinfo WARNING, if your datetime object doesn't have ``tzinfo``, make sure it's a UTC time, but **NOT a LOCAL TIME**. **Chinese documentation (translated)** Computes the timestamp. If dt has no tzinfo, it is assumed to be a UTC time; if dt has tzinfo, the tzinfo is used.
[ "Calculate", "number", "of", "seconds", "from", "UTC", "1970", "-", "01", "-", "01", "00", ":", "00", ":", "00", "." ]
python
train
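A worked example, assuming `rolex` is an instance of this class; the naive datetimes are treated as UTC:

from datetime import datetime
rolex.to_utctimestamp(datetime(1970, 1, 1, 0, 1))   # -> 60.0
rolex.to_utctimestamp(datetime(1970, 1, 2))         # -> 86400.0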
phaethon/kamene
kamene/sendrecv.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/sendrecv.py#L629-L692
def bridge_and_sniff(if1, if2, count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None, stop_filter=None, stop_callback=None, *args, **kargs): """Forward traffic between two interfaces and sniff packets exchanged bridge_and_sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2Socket args) -> list of packets count: number of packets to capture. 0 means infinity store: whether to store sniffed packets or discard them prn: function to apply to each packet. If something is returned, it is displayed. ex: prn = lambda x: x.summary() lfilter: python function applied to each packet to determine if further action may be done ex: lfilter = lambda x: x.haslayer(Padding) timeout: stop sniffing after a given time (default: None) L2socket: use the provided L2socket stop_filter: python function applied to each packet to determine if we have to stop the capture after this packet ex: stop_filter = lambda x: x.haslayer(TCP) stop_callback: Call every loop to determine if we need to stop the capture """ c = 0 if L2socket is None: L2socket = conf.L2socket s1 = L2socket(iface=if1) s2 = L2socket(iface=if2) peerof={s1:s2,s2:s1} label={s1:if1, s2:if2} lst = [] if timeout is not None: stoptime = time.time()+timeout remain = None try: while True: if timeout is not None: remain = stoptime-time.time() if remain <= 0: break if stop_callback and stop_callback(): break ins,outs,errs = select([s1,s2],[],[], remain) for s in ins: p = s.recv() if p is not None: peerof[s].send(p.original) if lfilter and not lfilter(p): continue if store: p.sniffed_on = label[s] lst.append(p) c += 1 if prn: r = prn(p) if r is not None: print("%s: %s" % (label[s],r)) if stop_filter and stop_filter(p): break if count > 0 and c >= count: break except KeyboardInterrupt: pass finally: return plist.PacketList(lst,"Sniffed")
[ "def", "bridge_and_sniff", "(", "if1", ",", "if2", ",", "count", "=", "0", ",", "store", "=", "1", ",", "offline", "=", "None", ",", "prn", "=", "None", ",", "lfilter", "=", "None", ",", "L2socket", "=", "None", ",", "timeout", "=", "None", ",", "stop_filter", "=", "None", ",", "stop_callback", "=", "None", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "c", "=", "0", "if", "L2socket", "is", "None", ":", "L2socket", "=", "conf", ".", "L2socket", "s1", "=", "L2socket", "(", "iface", "=", "if1", ")", "s2", "=", "L2socket", "(", "iface", "=", "if2", ")", "peerof", "=", "{", "s1", ":", "s2", ",", "s2", ":", "s1", "}", "label", "=", "{", "s1", ":", "if1", ",", "s2", ":", "if2", "}", "lst", "=", "[", "]", "if", "timeout", "is", "not", "None", ":", "stoptime", "=", "time", ".", "time", "(", ")", "+", "timeout", "remain", "=", "None", "try", ":", "while", "True", ":", "if", "timeout", "is", "not", "None", ":", "remain", "=", "stoptime", "-", "time", ".", "time", "(", ")", "if", "remain", "<=", "0", ":", "break", "if", "stop_callback", "and", "stop_callback", "(", ")", ":", "break", "ins", ",", "outs", ",", "errs", "=", "select", "(", "[", "s1", ",", "s2", "]", ",", "[", "]", ",", "[", "]", ",", "remain", ")", "for", "s", "in", "ins", ":", "p", "=", "s", ".", "recv", "(", ")", "if", "p", "is", "not", "None", ":", "peerof", "[", "s", "]", ".", "send", "(", "p", ".", "original", ")", "if", "lfilter", "and", "not", "lfilter", "(", "p", ")", ":", "continue", "if", "store", ":", "p", ".", "sniffed_on", "=", "label", "[", "s", "]", "lst", ".", "append", "(", "p", ")", "c", "+=", "1", "if", "prn", ":", "r", "=", "prn", "(", "p", ")", "if", "r", "is", "not", "None", ":", "print", "(", "\"%s: %s\"", "%", "(", "label", "[", "s", "]", ",", "r", ")", ")", "if", "stop_filter", "and", "stop_filter", "(", "p", ")", ":", "break", "if", "count", ">", "0", "and", "c", ">=", "count", ":", "break", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "return", "plist", ".", "PacketList", "(", "lst", ",", "\"Sniffed\"", ")" ]
Forward traffic between two interfaces and sniff packets exchanged bridge_and_sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2Socket args) -> list of packets count: number of packets to capture. 0 means infinity store: whether to store sniffed packets or discard them prn: function to apply to each packet. If something is returned, it is displayed. ex: prn = lambda x: x.summary() lfilter: python function applied to each packet to determine if further action may be done ex: lfilter = lambda x: x.haslayer(Padding) timeout: stop sniffing after a given time (default: None) L2socket: use the provided L2socket stop_filter: python function applied to each packet to determine if we have to stop the capture after this packet ex: stop_filter = lambda x: x.haslayer(TCP) stop_callback: Call every loop to determine if we need to stop the capture
[ "Forward", "traffic", "between", "two", "interfaces", "and", "sniff", "packets", "exchanged", "bridge_and_sniff", "(", "[", "count", "=", "0", "]", "[", "prn", "=", "None", "]", "[", "store", "=", "1", "]", "[", "offline", "=", "None", "]", "[", "lfilter", "=", "None", "]", "+", "L2Socket", "args", ")", "-", ">", "list", "of", "packets" ]
python
train
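A usage sketch; the interface names are placeholders, and TCP is assumed to come from the kamene layer imports:

pkts = bridge_and_sniff('eth0', 'eth1', count=100,
                        prn=lambda p: p.summary(),
                        lfilter=lambda p: p.haslayer(TCP))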
hydpy-dev/hydpy
hydpy/auxs/armatools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/armatools.py#L676-L692
def calc_next_ma_coef(self, ma_order, ma_model): """Determine the MA coefficients of the ARMA model based on its predetermined AR coefficients and the MA ordinates of the given |MA| model. The MA coefficients are determined one at a time, beginning with the first one. Each ARMA MA coefficient is set in a manner that allows for the exact reproduction of the equivalent pure MA coefficient with all relevant ARMA coefficients. """ idx = ma_order-1 coef = ma_model.coefs[idx] for jdx, ar_coef in enumerate(self.ar_coefs): zdx = idx-jdx-1 if zdx >= 0: coef -= ar_coef*ma_model.coefs[zdx] self.ma_coefs = numpy.concatenate((self.ma_coefs, [coef]))
[ "def", "calc_next_ma_coef", "(", "self", ",", "ma_order", ",", "ma_model", ")", ":", "idx", "=", "ma_order", "-", "1", "coef", "=", "ma_model", ".", "coefs", "[", "idx", "]", "for", "jdx", ",", "ar_coef", "in", "enumerate", "(", "self", ".", "ar_coefs", ")", ":", "zdx", "=", "idx", "-", "jdx", "-", "1", "if", "zdx", ">=", "0", ":", "coef", "-=", "ar_coef", "*", "ma_model", ".", "coefs", "[", "zdx", "]", "self", ".", "ma_coefs", "=", "numpy", ".", "concatenate", "(", "(", "self", ".", "ma_coefs", ",", "[", "coef", "]", ")", ")" ]
Determine the MA coefficients of the ARMA model based on its predetermined AR coefficients and the MA ordinates of the given |MA| model. The MA coefficients are determined one at a time, beginning with the first one. Each ARMA MA coefficient is set in a manner that allows for the exact reproduction of the equivalent pure MA coefficient with all relevant ARMA coefficients.
[ "Determine", "the", "MA", "coefficients", "of", "the", "ARMA", "model", "based", "on", "its", "predetermined", "AR", "coefficients", "and", "the", "MA", "ordinates", "of", "the", "given", "|MA|", "model", "." ]
python
train
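A numeric sketch of the recursion above with hypothetical values:

# ar_coefs = [0.5], pure-MA ordinates ma_model.coefs = [1.0, 0.4]
# calc_next_ma_coef(2, ma_model) appends 0.4 - 0.5*1.0 = -0.1,
# so the ARMA response reproduces the second pure-MA ordinate exactly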
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L161-L173
def add_highdepth_genome_exclusion(items): """Add exclusions to input items to avoid slow runtimes on whole genomes. """ out = [] for d in items: d = utils.deepish_copy(d) if dd.get_coverage_interval(d) == "genome": e = dd.get_exclude_regions(d) if "highdepth" not in e: e.append("highdepth") d = dd.set_exclude_regions(d, e) out.append(d) return out
[ "def", "add_highdepth_genome_exclusion", "(", "items", ")", ":", "out", "=", "[", "]", "for", "d", "in", "items", ":", "d", "=", "utils", ".", "deepish_copy", "(", "d", ")", "if", "dd", ".", "get_coverage_interval", "(", "d", ")", "==", "\"genome\"", ":", "e", "=", "dd", ".", "get_exclude_regions", "(", "d", ")", "if", "\"highdepth\"", "not", "in", "e", ":", "e", ".", "append", "(", "\"highdepth\"", ")", "d", "=", "dd", ".", "set_exclude_regions", "(", "d", ",", "e", ")", "out", ".", "append", "(", "d", ")", "return", "out" ]
Add exclusions to input items to avoid slow runtimes on whole genomes.
[ "Add", "exclusions", "to", "input", "items", "to", "avoid", "slow", "runtimes", "on", "whole", "genomes", "." ]
python
train
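A one-line usage sketch on a list of sample dictionaries:

items = add_highdepth_genome_exclusion(items)
# every whole-genome item now carries "highdepth" in its exclude regions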
andrewsnowden/dota2py
dota2py/api.py
https://github.com/andrewsnowden/dota2py/blob/67637f4b9c160ea90c11b7e81545baf350affa7a/dota2py/api.py#L152-L158
def get_steam_id(vanityurl, **kwargs): """ Get a player's Steam ID from their Steam name/vanity URL """ params = {"vanityurl": vanityurl} return make_request("ResolveVanityURL", params, version="v0001", base="http://api.steampowered.com/ISteamUser/", **kwargs)
[ "def", "get_steam_id", "(", "vanityurl", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "\"vanityurl\"", ":", "vanityurl", "}", "return", "make_request", "(", "\"ResolveVanityURL\"", ",", "params", ",", "version", "=", "\"v0001\"", ",", "base", "=", "\"http://api.steampowered.com/ISteamUser/\"", ",", "*", "*", "kwargs", ")" ]
Get a player's Steam ID from their Steam name/vanity URL
[ "Get", "a", "players", "steam", "id", "from", "their", "steam", "name", "/", "vanity", "url" ]
python
train
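A usage sketch; "gaben" is an example vanity name, and the reply layout assumes make_request passes Steam's ResolveVanityURL response through unchanged:

result = get_steam_id("gaben")
steam_id = result["response"]["steamid"]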
jasonbot/arcrest
arcrest/server.py
https://github.com/jasonbot/arcrest/blob/b1ba71fd59bb6349415e7879d753d307dbc0da26/arcrest/server.py#L1043-L1053
def candidates(self): """A list of candidate addresses (as dictionaries) from a geocode operation""" # convert x['location'] to a point from a json point struct def cditer(): for candidate in self._json_struct['candidates']: newcandidate = candidate.copy() newcandidate['location'] = \ geometry.fromJson(newcandidate['location']) yield newcandidate return list(cditer())
[ "def", "candidates", "(", "self", ")", ":", "# convert x['location'] to a point from a json point struct", "def", "cditer", "(", ")", ":", "for", "candidate", "in", "self", ".", "_json_struct", "[", "'candidates'", "]", ":", "newcandidate", "=", "candidate", ".", "copy", "(", ")", "newcandidate", "[", "'location'", "]", "=", "geometry", ".", "fromJson", "(", "newcandidate", "[", "'location'", "]", ")", "yield", "newcandidate", "return", "list", "(", "cditer", "(", ")", ")" ]
A list of candidate addresses (as dictionaries) from a geocode operation
[ "A", "list", "of", "candidate", "addresses", "(", "as", "dictionaries", ")", "from", "a", "geocode", "operation" ]
python
train
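Iterating the property might look like this; the 'address' and 'score' keys are assumptions based on the usual ArcGIS geocode payload:

for cand in results.candidates:
    print(cand['address'], cand['score'], cand['location'])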
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3573-L3589
def schedule_host_check(self, host, check_time): """Schedule a check on a host Format of the line that triggers function call:: SCHEDULE_HOST_CHECK;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None """ host.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=False, force_time=check_time) self.send_an_element(host.get_update_status_brok())
[ "def", "schedule_host_check", "(", "self", ",", "host", ",", "check_time", ")", ":", "host", ".", "schedule", "(", "self", ".", "daemon", ".", "hosts", ",", "self", ".", "daemon", ".", "services", ",", "self", ".", "daemon", ".", "timeperiods", ",", "self", ".", "daemon", ".", "macromodulations", ",", "self", ".", "daemon", ".", "checkmodulations", ",", "self", ".", "daemon", ".", "checks", ",", "force", "=", "False", ",", "force_time", "=", "check_time", ")", "self", ".", "send_an_element", "(", "host", ".", "get_update_status_brok", "(", ")", ")" ]
Schedule a check on a host Format of the line that triggers function call:: SCHEDULE_HOST_CHECK;<host_name>;<check_time> :param host: host to check :type host: alignak.object.host.Host :param check_time: time to check :type check_time: :return: None
[ "Schedule", "a", "check", "on", "a", "host", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
Rapptz/discord.py
discord/webhook.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/webhook.py#L645-L733
def send(self, content=None, *, wait=False, username=None, avatar_url=None, tts=False, file=None, files=None, embed=None, embeds=None): """|maybecoro| Sends a message using the webhook. If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is not a coroutine. The content must be a type that can convert to a string through ``str(content)``. To upload a single file, the ``file`` parameter should be used with a single :class:`File` object. If the ``embed`` parameter is provided, it must be of type :class:`Embed` and it must be a rich embed type. You cannot mix the ``embed`` parameter with the ``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send. Parameters ------------ content: :class:`str` The content of the message to send. wait: :class:`bool` Whether the server should wait before sending a response. This essentially means that the return type of this function changes from ``None`` to a :class:`Message` if set to ``True``. username: :class:`str` The username to send with this message. If no username is provided then the default username for the webhook is used. avatar_url: Union[:class:`str`, :class:`Asset`] The avatar URL to send with this message. If no avatar URL is provided then the default avatar for the webhook is used. tts: :class:`bool` Indicates if the message should be sent using text-to-speech. file: :class:`File` The file to upload. This cannot be mixed with ``files`` parameter. files: List[:class:`File`] A list of files to send with the content. This cannot be mixed with the ``file`` parameter. embed: :class:`Embed` The rich embed for the content to send. This cannot be mixed with ``embeds`` parameter. embeds: List[:class:`Embed`] A list of embeds to send with the content. Maximum of 10. This cannot be mixed with the ``embed`` parameter. Raises -------- HTTPException Sending the message failed. NotFound This webhook was not found. Forbidden The authorization token for the webhook is incorrect. InvalidArgument You specified both ``embed`` and ``embeds`` or the length of ``embeds`` was invalid. Returns --------- Optional[:class:`Message`] The message that was sent. """ payload = {} if files is not None and file is not None: raise InvalidArgument('Cannot mix file and files keyword arguments.') if embeds is not None and embed is not None: raise InvalidArgument('Cannot mix embed and embeds keyword arguments.') if embeds is not None: if len(embeds) > 10: raise InvalidArgument('embeds has a maximum of 10 elements.') payload['embeds'] = [e.to_dict() for e in embeds] if embed is not None: payload['embeds'] = [embed.to_dict()] if content is not None: payload['content'] = str(content) payload['tts'] = tts if avatar_url: payload['avatar_url'] = str(avatar_url) if username: payload['username'] = username return self._adapter.execute_webhook(wait=wait, file=file, files=files, payload=payload)
[ "def", "send", "(", "self", ",", "content", "=", "None", ",", "*", ",", "wait", "=", "False", ",", "username", "=", "None", ",", "avatar_url", "=", "None", ",", "tts", "=", "False", ",", "file", "=", "None", ",", "files", "=", "None", ",", "embed", "=", "None", ",", "embeds", "=", "None", ")", ":", "payload", "=", "{", "}", "if", "files", "is", "not", "None", "and", "file", "is", "not", "None", ":", "raise", "InvalidArgument", "(", "'Cannot mix file and files keyword arguments.'", ")", "if", "embeds", "is", "not", "None", "and", "embed", "is", "not", "None", ":", "raise", "InvalidArgument", "(", "'Cannot mix embed and embeds keyword arguments.'", ")", "if", "embeds", "is", "not", "None", ":", "if", "len", "(", "embeds", ")", ">", "10", ":", "raise", "InvalidArgument", "(", "'embeds has a maximum of 10 elements.'", ")", "payload", "[", "'embeds'", "]", "=", "[", "e", ".", "to_dict", "(", ")", "for", "e", "in", "embeds", "]", "if", "embed", "is", "not", "None", ":", "payload", "[", "'embeds'", "]", "=", "[", "embed", ".", "to_dict", "(", ")", "]", "if", "content", "is", "not", "None", ":", "payload", "[", "'content'", "]", "=", "str", "(", "content", ")", "payload", "[", "'tts'", "]", "=", "tts", "if", "avatar_url", ":", "payload", "[", "'avatar_url'", "]", "=", "str", "(", "avatar_url", ")", "if", "username", ":", "payload", "[", "'username'", "]", "=", "username", "return", "self", ".", "_adapter", ".", "execute_webhook", "(", "wait", "=", "wait", ",", "file", "=", "file", ",", "files", "=", "files", ",", "payload", "=", "payload", ")" ]
|maybecoro| Sends a message using the webhook. If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is not a coroutine. The content must be a type that can convert to a string through ``str(content)``. To upload a single file, the ``file`` parameter should be used with a single :class:`File` object. If the ``embed`` parameter is provided, it must be of type :class:`Embed` and it must be a rich embed type. You cannot mix the ``embed`` parameter with the ``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send. Parameters ------------ content: :class:`str` The content of the message to send. wait: :class:`bool` Whether the server should wait before sending a response. This essentially means that the return type of this function changes from ``None`` to a :class:`Message` if set to ``True``. username: :class:`str` The username to send with this message. If no username is provided then the default username for the webhook is used. avatar_url: Union[:class:`str`, :class:`Asset`] The avatar URL to send with this message. If no avatar URL is provided then the default avatar for the webhook is used. tts: :class:`bool` Indicates if the message should be sent using text-to-speech. file: :class:`File` The file to upload. This cannot be mixed with ``files`` parameter. files: List[:class:`File`] A list of files to send with the content. This cannot be mixed with the ``file`` parameter. embed: :class:`Embed` The rich embed for the content to send. This cannot be mixed with ``embeds`` parameter. embeds: List[:class:`Embed`] A list of embeds to send with the content. Maximum of 10. This cannot be mixed with the ``embed`` parameter. Raises -------- HTTPException Sending the message failed. NotFound This webhook was not found. Forbidden The authorization token for the webhook is incorrect. InvalidArgument You specified both ``embed`` and ``embeds`` or the length of ``embeds`` was invalid. Returns --------- Optional[:class:`Message`] The message that was sent.
[ "|maybecoro|" ]
python
train
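A usage sketch with the synchronous RequestsWebhookAdapter; the webhook URL is a placeholder:

import discord
webhook = discord.Webhook.from_url('https://discord.com/api/webhooks/...',
                                   adapter=discord.RequestsWebhookAdapter())
webhook.send('Build finished', username='CI Bot',
             embed=discord.Embed(title='Result', description='All tests passed'))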
Parquery/icontract
icontract/_metaclass.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_metaclass.py#L16-L31
def _collapse_invariants(bases: List[type], namespace: MutableMapping[str, Any]) -> None: """Collect invariants from the bases and merge them with the invariants in the namespace.""" invariants = [] # type: List[Contract] # Add invariants of the bases for base in bases: if hasattr(base, "__invariants__"): invariants.extend(getattr(base, "__invariants__")) # Add invariants in the current namespace if '__invariants__' in namespace: invariants.extend(namespace['__invariants__']) # Change the final invariants in the namespace if invariants: namespace["__invariants__"] = invariants
[ "def", "_collapse_invariants", "(", "bases", ":", "List", "[", "type", "]", ",", "namespace", ":", "MutableMapping", "[", "str", ",", "Any", "]", ")", "->", "None", ":", "invariants", "=", "[", "]", "# type: List[Contract]", "# Add invariants of the bases", "for", "base", "in", "bases", ":", "if", "hasattr", "(", "base", ",", "\"__invariants__\"", ")", ":", "invariants", ".", "extend", "(", "getattr", "(", "base", ",", "\"__invariants__\"", ")", ")", "# Add invariants in the current namespace", "if", "'__invariants__'", "in", "namespace", ":", "invariants", ".", "extend", "(", "namespace", "[", "'__invariants__'", "]", ")", "# Change the final invariants in the namespace", "if", "invariants", ":", "namespace", "[", "\"__invariants__\"", "]", "=", "invariants" ]
Collect invariants from the bases and merge them with the invariants in the namespace.
[ "Collect", "invariants", "from", "the", "bases", "and", "merge", "them", "with", "the", "invariants", "in", "the", "namespace", "." ]
python
train
luizirber/bioinfo
bioinfo/__init__.py
https://github.com/luizirber/bioinfo/blob/756758f62b4f7136cdb54de2f79033ff0226a6b7/bioinfo/__init__.py#L37-L46
def main(): '''Main entry point for the bioinfo CLI.''' args = docopt(__doc__, version=__version__) if 'bam_coverage' in args: bam_coverage(args['<reference>'], args['<alignments>'], int(args['<minmatch>']), min_mapq=int(args['--mapq']), min_len=float(args['--minlen']))
[ "def", "main", "(", ")", ":", "args", "=", "docopt", "(", "__doc__", ",", "version", "=", "__version__", ")", "if", "'bam_coverage'", "in", "args", ":", "bam_coverage", "(", "args", "[", "'<reference>'", "]", ",", "args", "[", "'<alignments>'", "]", ",", "int", "(", "args", "[", "'<minmatch>'", "]", ")", ",", "min_mapq", "=", "int", "(", "args", "[", "'--mapq'", "]", ")", ",", "min_len", "=", "float", "(", "args", "[", "'--minlen'", "]", ")", ")" ]
Main entry point for the bioinfo CLI.
[ "Main", "entry", "point", "for", "the", "bioinfo", "CLI", "." ]
python
train
minhhoit/yacms
yacms/forms/forms.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/forms/forms.py#L321-L329
def columns(self): """ Returns the list of selected column names. """ fields = [f.label for f in self.form_fields if self.cleaned_data["field_%s_export" % f.id]] if self.cleaned_data["field_0_export"]: fields.append(self.entry_time_name) return fields
[ "def", "columns", "(", "self", ")", ":", "fields", "=", "[", "f", ".", "label", "for", "f", "in", "self", ".", "form_fields", "if", "self", ".", "cleaned_data", "[", "\"field_%s_export\"", "%", "f", ".", "id", "]", "]", "if", "self", ".", "cleaned_data", "[", "\"field_0_export\"", "]", ":", "fields", ".", "append", "(", "self", ".", "entry_time_name", ")", "return", "fields" ]
Returns the list of selected column names.
[ "Returns", "the", "list", "of", "selected", "column", "names", "." ]
python
train
saltstack/salt
salt/utils/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L1118-L1137
def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX. The MAC address is not available through the interfaces data, so it is read from entstat output. ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg
[ "def", "_hw_addr_aix", "(", "iface", ")", ":", "cmd", "=", "subprocess", ".", "Popen", "(", "'entstat -d {0} | grep \\'Hardware Address\\''", ".", "format", "(", "iface", ")", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", ".", "communicate", "(", ")", "[", "0", "]", "if", "cmd", ":", "comps", "=", "cmd", ".", "split", "(", "' '", ")", "if", "len", "(", "comps", ")", "==", "3", ":", "mac_addr", "=", "comps", "[", "2", "]", ".", "strip", "(", "'\\''", ")", ".", "strip", "(", ")", "return", "mac_addr", "error_msg", "=", "(", "'Interface \"{0}\" either not available or does not contain a hardware address'", ".", "format", "(", "iface", ")", ")", "log", ".", "error", "(", "error_msg", ")", "return", "error_msg" ]
Return the hardware address (a.k.a. MAC address) for a given interface on AIX. The MAC address is not available through the interfaces data, so it is read from entstat output.
[ "Return", "the", "hardware", "address", "(", "a", ".", "k", ".", "a", ".", "MAC", "address", ")", "for", "a", "given", "interface", "on", "AIX", "MAC", "address", "not", "available", "in", "through", "interfaces" ]
python
train
postlund/pyatv
pyatv/mrp/messages.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/messages.py#L73-L82
def register_hid_device(screen_width, screen_height, absolute=False, integrated_display=False): """Create a new REGISTER_HID_DEVICE_MESSAGE.""" message = create(protobuf.REGISTER_HID_DEVICE_MESSAGE) descriptor = message.inner().deviceDescriptor descriptor.absolute = 1 if absolute else 0 descriptor.integratedDisplay = 1 if integrated_display else 0 descriptor.screenSizeWidth = screen_width descriptor.screenSizeHeight = screen_height return message
[ "def", "register_hid_device", "(", "screen_width", ",", "screen_height", ",", "absolute", "=", "False", ",", "integrated_display", "=", "False", ")", ":", "message", "=", "create", "(", "protobuf", ".", "REGISTER_HID_DEVICE_MESSAGE", ")", "descriptor", "=", "message", ".", "inner", "(", ")", ".", "deviceDescriptor", "descriptor", ".", "absolute", "=", "1", "if", "absolute", "else", "0", "descriptor", ".", "integratedDisplay", "=", "1", "if", "integrated_display", "else", "0", "descriptor", ".", "screenSizeWidth", "=", "screen_width", "descriptor", ".", "screenSizeHeight", "=", "screen_height", "return", "message" ]
Create a new REGISTER_HID_DEVICE_MESSAGE.
[ "Create", "a", "new", "REGISTER_HID_DEVICE_MESSAGE", "." ]
python
train
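A sketch of building the message for a 1000x600 touch surface; how it is sent depends on the surrounding MRP connection code:

msg = register_hid_device(1000, 600, absolute=False, integrated_display=False)
# e.g. hand msg to the protocol layer that owns the MRP connection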
authomatic/authomatic
authomatic/providers/__init__.py
https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/providers/__init__.py#L336-L353
def _log(cls, level, msg, **kwargs): """ Logs a message with pre-formatted prefix. :param int level: Logging level as specified in the `logging module <http://docs.python.org/2/library/logging.html>`_ of Python standard library. :param str msg: The actual message. """ logger = getattr(cls, '_logger', None) or authomatic.core._logger logger.log( level, ': '.join( ('authomatic', cls.__name__, msg)), **kwargs)
[ "def", "_log", "(", "cls", ",", "level", ",", "msg", ",", "*", "*", "kwargs", ")", ":", "logger", "=", "getattr", "(", "cls", ",", "'_logger'", ",", "None", ")", "or", "authomatic", ".", "core", ".", "_logger", "logger", ".", "log", "(", "level", ",", "': '", ".", "join", "(", "(", "'authomatic'", ",", "cls", ".", "__name__", ",", "msg", ")", ")", ",", "*", "*", "kwargs", ")" ]
Logs a message with pre-formatted prefix. :param int level: Logging level as specified in the `logging module <http://docs.python.org/2/library/logging.html>`_ of Python standard library. :param str msg: The actual message.
[ "Logs", "a", "message", "with", "pre", "-", "formatted", "prefix", "." ]
python
test
bpannier/simpletr64
simpletr64/devicetr64.py
https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/devicetr64.py#L836-L923
def _loadSCPD(self, serviceType, timeout): """Internal method to load the action definitions. :param str serviceType: the service type to load :param int timeout: the timeout for downloading """ if serviceType not in self.__deviceServiceDefinitions.keys(): raise ValueError("Can not load SCPD, no service type defined for: " + serviceType) if "scpdURL" not in self.__deviceServiceDefinitions[serviceType].keys(): raise ValueError("No SCPD URL defined for: " + serviceType) # remove actions for given service type self.__deviceSCPD.pop(serviceType, None) uri = self.__deviceServiceDefinitions[serviceType]["scpdURL"] # setup proxies proxies = {} if self.__httpsProxy: proxies = {"https": self.__httpsProxy} if self.__httpProxy: proxies = {"http": self.__httpProxy} # setup authentication auth = None if self.__password: auth = HTTPDigestAuth(self.__username, self.__password) # build the URL location = self.__protocol + "://" + self.__hostname + ":" + str(self.port) + uri # some devices respond differently without a User-Agent headers = {"User-Agent": "Mozilla/5.0; SimpleTR64-2"} # http request request = requests.get(location, auth=auth, proxies=proxies, headers=headers, timeout=timeout, verify=self.__verify) if request.status_code != 200: errorStr = DeviceTR64._extractErrorString(request) raise ValueError('Could not load SCPD for "' + serviceType + '" from ' + location + ': ' + str(request.status_code) + ' - ' + request.reason + " -- " + errorStr) data = request.text.encode('utf-8') if len(data) == 0: return # parse XML return try: root = ET.fromstring(data) except Exception as e: raise ValueError("Can not parse SCPD content for '" + serviceType + "' from '" + location + "': " + str(e)) actions = {} variableTypes = {} variableParameterDict = {} # iterate through the full XML tree for element in root.getchildren(): tagName = element.tag.lower() # go deeper for action lists if tagName.endswith("actionlist"): # remember the actions and where a specific variable gets referenced self._parseSCPDActions(element, actions, variableParameterDict) # go deeper for the variable declarations elif tagName.endswith("servicestatetable"): self._parseSCPDVariableTypes(element, variableTypes) # everything has been parsed, now merge the variable declarations into the action parameters for name in variableParameterDict.keys(): if name not in variableTypes.keys(): raise ValueError("Variable reference in action can not be resolved: " + name) # iterate through all arguments where this variable has been referenced for argument in variableParameterDict[name]: # fill in the type of this variable/argument argument["dataType"] = variableTypes[name]["dataType"] # if the variable declaration includes a default value add it to the action parameter as well if "defaultValue" in variableTypes[name].keys(): argument["defaultValue"] = variableTypes[name]["defaultValue"] self.__deviceSCPD[serviceType] = actions
[ "def", "_loadSCPD", "(", "self", ",", "serviceType", ",", "timeout", ")", ":", "if", "serviceType", "not", "in", "self", ".", "__deviceServiceDefinitions", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "\"Can not load SCPD, no service type defined for: \"", "+", "serviceType", ")", "if", "\"scpdURL\"", "not", "in", "self", ".", "__deviceServiceDefinitions", "[", "serviceType", "]", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "\"No SCPD URL defined for: \"", "+", "serviceType", ")", "# remove actions for given service type", "self", ".", "__deviceSCPD", ".", "pop", "(", "serviceType", ",", "None", ")", "uri", "=", "self", ".", "__deviceServiceDefinitions", "[", "serviceType", "]", "[", "\"scpdURL\"", "]", "# setup proxies", "proxies", "=", "{", "}", "if", "self", ".", "__httpsProxy", ":", "proxies", "=", "{", "\"https\"", ":", "self", ".", "__httpsProxy", "}", "if", "self", ".", "__httpProxy", ":", "proxies", "=", "{", "\"http\"", ":", "self", ".", "__httpProxy", "}", "# setup authentication", "auth", "=", "None", "if", "self", ".", "__password", ":", "auth", "=", "HTTPDigestAuth", "(", "self", ".", "__username", ",", "self", ".", "__password", ")", "# build the URL", "location", "=", "self", ".", "__protocol", "+", "\"://\"", "+", "self", ".", "__hostname", "+", "\":\"", "+", "str", "(", "self", ".", "port", ")", "+", "uri", "# some devices response differently without a User-Agent", "headers", "=", "{", "\"User-Agent\"", ":", "\"Mozilla/5.0; SimpleTR64-2\"", "}", "# http request", "request", "=", "requests", ".", "get", "(", "location", ",", "auth", "=", "auth", ",", "proxies", "=", "proxies", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ",", "verify", "=", "self", ".", "__verify", ")", "if", "request", ".", "status_code", "!=", "200", ":", "errorStr", "=", "DeviceTR64", ".", "_extractErrorString", "(", "request", ")", "raise", "ValueError", "(", "'Could not load SCPD for \"'", "+", "serviceType", "+", "'\" from '", "+", "location", "+", "': '", "+", "str", "(", "request", ".", "status_code", ")", "+", "' - '", "+", "request", ".", "reason", "+", "\" -- \"", "+", "errorStr", ")", "data", "=", "request", ".", "text", ".", "encode", "(", "'utf-8'", ")", "if", "len", "(", "data", ")", "==", "0", ":", "return", "# parse XML return", "try", ":", "root", "=", "ET", ".", "fromstring", "(", "data", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"Can not parse SCPD content for '\"", "+", "serviceType", "+", "\"' from '\"", "+", "location", "+", "\"': \"", "+", "str", "(", "e", ")", ")", "actions", "=", "{", "}", "variableTypes", "=", "{", "}", "variableParameterDict", "=", "{", "}", "# iterate through the full XML tree", "for", "element", "in", "root", ".", "getchildren", "(", ")", ":", "tagName", "=", "element", ".", "tag", ".", "lower", "(", ")", "# go deeper for action lists", "if", "tagName", ".", "endswith", "(", "\"actionlist\"", ")", ":", "# remember the actions and where a specific variable gets referenced", "self", ".", "_parseSCPDActions", "(", "element", ",", "actions", ",", "variableParameterDict", ")", "# go deeper for the variable declarations", "elif", "tagName", ".", "endswith", "(", "\"servicestatetable\"", ")", ":", "self", ".", "_parseSCPDVariableTypes", "(", "element", ",", "variableTypes", ")", "# everything have been parsed now merge the variable declarations into the action parameters", "for", "name", "in", "variableParameterDict", ".", "keys", "(", ")", ":", "if", "name", "not", "in", "variableTypes", 
".", "keys", "(", ")", ":", "raise", "ValueError", "(", "\"Variable reference in action can not be resolved: \"", "+", "name", ")", "# iterate through all arguments where this variable have been referenced", "for", "argument", "in", "variableParameterDict", "[", "name", "]", ":", "# fill in the type of this variable/argument", "argument", "[", "\"dataType\"", "]", "=", "variableTypes", "[", "name", "]", "[", "\"dataType\"", "]", "# if the variable declaration includes a default value add it to the action parameter as well", "if", "\"defaultValue\"", "in", "variableTypes", "[", "name", "]", ".", "keys", "(", ")", ":", "argument", "[", "\"defaultValue\"", "]", "=", "variableTypes", "[", "name", "]", "[", "\"defaultValue\"", "]", "self", ".", "__deviceSCPD", "[", "serviceType", "]", "=", "actions" ]
Internal method to load the action definitions. :param str serviceType: the service type to load :param int timeout: the timeout for downloading
[ "Internal", "method", "to", "load", "the", "action", "definitions", "." ]
python
train
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1228-L1239
def set_principal_credit_string(self, credit_string): """Sets the principal credit string. arg: credit_string (string): the new credit string raise: InvalidArgument - ``credit_string`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``credit_string`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.repository.AssetForm.set_title_template self._my_map['principalCreditString'] = self._get_display_text(credit_string, self.get_principal_credit_string_metadata())
[ "def", "set_principal_credit_string", "(", "self", ",", "credit_string", ")", ":", "# Implemented from template for osid.repository.AssetForm.set_title_template", "self", ".", "_my_map", "[", "'principalCreditString'", "]", "=", "self", ".", "_get_display_text", "(", "credit_string", ",", "self", ".", "get_principal_credit_string_metadata", "(", ")", ")" ]
Sets the principal credit string. arg: credit_string (string): the new credit string raise: InvalidArgument - ``credit_string`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``credit_string`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "principal", "credit", "string", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L96-L117
def _get_linear_lookup_table_and_weight(nbits, wp): """ Generate a linear lookup table. :param nbits: int Number of bits to represent a quantized weight value :param wp: numpy.array Weight blob to be quantized Returns ------- lookup_table: numpy.array Lookup table of shape (2^nbits, ) qw: numpy.array Quantized weight codes, usable as indices into the lookup table """ w = wp.reshape(1, -1) qw, scales, biases = _quantize_channelwise_linear(w, nbits, axis=0) indices = _np.array(range(0, 2**nbits)) lookup_table = indices * scales[0] + biases[0] return lookup_table, qw
[ "def", "_get_linear_lookup_table_and_weight", "(", "nbits", ",", "wp", ")", ":", "w", "=", "wp", ".", "reshape", "(", "1", ",", "-", "1", ")", "qw", ",", "scales", ",", "biases", "=", "_quantize_channelwise_linear", "(", "w", ",", "nbits", ",", "axis", "=", "0", ")", "indices", "=", "_np", ".", "array", "(", "range", "(", "0", ",", "2", "**", "nbits", ")", ")", "lookup_table", "=", "indices", "*", "scales", "[", "0", "]", "+", "biases", "[", "0", "]", "return", "lookup_table", ",", "qw" ]
Generate a linear lookup table. :param nbits: int Number of bits to represent a quantized weight value :param wp: numpy.array Weight blob to be quantized Returns ------- lookup_table: numpy.array Lookup table of shape (2^nbits, ) qw: numpy.array Quantized weight codes, usable as indices into the lookup table
[ "Generate", "a", "linear", "lookup", "table", "." ]
python
train
pallets/werkzeug
src/werkzeug/local.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/local.py#L150-L161
def pop(self): """Removes the topmost item from the stack. Returns the old value, or `None` if the stack was already empty. """ stack = getattr(self._local, "stack", None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop()
[ "def", "pop", "(", "self", ")", ":", "stack", "=", "getattr", "(", "self", ".", "_local", ",", "\"stack\"", ",", "None", ")", "if", "stack", "is", "None", ":", "return", "None", "elif", "len", "(", "stack", ")", "==", "1", ":", "release_local", "(", "self", ".", "_local", ")", "return", "stack", "[", "-", "1", "]", "else", ":", "return", "stack", ".", "pop", "(", ")" ]
Removes the topmost item from the stack. Returns the old value, or `None` if the stack was already empty.
[ "Removes", "the", "topmost", "item", "from", "the", "stack", "will", "return", "the", "old", "value", "or", "None", "if", "the", "stack", "was", "already", "empty", "." ]
python
train
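A small trace of the behavior above:

ls = LocalStack()
ls.push(42)
ls.pop()   # -> 42; the stack had one item, so the local storage is released too
ls.pop()   # -> None on the now-empty stack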
bitlabstudio/django-development-fabfile
development_fabfile/fabfile/remote.py
https://github.com/bitlabstudio/django-development-fabfile/blob/a135c6eb5bdd0b496a7eccfd271aca558dd99243/development_fabfile/fabfile/remote.py#L131-L144
def run_export_db(filename=None): """ Exports the database on the server. Usage:: fab prod run_export_db fab prod run_export_db:filename=foobar.dump """ if not filename: filename = settings.DB_DUMP_FILENAME with cd(settings.FAB_SETTING('SERVER_PROJECT_ROOT')): run_workon('fab export_db:remote=True,filename={}'.format(filename))
[ "def", "run_export_db", "(", "filename", "=", "None", ")", ":", "if", "not", "filename", ":", "filename", "=", "settings", ".", "DB_DUMP_FILENAME", "with", "cd", "(", "settings", ".", "FAB_SETTING", "(", "'SERVER_PROJECT_ROOT'", ")", ")", ":", "run_workon", "(", "'fab export_db:remote=True,filename={}'", ".", "format", "(", "filename", ")", ")" ]
Exports the database on the server. Usage:: fab prod run_export_db fab prod run_export_db:filename=foobar.dump
[ "Exports", "the", "database", "on", "the", "server", "." ]
python
train
zacernst/timed_dict
timed_dict/timed_dict.py
https://github.com/zacernst/timed_dict/blob/01a1d145d246832e63c2e92e11d91cac5c3f2d1e/timed_dict/timed_dict.py#L198-L221
def set_expiration(self, key, ignore_missing=False, additional_seconds=None, seconds=None): ''' Alters the expiration time for a key. If the key is not present, then raise an Exception unless `ignore_missing` is set to `True`. Args: key: The key whose expiration we are changing. ignore_missing (bool): If set, then return silently if the key does not exist. Default is `False`. additional_seconds (int): Add this many seconds to the current expiration time. seconds (int): Expire the key this many seconds from now. ''' if key not in self.time_dict and ignore_missing: return elif key not in self.time_dict and not ignore_missing: raise Exception('Key missing from `TimedDict` and ' '`ignore_missing` is False.') if additional_seconds is not None: self.time_dict[key] += additional_seconds elif seconds is not None: self.time_dict[key] = time.time() + seconds
[ "def", "set_expiration", "(", "self", ",", "key", ",", "ignore_missing", "=", "False", ",", "additional_seconds", "=", "None", ",", "seconds", "=", "None", ")", ":", "if", "key", "not", "in", "self", ".", "time_dict", "and", "ignore_missing", ":", "return", "elif", "key", "not", "in", "self", ".", "time_dict", "and", "not", "ignore_missing", ":", "raise", "Exception", "(", "'Key missing from `TimedDict` and '", "'`ignore_missing` is False.'", ")", "if", "additional_seconds", "is", "not", "None", ":", "self", ".", "time_dict", "[", "key", "]", "+=", "additional_seconds", "elif", "seconds", "is", "not", "None", ":", "self", ".", "time_dict", "[", "key", "]", "=", "time", ".", "time", "(", ")", "+", "seconds" ]
Alters the expiration time for a key. If the key is not present, then raise an Exception unless `ignore_missing` is set to `True`. Args: key: The key whose expiration we are changing. ignore_missing (bool): If set, then return silently if the key does not exist. Default is `False`. additional_seconds (int): Add this many seconds to the current expiration time. seconds (int): Expire the key this many seconds from now.
[ "Alters", "the", "expiration", "time", "for", "a", "key", ".", "If", "the", "key", "is", "not", "present", "then", "raise", "an", "Exception", "unless", "ignore_missing", "is", "set", "to", "True", "." ]
python
train
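A usage sketch; the TimedDict constructor arguments are assumptions:

d = TimedDict(timeout=30)                        # hypothetical signature
d['job'] = 'running'
d.set_expiration('job', additional_seconds=60)   # push the expiry 60s further out
d.set_expiration('gone', ignore_missing=True)    # silently does nothing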
brechtm/rinohtype
src/rinoh/backend/pdf/xobject/purepng.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L2625-L2662
def read(self, lenient=False): """ Read the PNG file and decode it. Returns (`width`, `height`, `pixels`, `metadata`). May use excessive memory. `pixels` are returned in boxed row flat pixel format. If the optional `lenient` argument evaluates to True, checksum failures will raise warnings rather than exceptions. """ self.preamble(lenient=lenient) raw = self.idatdecomp(lenient) if self.interlace: raw = bytearray(itertools.chain(*raw)) arraycode = 'BH'[self.bitdepth > 8] # Like :meth:`group` but producing an array.array object for # each row. pixels = map(lambda *row: array(arraycode, row), *[iter(self.deinterlace(raw))]*self.width*self.planes) else: pixels = self.iterboxed(self.iterstraight(raw)) meta = dict() for attr in 'greyscale alpha planes bitdepth interlace'.split(): meta[attr] = getattr(self, attr) meta['size'] = (self.width, self.height) for attr in ('gamma', 'transparent', 'background', 'last_mod_time', 'icc_profile', 'resolution', 'text', 'rendering_intent', 'white_point', 'rgb_points'): a = getattr(self, attr, None) if a is not None: meta[attr] = a if self.plte: meta['palette'] = self.palette() return self.width, self.height, pixels, meta
[ "def", "read", "(", "self", ",", "lenient", "=", "False", ")", ":", "self", ".", "preamble", "(", "lenient", "=", "lenient", ")", "raw", "=", "self", ".", "idatdecomp", "(", "lenient", ")", "if", "self", ".", "interlace", ":", "raw", "=", "bytearray", "(", "itertools", ".", "chain", "(", "*", "raw", ")", ")", "arraycode", "=", "'BH'", "[", "self", ".", "bitdepth", ">", "8", "]", "# Like :meth:`group` but producing an array.array object for", "# each row.", "pixels", "=", "map", "(", "lambda", "*", "row", ":", "array", "(", "arraycode", ",", "row", ")", ",", "*", "[", "iter", "(", "self", ".", "deinterlace", "(", "raw", ")", ")", "]", "*", "self", ".", "width", "*", "self", ".", "planes", ")", "else", ":", "pixels", "=", "self", ".", "iterboxed", "(", "self", ".", "iterstraight", "(", "raw", ")", ")", "meta", "=", "dict", "(", ")", "for", "attr", "in", "'greyscale alpha planes bitdepth interlace'", ".", "split", "(", ")", ":", "meta", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "meta", "[", "'size'", "]", "=", "(", "self", ".", "width", ",", "self", ".", "height", ")", "for", "attr", "in", "(", "'gamma'", ",", "'transparent'", ",", "'background'", ",", "'last_mod_time'", ",", "'icc_profile'", ",", "'resolution'", ",", "'text'", ",", "'rendering_intent'", ",", "'white_point'", ",", "'rgb_points'", ")", ":", "a", "=", "getattr", "(", "self", ",", "attr", ",", "None", ")", "if", "a", "is", "not", "None", ":", "meta", "[", "attr", "]", "=", "a", "if", "self", ".", "plte", ":", "meta", "[", "'palette'", "]", "=", "self", ".", "palette", "(", ")", "return", "self", ".", "width", ",", "self", ".", "height", ",", "pixels", ",", "meta" ]
Read the PNG file and decode it. Returns (`width`, `height`, `pixels`, `metadata`). May use excessive memory. `pixels` are returned in boxed row flat pixel format. If the optional `lenient` argument evaluates to True, checksum failures will raise warnings rather than exceptions.
[ "Read", "the", "PNG", "file", "and", "decode", "it", "." ]
python
train
ejeschke/ginga
ginga/rv/plugins/Colorbar.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Colorbar.py#L130-L140
def rgbmap_cb(self, rgbmap, channel): """ This method is called when the RGBMap is changed. We update the ColorBar to match. """ if not self.gui_up: return fitsimage = channel.fitsimage if fitsimage != self.fv.getfocus_fitsimage(): return False self.change_cbar(self.fv, channel)
[ "def", "rgbmap_cb", "(", "self", ",", "rgbmap", ",", "channel", ")", ":", "if", "not", "self", ".", "gui_up", ":", "return", "fitsimage", "=", "channel", ".", "fitsimage", "if", "fitsimage", "!=", "self", ".", "fv", ".", "getfocus_fitsimage", "(", ")", ":", "return", "False", "self", ".", "change_cbar", "(", "self", ".", "fv", ",", "channel", ")" ]
This method is called when the RGBMap is changed. We update the ColorBar to match.
[ "This", "method", "is", "called", "when", "the", "RGBMap", "is", "changed", ".", "We", "update", "the", "ColorBar", "to", "match", "." ]
python
train
saltstack/salt
salt/modules/boto_s3_bucket.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_s3_bucket.py#L778-L807
def put_versioning(Bucket, Status, MFADelete=None, MFA=None, region=None, key=None, keyid=None, profile=None): ''' Given a valid config, update the versioning configuration for a bucket. Returns {updated: True} if versioning configuration was updated and returns {updated: False} if versioning configuration was not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_versioning my_bucket Enabled ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) VersioningConfiguration = {'Status': Status} if MFADelete is not None: VersioningConfiguration['MFADelete'] = MFADelete kwargs = {} if MFA is not None: kwargs['MFA'] = MFA conn.put_bucket_versioning(Bucket=Bucket, VersioningConfiguration=VersioningConfiguration, **kwargs) return {'updated': True, 'name': Bucket} except ClientError as e: return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "put_versioning", "(", "Bucket", ",", "Status", ",", "MFADelete", "=", "None", ",", "MFA", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "VersioningConfiguration", "=", "{", "'Status'", ":", "Status", "}", "if", "MFADelete", "is", "not", "None", ":", "VersioningConfiguration", "[", "'MFADelete'", "]", "=", "MFADelete", "kwargs", "=", "{", "}", "if", "MFA", "is", "not", "None", ":", "kwargs", "[", "'MFA'", "]", "=", "MFA", "conn", ".", "put_bucket_versioning", "(", "Bucket", "=", "Bucket", ",", "VersioningConfiguration", "=", "VersioningConfiguration", ",", "*", "*", "kwargs", ")", "return", "{", "'updated'", ":", "True", ",", "'name'", ":", "Bucket", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'updated'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
Given a valid config, update the versioning configuration for a bucket. Returns {updated: true} if versioning configuration was updated and returns {updated: False} if versioning configuration was not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_versioning my_bucket Enabled
[ "Given", "a", "valid", "config", "update", "the", "versioning", "configuration", "for", "a", "bucket", "." ]
python
train
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1389-L1399
def Percentile(pmf, percentage): """Computes a percentile of a given Pmf. percentage: float 0-100 """ p = percentage / 100.0 total = 0 for val, prob in pmf.Items(): total += prob if total >= p: return val
[ "def", "Percentile", "(", "pmf", ",", "percentage", ")", ":", "p", "=", "percentage", "/", "100.0", "total", "=", "0", "for", "val", ",", "prob", "in", "pmf", ".", "Items", "(", ")", ":", "total", "+=", "prob", "if", "total", ">=", "p", ":", "return", "val" ]
Computes a percentile of a given Pmf. percentage: float 0-100
[ "Computes", "a", "percentile", "of", "a", "given", "Pmf", "." ]
python
train
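Percentile above only needs an object whose Items() yields (value, probability) pairs summing to 1; a self-contained check with a stand-in Pmf (the TinyPmf class is an assumption for illustration, not the thinkbayes Pmf):
class TinyPmf:
    def __init__(self, d):
        self.d = d
    def Items(self):
        return sorted(self.d.items())

pmf = TinyPmf({1: 0.2, 2: 0.3, 3: 0.5})
assert Percentile(pmf, 50) == 2  # cumulative probability reaches 0.5 at value 2
assert Percentile(pmf, 90) == 3  # and only reaches 0.9 at value 3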
openego/ding0
ding0/grid/mv_grid/util/data_input.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/util/data_input.py#L221-L248
def _create_node_matrix_from_coord_section(specs): """Transforms parsed data from NODE_COORD_SECTION into an upper triangular matrix Calculates distances between nodes 'MATRIX' key added to `specs` """ distances = specs['NODE_COORD_SECTION'] specs['MATRIX'] = {} for i in distances: origin = tuple(distances[i]) specs['MATRIX'][i] = {} for j in specs['NODE_COORD_SECTION']: destination = tuple(distances[j]) distance = calculate_euc_distance(origin, destination) # # Upper triangular matrix # if i > j, ij = 0 # #if i > j: # continue specs['MATRIX'][i][j] = distance
[ "def", "_create_node_matrix_from_coord_section", "(", "specs", ")", ":", "distances", "=", "specs", "[", "'NODE_COORD_SECTION'", "]", "specs", "[", "'MATRIX'", "]", "=", "{", "}", "for", "i", "in", "distances", ":", "origin", "=", "tuple", "(", "distances", "[", "i", "]", ")", "specs", "[", "'MATRIX'", "]", "[", "i", "]", "=", "{", "}", "for", "j", "in", "specs", "[", "'NODE_COORD_SECTION'", "]", ":", "destination", "=", "tuple", "(", "distances", "[", "j", "]", ")", "distance", "=", "calculate_euc_distance", "(", "origin", ",", "destination", ")", "#", "# Upper triangular matrix", "# if i > j, ij = 0", "#", "#if i > j:", "# continue", "specs", "[", "'MATRIX'", "]", "[", "i", "]", "[", "j", "]", "=", "distance" ]
Transforms parsed data from NODE_COORD_SECTION into an upper triangular matrix Calculates distances between nodes 'MATRIX' key added to `specs`
[ "Transformed", "parsed", "data", "from", "NODE_COORD_SECTION", "into", "an", "upper", "triangular", "matrix" ]
python
train
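Note the upper-triangular skip in the record above is commented out, so the result is actually a full symmetric matrix. A self-contained sketch of the same construction; calculate_euc_distance is assumed here to be plain Euclidean distance:
import math

def calculate_euc_distance(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

coords = {1: (0.0, 0.0), 2: (3.0, 4.0), 3: (6.0, 8.0)}
matrix = {i: {j: calculate_euc_distance(coords[i], coords[j]) for j in coords}
          for i in coords}
assert matrix[1][2] == matrix[2][1] == 5.0  # symmetric, not upper triangular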
apache/spark
python/pyspark/cloudpickle.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L799-L816
def save_attrgetter(self, obj): """attrgetter serializer""" class Dummy(object): def __init__(self, attrs, index=None): self.attrs = attrs self.index = index def __getattribute__(self, item): attrs = object.__getattribute__(self, "attrs") index = object.__getattribute__(self, "index") if index is None: index = len(attrs) attrs.append(item) else: attrs[index] = ".".join([attrs[index], item]) return type(self)(attrs, index) attrs = [] obj(Dummy(attrs)) return self.save_reduce(operator.attrgetter, tuple(attrs))
[ "def", "save_attrgetter", "(", "self", ",", "obj", ")", ":", "class", "Dummy", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "attrs", ",", "index", "=", "None", ")", ":", "self", ".", "attrs", "=", "attrs", "self", ".", "index", "=", "index", "def", "__getattribute__", "(", "self", ",", "item", ")", ":", "attrs", "=", "object", ".", "__getattribute__", "(", "self", ",", "\"attrs\"", ")", "index", "=", "object", ".", "__getattribute__", "(", "self", ",", "\"index\"", ")", "if", "index", "is", "None", ":", "index", "=", "len", "(", "attrs", ")", "attrs", ".", "append", "(", "item", ")", "else", ":", "attrs", "[", "index", "]", "=", "\".\"", ".", "join", "(", "[", "attrs", "[", "index", "]", ",", "item", "]", ")", "return", "type", "(", "self", ")", "(", "attrs", ",", "index", ")", "attrs", "=", "[", "]", "obj", "(", "Dummy", "(", "attrs", ")", ")", "return", "self", ".", "save_reduce", "(", "operator", ".", "attrgetter", ",", "tuple", "(", "attrs", ")", ")" ]
attrgetter serializer
[ "attrgetter", "serializer" ]
python
train
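The Dummy recorder above exists to recover the attribute names an attrgetter was built with, because operator.attrgetter can be reconstructed from those strings; the round trip it relies on, in isolation:
import operator

names = ('real', 'imag')               # what the Dummy probe records
getter = operator.attrgetter(*names)
rebuilt = operator.attrgetter(*names)  # what save_reduce replays on unpickling
assert getter(3 + 4j) == rebuilt(3 + 4j) == (3.0, 4.0)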
hyperledger/indy-sdk
wrappers/python/indy/anoncreds.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/anoncreds.py#L1247-L1364
async def verifier_verify_proof(proof_request_json: str, proof_json: str, schemas_json: str, credential_defs_json: str, rev_reg_defs_json: str, rev_regs_json: str) -> bool: """ Verifies a proof (of multiple credential). All required schemas, public keys and revocation registries must be provided. :param proof_request_json: { "name": string, "version": string, "nonce": string, "requested_attributes": { // set of requested attributes "<attr_referent>": <attr_info>, // see below ..., }, "requested_predicates": { // set of requested predicates "<predicate_referent>": <predicate_info>, // see below ..., }, "non_revoked": Optional<<non_revoc_interval>>, // see below, // If specified prover must proof non-revocation // for date in this interval for each attribute // (can be overridden on attribute level) } :param proof_json: created for request proof json { "requested_proof": { "revealed_attrs": { "requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string}, "requested_attr4_id": {sub_proof_index: number: string, encoded: string}, }, "unrevealed_attrs": { "requested_attr3_id": {sub_proof_index: number} }, "self_attested_attrs": { "requested_attr2_id": self_attested_value, }, "requested_predicates": { "requested_predicate_1_referent": {sub_proof_index: int}, "requested_predicate_2_referent": {sub_proof_index: int}, } } "proof": { "proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ], "aggregated_proof": <aggregated_proof> } "identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}] } :param schemas_json: all schema jsons participating in the proof { <schema1_id>: <schema1_json>, <schema2_id>: <schema2_json>, <schema3_id>: <schema3_json>, } :param credential_defs_json: all credential definitions json participating in the proof { "cred_def1_id": <credential_def1_json>, "cred_def2_id": <credential_def2_json>, "cred_def3_id": <credential_def3_json>, } :param rev_reg_defs_json: all revocation registry definitions json participating in the proof { "rev_reg_def1_id": <rev_reg_def1_json>, "rev_reg_def2_id": <rev_reg_def2_json>, "rev_reg_def3_id": <rev_reg_def3_json>, } :param rev_regs_json: all revocation registries json participating in the proof { "rev_reg_def1_id": { "timestamp1": <rev_reg1>, "timestamp2": <rev_reg2>, }, "rev_reg_def2_id": { "timestamp3": <rev_reg3> }, "rev_reg_def3_id": { "timestamp4": <rev_reg4> }, } :return: valid: true - if signature is valid, false - otherwise """ logger = logging.getLogger(__name__) logger.debug("verifier_verify_proof: >>> proof_request_json: %r, proof_json: %r, schemas_json: %r, " "credential_defs_jsons: %r, rev_reg_defs_json: %r, rev_regs_json: %r", proof_request_json, proof_json, schemas_json, credential_defs_json, rev_reg_defs_json, rev_regs_json) if not hasattr(verifier_verify_proof, "cb"): logger.debug("verifier_verify_proof: Creating callback") verifier_verify_proof.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_bool)) c_proof_request_json = c_char_p(proof_request_json.encode('utf-8')) c_proof_json = c_char_p(proof_json.encode('utf-8')) c_schemas_json = c_char_p(schemas_json.encode('utf-8')) c_credential_defs_json = c_char_p(credential_defs_json.encode('utf-8')) c_rev_reg_defs_json = c_char_p(rev_reg_defs_json.encode('utf-8')) c_rev_regs_json = c_char_p(rev_regs_json.encode('utf-8')) res = await do_call('indy_verifier_verify_proof', c_proof_request_json, c_proof_json, c_schemas_json, c_credential_defs_json, c_rev_reg_defs_json, c_rev_regs_json, verifier_verify_proof.cb) logger.debug("verifier_verify_proof: <<< res: %r", res) return res
[ "async", "def", "verifier_verify_proof", "(", "proof_request_json", ":", "str", ",", "proof_json", ":", "str", ",", "schemas_json", ":", "str", ",", "credential_defs_json", ":", "str", ",", "rev_reg_defs_json", ":", "str", ",", "rev_regs_json", ":", "str", ")", "->", "bool", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"verifier_verify_proof: >>> proof_request_json: %r, proof_json: %r, schemas_json: %r, \"", "\"credential_defs_jsons: %r, rev_reg_defs_json: %r, rev_regs_json: %r\"", ",", "proof_request_json", ",", "proof_json", ",", "schemas_json", ",", "credential_defs_json", ",", "rev_reg_defs_json", ",", "rev_regs_json", ")", "if", "not", "hasattr", "(", "verifier_verify_proof", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"verifier_verify_proof: Creating callback\"", ")", "verifier_verify_proof", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "c_bool", ")", ")", "c_proof_request_json", "=", "c_char_p", "(", "proof_request_json", ".", "encode", "(", "'utf-8'", ")", ")", "c_proof_json", "=", "c_char_p", "(", "proof_json", ".", "encode", "(", "'utf-8'", ")", ")", "c_schemas_json", "=", "c_char_p", "(", "schemas_json", ".", "encode", "(", "'utf-8'", ")", ")", "c_credential_defs_json", "=", "c_char_p", "(", "credential_defs_json", ".", "encode", "(", "'utf-8'", ")", ")", "c_rev_reg_defs_json", "=", "c_char_p", "(", "rev_reg_defs_json", ".", "encode", "(", "'utf-8'", ")", ")", "c_rev_regs_json", "=", "c_char_p", "(", "rev_regs_json", ".", "encode", "(", "'utf-8'", ")", ")", "res", "=", "await", "do_call", "(", "'indy_verifier_verify_proof'", ",", "c_proof_request_json", ",", "c_proof_json", ",", "c_schemas_json", ",", "c_credential_defs_json", ",", "c_rev_reg_defs_json", ",", "c_rev_regs_json", ",", "verifier_verify_proof", ".", "cb", ")", "logger", ".", "debug", "(", "\"verifier_verify_proof: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Verifies a proof (of multiple credential). All required schemas, public keys and revocation registries must be provided. :param proof_request_json: { "name": string, "version": string, "nonce": string, "requested_attributes": { // set of requested attributes "<attr_referent>": <attr_info>, // see below ..., }, "requested_predicates": { // set of requested predicates "<predicate_referent>": <predicate_info>, // see below ..., }, "non_revoked": Optional<<non_revoc_interval>>, // see below, // If specified prover must proof non-revocation // for date in this interval for each attribute // (can be overridden on attribute level) } :param proof_json: created for request proof json { "requested_proof": { "revealed_attrs": { "requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string}, "requested_attr4_id": {sub_proof_index: number: string, encoded: string}, }, "unrevealed_attrs": { "requested_attr3_id": {sub_proof_index: number} }, "self_attested_attrs": { "requested_attr2_id": self_attested_value, }, "requested_predicates": { "requested_predicate_1_referent": {sub_proof_index: int}, "requested_predicate_2_referent": {sub_proof_index: int}, } } "proof": { "proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ], "aggregated_proof": <aggregated_proof> } "identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}] } :param schemas_json: all schema jsons participating in the proof { <schema1_id>: <schema1_json>, <schema2_id>: <schema2_json>, <schema3_id>: <schema3_json>, } :param credential_defs_json: all credential definitions json participating in the proof { "cred_def1_id": <credential_def1_json>, "cred_def2_id": <credential_def2_json>, "cred_def3_id": <credential_def3_json>, } :param rev_reg_defs_json: all revocation registry definitions json participating in the proof { "rev_reg_def1_id": <rev_reg_def1_json>, "rev_reg_def2_id": <rev_reg_def2_json>, "rev_reg_def3_id": <rev_reg_def3_json>, } :param rev_regs_json: all revocation registries json participating in the proof { "rev_reg_def1_id": { "timestamp1": <rev_reg1>, "timestamp2": <rev_reg2>, }, "rev_reg_def2_id": { "timestamp3": <rev_reg3> }, "rev_reg_def3_id": { "timestamp4": <rev_reg4> }, } :return: valid: true - if signature is valid, false - otherwise
[ "Verifies", "a", "proof", "(", "of", "multiple", "credential", ")", ".", "All", "required", "schemas", "public", "keys", "and", "revocation", "registries", "must", "be", "provided", "." ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/output_window.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/output_window.py#L1246-L1258
def _linefeed(self): """ Performs a line feed. """ last_line = self._cursor.blockNumber() == self._text_edit.blockCount() - 1 if self._cursor.atEnd() or last_line: if last_line: self._cursor.movePosition(self._cursor.EndOfBlock) self._cursor.insertText('\n') else: self._cursor.movePosition(self._cursor.Down) self._cursor.movePosition(self._cursor.StartOfBlock) self._text_edit.setTextCursor(self._cursor)
[ "def", "_linefeed", "(", "self", ")", ":", "last_line", "=", "self", ".", "_cursor", ".", "blockNumber", "(", ")", "==", "self", ".", "_text_edit", ".", "blockCount", "(", ")", "-", "1", "if", "self", ".", "_cursor", ".", "atEnd", "(", ")", "or", "last_line", ":", "if", "last_line", ":", "self", ".", "_cursor", ".", "movePosition", "(", "self", ".", "_cursor", ".", "EndOfBlock", ")", "self", ".", "_cursor", ".", "insertText", "(", "'\\n'", ")", "else", ":", "self", ".", "_cursor", ".", "movePosition", "(", "self", ".", "_cursor", ".", "Down", ")", "self", ".", "_cursor", ".", "movePosition", "(", "self", ".", "_cursor", ".", "StartOfBlock", ")", "self", ".", "_text_edit", ".", "setTextCursor", "(", "self", ".", "_cursor", ")" ]
Performs a line feed.
[ "Performs", "a", "line", "feed", "." ]
python
train
edx/edx-search
search/elastic.py
https://github.com/edx/edx-search/blob/476cf02b71ceba34ae7d8b798f36d60692317c55/search/elastic.py#L64-L90
def _get_filter_field(field_name, field_value): """ Return field to apply into filter; if a ValueRange then use a range, if an array then use a terms match, otherwise look for a term match """ filter_field = None if isinstance(field_value, ValueRange): range_values = {} if field_value.lower: range_values.update({"gte": field_value.lower_string}) if field_value.upper: range_values.update({"lte": field_value.upper_string}) filter_field = { "range": { field_name: range_values } } elif _is_iterable(field_value): filter_field = { "terms": { field_name: field_value } } else: filter_field = { "term": { field_name: field_value } } return filter_field
[ "def", "_get_filter_field", "(", "field_name", ",", "field_value", ")", ":", "filter_field", "=", "None", "if", "isinstance", "(", "field_value", ",", "ValueRange", ")", ":", "range_values", "=", "{", "}", "if", "field_value", ".", "lower", ":", "range_values", ".", "update", "(", "{", "\"gte\"", ":", "field_value", ".", "lower_string", "}", ")", "if", "field_value", ".", "upper", ":", "range_values", ".", "update", "(", "{", "\"lte\"", ":", "field_value", ".", "upper_string", "}", ")", "filter_field", "=", "{", "\"range\"", ":", "{", "field_name", ":", "range_values", "}", "}", "elif", "_is_iterable", "(", "field_value", ")", ":", "filter_field", "=", "{", "\"terms\"", ":", "{", "field_name", ":", "field_value", "}", "}", "else", ":", "filter_field", "=", "{", "\"term\"", ":", "{", "field_name", ":", "field_value", "}", "}", "return", "filter_field" ]
Return field to apply into filter; if a ValueRange then use a range, if an array then use a terms match, otherwise look for a term match
[ "Return", "field", "to", "apply", "into", "filter", "if", "an", "array", "then", "use", "a", "range", "otherwise", "look", "for", "a", "term", "match" ]
python
valid
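The three filter shapes produced above, checked with a stand-in ValueRange and _is_iterable (both are assumptions mirroring only the attributes the function reads, not the edx-search originals):
class ValueRange:
    def __init__(self, lower=None, upper=None):
        self.lower, self.upper = lower, upper
        self.lower_string, self.upper_string = str(lower), str(upper)

def _is_iterable(v):
    return isinstance(v, (list, tuple, set))

assert _get_filter_field('org', 'edX') == {'term': {'org': 'edX'}}
assert _get_filter_field('org', ['edX', 'MITx']) == {'terms': {'org': ['edX', 'MITx']}}
assert _get_filter_field('start', ValueRange(1, 9)) == {'range': {'start': {'gte': '1', 'lte': '9'}}}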
tcalmant/ipopo
pelix/shell/parser.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/parser.py#L382-L416
def get_ns_command(self, cmd_name): """ Retrieves the name space and the command associated to the given command name. :param cmd_name: The given command name :return: A 2-tuple (name space, command) :raise ValueError: Unknown command name """ namespace, command = _split_ns_command(cmd_name) if not namespace: # Name space not given, look for the command spaces = self.__find_command_ns(command) if not spaces: # Unknown command raise ValueError("Unknown command {0}".format(command)) elif len(spaces) > 1: # Multiple possibilities if spaces[0] == DEFAULT_NAMESPACE: # Default name space has priority namespace = DEFAULT_NAMESPACE else: # Ambiguous name raise ValueError( "Multiple name spaces for command '{0}': {1}".format( command, ", ".join(sorted(spaces)) ) ) else: # Use the found name space namespace = spaces[0] # Command found return namespace, command
[ "def", "get_ns_command", "(", "self", ",", "cmd_name", ")", ":", "namespace", ",", "command", "=", "_split_ns_command", "(", "cmd_name", ")", "if", "not", "namespace", ":", "# Name space not given, look for the command", "spaces", "=", "self", ".", "__find_command_ns", "(", "command", ")", "if", "not", "spaces", ":", "# Unknown command", "raise", "ValueError", "(", "\"Unknown command {0}\"", ".", "format", "(", "command", ")", ")", "elif", "len", "(", "spaces", ")", ">", "1", ":", "# Multiple possibilities", "if", "spaces", "[", "0", "]", "==", "DEFAULT_NAMESPACE", ":", "# Default name space has priority", "namespace", "=", "DEFAULT_NAMESPACE", "else", ":", "# Ambiguous name", "raise", "ValueError", "(", "\"Multiple name spaces for command '{0}': {1}\"", ".", "format", "(", "command", ",", "\", \"", ".", "join", "(", "sorted", "(", "spaces", ")", ")", ")", ")", "else", ":", "# Use the found name space", "namespace", "=", "spaces", "[", "0", "]", "# Command found", "return", "namespace", ",", "command" ]
Retrieves the name space and the command associated to the given command name. :param cmd_name: The given command name :return: A 2-tuple (name space, command) :raise ValueError: Unknown command name
[ "Retrieves", "the", "name", "space", "and", "the", "command", "associated", "to", "the", "given", "command", "name", "." ]
python
train
MediaFire/mediafire-python-open-sdk
mediafire/client.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/client.py#L255-L280
def get_folder_contents_iter(self, uri): """Return iterator for directory contents. uri -- mediafire URI Example: for item in get_folder_contents_iter('mf:///Documents'): print(item) """ resource = self.get_resource_by_uri(uri) if not isinstance(resource, Folder): raise NotAFolderError(uri) folder_key = resource['folderkey'] for item in self._folder_get_content_iter(folder_key): if 'filename' in item: # Work around https://mediafire.mantishub.com/view.php?id=5 # TODO: remove in 1.0 if ".patch." in item['filename']: continue yield File(item) elif 'name' in item: yield Folder(item)
[ "def", "get_folder_contents_iter", "(", "self", ",", "uri", ")", ":", "resource", "=", "self", ".", "get_resource_by_uri", "(", "uri", ")", "if", "not", "isinstance", "(", "resource", ",", "Folder", ")", ":", "raise", "NotAFolderError", "(", "uri", ")", "folder_key", "=", "resource", "[", "'folderkey'", "]", "for", "item", "in", "self", ".", "_folder_get_content_iter", "(", "folder_key", ")", ":", "if", "'filename'", "in", "item", ":", "# Work around https://mediafire.mantishub.com/view.php?id=5", "# TODO: remove in 1.0", "if", "\".patch.\"", "in", "item", "[", "'filename'", "]", ":", "continue", "yield", "File", "(", "item", ")", "elif", "'name'", "in", "item", ":", "yield", "Folder", "(", "item", ")" ]
Return iterator for directory contents. uri -- mediafire URI Example: for item in get_folder_contents_iter('mf:///Documents'): print(item)
[ "Return", "iterator", "for", "directory", "contents", "." ]
python
train
daler/gffutils
gffutils/helpers.py
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L307-L333
def merge_attributes(attr1, attr2): """ Merges two attribute dictionaries into a single dictionary. Parameters ---------- `attr1`, `attr2` : dict Returns ------- dict """ new_d = copy.deepcopy(attr1) new_d.update(attr2) #all of attr2 key : values just overwrote attr1, fix it for k, v in new_d.items(): if not isinstance(v, list): new_d[k] = [v] for k, v in six.iteritems(attr1): if k in attr2: if not isinstance(v, list): v = [v] new_d[k].extend(v) return dict((k, sorted(set(v))) for k, v in new_d.items())
[ "def", "merge_attributes", "(", "attr1", ",", "attr2", ")", ":", "new_d", "=", "copy", ".", "deepcopy", "(", "attr1", ")", "new_d", ".", "update", "(", "attr2", ")", "#all of attr2 key : values just overwrote attr1, fix it", "for", "k", ",", "v", "in", "new_d", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "v", ",", "list", ")", ":", "new_d", "[", "k", "]", "=", "[", "v", "]", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "attr1", ")", ":", "if", "k", "in", "attr2", ":", "if", "not", "isinstance", "(", "v", ",", "list", ")", ":", "v", "=", "[", "v", "]", "new_d", "[", "k", "]", ".", "extend", "(", "v", ")", "return", "dict", "(", "(", "k", ",", "sorted", "(", "set", "(", "v", ")", ")", ")", "for", "k", ",", "v", "in", "new_d", ".", "items", "(", ")", ")" ]
Merges two attribute dictionaries into a single dictionary. Parameters ---------- `attr1`, `attr2` : dict Returns ------- dict
[ "Merges", "two", "attribute", "dictionaries", "into", "a", "single", "dictionary", "." ]
python
train
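A usage sketch for merge_attributes above, showing scalar promotion, key union, de-duplication, and sorting (assumes the record's function with its copy and six imports in scope):
a1 = {'ID': 'gene1', 'Alias': ['g1', 'G1']}
a2 = {'Alias': 'g1', 'Note': 'merged'}
merged = merge_attributes(a1, a2)
# scalars become lists, shared keys are unioned, values are sorted
assert merged == {'ID': ['gene1'], 'Alias': ['G1', 'g1'], 'Note': ['merged']}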
neuropsychology/NeuroKit.py
neurokit/signal/signal.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/signal/signal.py#L21-L71
def interpolate(values, value_times, sampling_rate=1000): """ 3rd order spline interpolation. Parameters ---------- values : dataframe Values. value_times : list Time indices of values. sampling_rate : int Sampling rate (samples/second). Returns ---------- signal : pd.Series An array containing the values indexed by time. Example ---------- >>> import neurokit as nk >>> signal = interpolate([800, 900, 700, 500], [1000, 2000, 3000, 4000], sampling_rate=1000) >>> pd.Series(signal).plot() Notes ---------- *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ *Dependencies* - scipy - pandas """ # values=RRis.copy() # value_times=beats_times.copy() # Preprocessing initial_index = value_times[0] value_times = np.array(value_times) - initial_index # fit a 3rd degree spline on the data. spline = scipy.interpolate.splrep(x=value_times, y=values, k=3, s=0) # s=0 guarantees that it will pass through ALL the given points x = np.arange(0, value_times[-1], 1) # Get the values indexed per time signal = scipy.interpolate.splev(x=x, tck=spline, der=0) # Transform to series signal = pd.Series(signal) signal.index = np.array(np.arange(initial_index, initial_index+len(signal), 1)) return(signal)
[ "def", "interpolate", "(", "values", ",", "value_times", ",", "sampling_rate", "=", "1000", ")", ":", "# values=RRis.copy()", "# value_times=beats_times.copy()", "# Preprocessing", "initial_index", "=", "value_times", "[", "0", "]", "value_times", "=", "np", ".", "array", "(", "value_times", ")", "-", "initial_index", "# fit a 3rd degree spline on the data.", "spline", "=", "scipy", ".", "interpolate", ".", "splrep", "(", "x", "=", "value_times", ",", "y", "=", "values", ",", "k", "=", "3", ",", "s", "=", "0", ")", "# s=0 guarantees that it will pass through ALL the given points", "x", "=", "np", ".", "arange", "(", "0", ",", "value_times", "[", "-", "1", "]", ",", "1", ")", "# Get the values indexed per time", "signal", "=", "scipy", ".", "interpolate", ".", "splev", "(", "x", "=", "x", ",", "tck", "=", "spline", ",", "der", "=", "0", ")", "# Transform to series", "signal", "=", "pd", ".", "Series", "(", "signal", ")", "signal", ".", "index", "=", "np", ".", "array", "(", "np", ".", "arange", "(", "initial_index", ",", "initial_index", "+", "len", "(", "signal", ")", ",", "1", ")", ")", "return", "(", "signal", ")" ]
3rd order spline interpolation. Parameters ---------- values : dataframe Values. value_times : list Time indices of values. sampling_rate : int Sampling rate (samples/second). Returns ---------- signal : pd.Series An array containing the values indexed by time. Example ---------- >>> import neurokit as nk >>> signal = interpolate([800, 900, 700, 500], [1000, 2000, 3000, 4000], sampling_rate=1000) >>> pd.Series(signal).plot() Notes ---------- *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ *Dependencies* - scipy - pandas
[ "3rd", "order", "spline", "interpolation", "." ]
python
train
shoebot/shoebot
lib/photobot/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/photobot/__init__.py#L745-L756
def sharpen(self, value=1.0): """Increases or decreases the sharpness in the layer. The given value is a percentage to increase or decrease the image sharpness, for example 0.8 means sharpness at 80%. """ s = ImageEnhance.Sharpness(self.img) self.img = s.enhance(value)
[ "def", "sharpen", "(", "self", ",", "value", "=", "1.0", ")", ":", "s", "=", "ImageEnhance", ".", "Sharpness", "(", "self", ".", "img", ")", "self", ".", "img", "=", "s", ".", "enhance", "(", "value", ")" ]
Increases or decreases the sharpness in the layer. The given value is a percentage to increase or decrease the image sharpness, for example 0.8 means sharpness at 80%.
[ "Increases", "or", "decreases", "the", "sharpness", "in", "the", "layer", ".", "The", "given", "value", "is", "a", "percentage", "to", "increase", "or", "decrease", "the", "image", "sharpness", "for", "example", "0", ".", "8", "means", "sharpness", "at", "80%", "." ]
python
valid
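sharpen above delegates to PIL's ImageEnhance; the equivalent standalone call, on a synthetic image so the sketch is self-contained (factor 1.0 returns the original, below 1.0 softens, above 1.0 sharpens):
from PIL import Image, ImageEnhance

img = Image.new('RGB', (64, 64), 'gray')
sharper = ImageEnhance.Sharpness(img).enhance(2.0)  # sharpness at 200%
softer = ImageEnhance.Sharpness(img).enhance(0.8)   # sharpness at 80%, as in the docstring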
Mxit/python-mxit
mxit/services.py
https://github.com/Mxit/python-mxit/blob/6b18a54ef6fbfe1f9d94755ba3d4ad77743c8b0c/mxit/services.py#L240-L249
def add_contact(self, contact_id, scope='contact/invite'): """ Add a contact contact_id can either be the mxit ID of a service or a Mxit user User authentication required with the following scope: 'contact/invite' """ return _put( token=self.oauth.get_user_token(scope), uri='/user/socialgraph/contact/' + urllib.quote(contact_id) )
[ "def", "add_contact", "(", "self", ",", "contact_id", ",", "scope", "=", "'contact/invite'", ")", ":", "return", "_put", "(", "token", "=", "self", ".", "oauth", ".", "get_user_token", "(", "scope", ")", ",", "uri", "=", "'/user/socialgraph/contact/'", "+", "urllib", ".", "quote", "(", "contact_id", ")", ")" ]
Add a contact contact_id can either be the mxit ID of a service or a Mxit user User authentication required with the following scope: 'contact/invite'
[ "Add", "a", "contact", "contact_id", "can", "either", "be", "the", "mxit", "ID", "of", "a", "service", "or", "a", "Mxit", "user", "User", "authentication", "required", "with", "the", "following", "scope", ":", "contact", "/", "invite" ]
python
train
bjoernricks/python-quilt
quilt/db.py
https://github.com/bjoernricks/python-quilt/blob/fae88237f601848cc34d073584d9dcb409f01777/quilt/db.py#L156-L169
def add_patches(self, patches, after=None): """ Add a list of patches to the patches list """ if after is None: self.insert_patches(patches) else: self._check_patch(after) patchlines = self._patchlines_before(after) patchlines.append(self.patch2line[after]) for patch in patches: patchline = PatchLine(patch) patchlines.append(patchline) self.patch2line[patchline.get_patch()] = patchline patchlines.extend(self._patchlines_after(after)) self.patchlines = patchlines
[ "def", "add_patches", "(", "self", ",", "patches", ",", "after", "=", "None", ")", ":", "if", "after", "is", "None", ":", "self", ".", "insert_patches", "(", "patches", ")", "else", ":", "self", ".", "_check_patch", "(", "after", ")", "patchlines", "=", "self", ".", "_patchlines_before", "(", "after", ")", "patchlines", ".", "append", "(", "self", ".", "patch2line", "[", "after", "]", ")", "for", "patch", "in", "patches", ":", "patchline", "=", "PatchLine", "(", "patch", ")", "patchlines", ".", "append", "(", "patchline", ")", "self", ".", "patch2line", "[", "patchline", ".", "get_patch", "(", ")", "]", "=", "patchline", "patchlines", ".", "extend", "(", "self", ".", "_patchlines_after", "(", "after", ")", ")", "self", ".", "patchlines", "=", "patchlines" ]
Add a list of patches to the patches list
[ "Add", "a", "list", "of", "patches", "to", "the", "patches", "list" ]
python
test
ktdreyer/txbugzilla
txbugzilla/__init__.py
https://github.com/ktdreyer/txbugzilla/blob/ccfc6667ce9d696b08b468b25c813cc2b68d30d6/txbugzilla/__init__.py#L218-L221
def id(self): """ Bug ID number that caused this error """ m = re.match(r'Bug #(\d+) does not exist', self.message) return m.group(1)
[ "def", "id", "(", "self", ")", ":", "m", "=", "re", ".", "match", "(", "r'Bug #(\\d+) does not exist'", ",", "self", ".", "message", ")", "return", "m", ".", "group", "(", "1", ")" ]
Bug ID number that caused this error
[ "Bug", "ID", "number", "that", "caused", "this", "error" ]
python
train
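The id property above is a one-group regex extraction; the same pattern in isolation (the message text is a hypothetical example):
import re

message = 'Bug #1234 does not exist.'
m = re.match(r'Bug #(\d+) does not exist', message)
assert m is not None and m.group(1) == '1234'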
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1489-L1534
def _parse_tokens(chunk, format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Parses tokens from <word> elements in the given XML <chunk> element. Returns a flat list of tokens, in which each token is [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]. If a <chunk type="PNP"> is encountered, traverses all of the chunks in the PNP. """ tokens = [] # Only process <chunk> and <chink> elements, # text nodes in between return an empty list. if not (chunk.tag == XML_CHUNK or chunk.tag == XML_CHINK): return [] type = chunk.get(XML_TYPE, "O") if type == "PNP": # For, <chunk type="PNP">, recurse all the child chunks inside the PNP. for ch in chunk: tokens.extend(_parse_tokens(ch, format)) # Tag each of them as part of the PNP. if PNP in format: i = format.index(PNP) for j, token in enumerate(tokens): token[i] = (j==0 and "B-" or "I-") + "PNP" # Store attachments so we can construct anchor id's in parse_string(). # This has to be done at the end, when all the chunks have been found. a = chunk.get(XML_OF).split(_UID_SEPARATOR)[-1] if a: _attachments.setdefault(a, []) _attachments[a].append(tokens) return tokens # For <chunk type-"VP" id="1">, the relation is VP-1. # For <chunk type="NP" relation="OBJ" of="1">, the relation is NP-OBJ-1. relation = _parse_relation(chunk, type) # Process all of the <word> elements in the chunk, for example: # <word type="NN" lemma="pizza">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza] for word in filter(lambda n: n.tag == XML_WORD, chunk): tokens.append(_parse_token(word, chunk=type, relation=relation, format=format)) # Add the IOB chunk tags: # words at the start of a chunk are marked with B-, words inside with I-. if CHUNK in format: i = format.index(CHUNK) for j, token in enumerate(tokens): token[i] = token[i] != "O" and ((j==0 and "B-" or "I-") + token[i]) or "O" # The chunk can be the anchor of one or more PNP chunks. # Store anchors so we can construct anchor id's in parse_string(). a = chunk.get(XML_ANCHOR, "").split(_UID_SEPARATOR)[-1] if a: _anchors[a] = tokens return tokens
[ "def", "_parse_tokens", "(", "chunk", ",", "format", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", ":", "tokens", "=", "[", "]", "# Only process <chunk> and <chink> elements, ", "# text nodes in between return an empty list.", "if", "not", "(", "chunk", ".", "tag", "==", "XML_CHUNK", "or", "chunk", ".", "tag", "==", "XML_CHINK", ")", ":", "return", "[", "]", "type", "=", "chunk", ".", "get", "(", "XML_TYPE", ",", "\"O\"", ")", "if", "type", "==", "\"PNP\"", ":", "# For, <chunk type=\"PNP\">, recurse all the child chunks inside the PNP.", "for", "ch", "in", "chunk", ":", "tokens", ".", "extend", "(", "_parse_tokens", "(", "ch", ",", "format", ")", ")", "# Tag each of them as part of the PNP.", "if", "PNP", "in", "format", ":", "i", "=", "format", ".", "index", "(", "PNP", ")", "for", "j", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "token", "[", "i", "]", "=", "(", "j", "==", "0", "and", "\"B-\"", "or", "\"I-\"", ")", "+", "\"PNP\"", "# Store attachments so we can construct anchor id's in parse_string().", "# This has to be done at the end, when all the chunks have been found.", "a", "=", "chunk", ".", "get", "(", "XML_OF", ")", ".", "split", "(", "_UID_SEPARATOR", ")", "[", "-", "1", "]", "if", "a", ":", "_attachments", ".", "setdefault", "(", "a", ",", "[", "]", ")", "_attachments", "[", "a", "]", ".", "append", "(", "tokens", ")", "return", "tokens", "# For <chunk type-\"VP\" id=\"1\">, the relation is VP-1.", "# For <chunk type=\"NP\" relation=\"OBJ\" of=\"1\">, the relation is NP-OBJ-1.", "relation", "=", "_parse_relation", "(", "chunk", ",", "type", ")", "# Process all of the <word> elements in the chunk, for example:", "# <word type=\"NN\" lemma=\"pizza\">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza]", "for", "word", "in", "filter", "(", "lambda", "n", ":", "n", ".", "tag", "==", "XML_WORD", ",", "chunk", ")", ":", "tokens", ".", "append", "(", "_parse_token", "(", "word", ",", "chunk", "=", "type", ",", "relation", "=", "relation", ",", "format", "=", "format", ")", ")", "# Add the IOB chunk tags:", "# words at the start of a chunk are marked with B-, words inside with I-.", "if", "CHUNK", "in", "format", ":", "i", "=", "format", ".", "index", "(", "CHUNK", ")", "for", "j", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "token", "[", "i", "]", "=", "token", "[", "i", "]", "!=", "\"O\"", "and", "(", "(", "j", "==", "0", "and", "\"B-\"", "or", "\"I-\"", ")", "+", "token", "[", "i", "]", ")", "or", "\"O\"", "# The chunk can be the anchor of one or more PNP chunks.", "# Store anchors so we can construct anchor id's in parse_string().", "a", "=", "chunk", ".", "get", "(", "XML_ANCHOR", ",", "\"\"", ")", ".", "split", "(", "_UID_SEPARATOR", ")", "[", "-", "1", "]", "if", "a", ":", "_anchors", "[", "a", "]", "=", "tokens", "return", "tokens" ]
Parses tokens from <word> elements in the given XML <chunk> element. Returns a flat list of tokens, in which each token is [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]. If a <chunk type="PNP"> is encountered, traverses all of the chunks in the PNP.
[ "Parses", "tokens", "from", "<word", ">", "elements", "in", "the", "given", "XML", "<chunk", ">", "element", ".", "Returns", "a", "flat", "list", "of", "tokens", "in", "which", "each", "token", "is", "[", "WORD", "POS", "CHUNK", "PNP", "RELATION", "ANCHOR", "LEMMA", "]", ".", "If", "a", "<chunk", "type", "=", "PNP", ">", "is", "encountered", "traverses", "all", "of", "the", "chunks", "in", "the", "PNP", "." ]
python
train
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1961-L1975
def shutdown(self): """Shutdown the server and stop responding to requests.""" self.__should_stop.set() if self.__server_thread == threading.current_thread(): self.__is_shutdown.set() self.__is_running.clear() else: if self.__wakeup_fd is not None: os.write(self.__wakeup_fd.write_fd, b'\x00') self.__is_shutdown.wait() if self.__wakeup_fd is not None: self.__wakeup_fd.close() self.__wakeup_fd = None for server in self.sub_servers: server.shutdown()
[ "def", "shutdown", "(", "self", ")", ":", "self", ".", "__should_stop", ".", "set", "(", ")", "if", "self", ".", "__server_thread", "==", "threading", ".", "current_thread", "(", ")", ":", "self", ".", "__is_shutdown", ".", "set", "(", ")", "self", ".", "__is_running", ".", "clear", "(", ")", "else", ":", "if", "self", ".", "__wakeup_fd", "is", "not", "None", ":", "os", ".", "write", "(", "self", ".", "__wakeup_fd", ".", "write_fd", ",", "b'\\x00'", ")", "self", ".", "__is_shutdown", ".", "wait", "(", ")", "if", "self", ".", "__wakeup_fd", "is", "not", "None", ":", "self", ".", "__wakeup_fd", ".", "close", "(", ")", "self", ".", "__wakeup_fd", "=", "None", "for", "server", "in", "self", ".", "sub_servers", ":", "server", ".", "shutdown", "(", ")" ]
Shutdown the server and stop responding to requests.
[ "Shutdown", "the", "server", "and", "stop", "responding", "to", "requests", "." ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L5686-L5689
def resolve(self, pubID, sysID): """Do a complete resolution lookup of an External Identifier """ ret = libxml2mod.xmlACatalogResolve(self._o, pubID, sysID) return ret
[ "def", "resolve", "(", "self", ",", "pubID", ",", "sysID", ")", ":", "ret", "=", "libxml2mod", ".", "xmlACatalogResolve", "(", "self", ".", "_o", ",", "pubID", ",", "sysID", ")", "return", "ret" ]
Do a complete resolution lookup of an External Identifier
[ "Do", "a", "complete", "resolution", "lookup", "of", "an", "External", "Identifier" ]
python
train
DigitalGlobe/gbdxtools
gbdxtools/catalog.py
https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/catalog.py#L100-L115
def get_address_coords(self, address): ''' Use the google geocoder to get latitude and longitude for an address string Args: address: any address string Returns: A tuple of (lat,lng) ''' url = "https://maps.googleapis.com/maps/api/geocode/json?&address=" + address r = requests.get(url) r.raise_for_status() results = r.json()['results'] lat = results[0]['geometry']['location']['lat'] lng = results[0]['geometry']['location']['lng'] return lat, lng
[ "def", "get_address_coords", "(", "self", ",", "address", ")", ":", "url", "=", "\"https://maps.googleapis.com/maps/api/geocode/json?&address=\"", "+", "address", "r", "=", "requests", ".", "get", "(", "url", ")", "r", ".", "raise_for_status", "(", ")", "results", "=", "r", ".", "json", "(", ")", "[", "'results'", "]", "lat", "=", "results", "[", "0", "]", "[", "'geometry'", "]", "[", "'location'", "]", "[", "'lat'", "]", "lng", "=", "results", "[", "0", "]", "[", "'geometry'", "]", "[", "'location'", "]", "[", "'lng'", "]", "return", "lat", ",", "lng" ]
Use the google geocoder to get latitude and longitude for an address string Args: address: any address string Returns: A tuple of (lat,lng)
[ "Use", "the", "google", "geocoder", "to", "get", "latitude", "and", "longitude", "for", "an", "address", "string" ]
python
valid
gijzelaerr/python-snap7
snap7/server.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/server.py#L151-L159
def start(self, tcpport=102): """ start the server. """ if tcpport != 102: logger.info("setting server TCP port to %s" % tcpport) self.set_param(snap7.snap7types.LocalPort, tcpport) logger.info("starting server on 0.0.0.0:%s" % tcpport) return self.library.Srv_Start(self.pointer)
[ "def", "start", "(", "self", ",", "tcpport", "=", "102", ")", ":", "if", "tcpport", "!=", "102", ":", "logger", ".", "info", "(", "\"setting server TCP port to %s\"", "%", "tcpport", ")", "self", ".", "set_param", "(", "snap7", ".", "snap7types", ".", "LocalPort", ",", "tcpport", ")", "logger", ".", "info", "(", "\"starting server on 0.0.0.0:%s\"", "%", "tcpport", ")", "return", "self", ".", "library", ".", "Srv_Start", "(", "self", ".", "pointer", ")" ]
start the server.
[ "start", "the", "server", "." ]
python
train
sprymix/metamagic.json
metamagic/json/encoder.py
https://github.com/sprymix/metamagic.json/blob/c95d3cacd641d433af44f0774f51a085cb4888e6/metamagic/json/encoder.py#L216-L227
def _encode_list(self, obj):# do """Returns a JSON representation of a Python list""" self._increment_nested_level() buffer = [] for element in obj: buffer.append(self._encode(element)) self._decrement_nested_level() return '['+ ','.join(buffer) + ']'
[ "def", "_encode_list", "(", "self", ",", "obj", ")", ":", "# do", "self", ".", "_increment_nested_level", "(", ")", "buffer", "=", "[", "]", "for", "element", "in", "obj", ":", "buffer", ".", "append", "(", "self", ".", "_encode", "(", "element", ")", ")", "self", ".", "_decrement_nested_level", "(", ")", "return", "'['", "+", "','", ".", "join", "(", "buffer", ")", "+", "']'" ]
Returns a JSON representation of a Python list
[ "Returns", "a", "JSON", "representation", "of", "a", "Python", "list" ]
python
train
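A self-contained sketch of the recursive list encoding above, with repr() standing in for the encoder's per-type _encode dispatch (an assumption for illustration) and the nesting-depth bookkeeping dropped:
def encode(obj):
    if isinstance(obj, list):
        return '[' + ','.join(encode(e) for e in obj) + ']'
    return repr(obj)  # stand-in for per-type JSON encoding

assert encode([1, [2, 3], 4]) == '[1,[2,3],4]'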
ic-labs/django-icekit
icekit/plugins/base.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/plugins/base.py#L41-L45
def get_plugins(cls, *args, **kwargs): """ Return a list of plugin instances and pass through arguments. """ return [plugin(*args, **kwargs) for plugin in cls.plugins]
[ "def", "get_plugins", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "[", "plugin", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "plugin", "in", "cls", ".", "plugins", "]" ]
Return a list of plugin instances and pass through arguments.
[ "Return", "a", "list", "of", "plugin", "instances", "and", "pass", "through", "arguments", "." ]
python
train
roanuz/py-cricket
src/pycricket.py
https://github.com/roanuz/py-cricket/blob/fa47fe2e92915fc58db38898213e974742af55d4/src/pycricket.py#L206-L217
def get_player_stats(self, player_key, board_key): """ Calling the Player Stats API Args: player_key: Key of the player board_key: key of the board Return: json data """ player_stats_url = self.api_path + 'player/' + player_key + '/league/' + board_key + '/stats/' response = self.get_response(player_stats_url) return response
[ "def", "get_player_stats", "(", "self", ",", "player_key", ",", "board_key", ")", ":", "player_stats_url", "=", "self", ".", "api_path", "+", "'player/'", "+", "player_key", "+", "'/league/'", "+", "board_key", "+", "'/stats/'", "response", "=", "self", ".", "get_response", "(", "player_stats_url", ")", "return", "response" ]
Calling the Player Stats API Args: player_key: Key of the player board_key: key of the board Return: json data
[ "Calling", "the", "Player", "Stats", "API", "Args", ":", "player_key", ":", "Key", "of", "the", "player", "board_key", ":", "key", "of", "the", "board", "Return", ":", "json", "data" ]
python
train
jobovy/galpy
galpy/potential/TwoPowerSphericalPotential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/TwoPowerSphericalPotential.py#L878-L895
def _evaluate(self,R,z,phi=0.,t=0.): """ NAME: _evaluate PURPOSE: evaluate the potential at R,z INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z) HISTORY: 2010-07-09 - Started - Bovy (NYU) """ r= numpy.sqrt(R**2.+z**2.) return -numpy.log(1.+r/self.a)/r
[ "def", "_evaluate", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ")", ":", "r", "=", "numpy", ".", "sqrt", "(", "R", "**", "2.", "+", "z", "**", "2.", ")", "return", "-", "numpy", ".", "log", "(", "1.", "+", "r", "/", "self", ".", "a", ")", "/", "r" ]
NAME: _evaluate PURPOSE: evaluate the potential at R,z INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: Phi(R,z) HISTORY: 2010-07-09 - Started - Bovy (NYU)
[ "NAME", ":", "_evaluate", "PURPOSE", ":", "evaluate", "the", "potential", "at", "R", "z", "INPUT", ":", "R", "-", "Galactocentric", "cylindrical", "radius", "z", "-", "vertical", "height", "phi", "-", "azimuth", "t", "-", "time", "OUTPUT", ":", "Phi", "(", "R", "z", ")", "HISTORY", ":", "2010", "-", "07", "-", "09", "-", "Started", "-", "Bovy", "(", "NYU", ")" ]
python
train
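The body of _evaluate above is the NFW potential in galpy's internal units (amplitude normalized to 1, an assumption here), evaluated at spherical radius r:
\Phi(R, z) = -\frac{\ln\left(1 + r/a\right)}{r}, \qquad r = \sqrt{R^{2} + z^{2}}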
larsmans/seqlearn
examples/conll.py
https://github.com/larsmans/seqlearn/blob/32d4bfaebdd877733f180ea6072e8fc1266bc559/examples/conll.py#L18-L39
def features(sentence, i): """Features for i'th token in sentence. Currently baseline named-entity recognition features, but these can easily be changed to do POS tagging or chunking. """ word = sentence[i] yield "word:{}" + word.lower() if word[0].isupper(): yield "CAP" if i > 0: yield "word-1:{}" + sentence[i - 1].lower() if i > 1: yield "word-2:{}" + sentence[i - 2].lower() if i + 1 < len(sentence): yield "word+1:{}" + sentence[i + 1].lower() if i + 2 < len(sentence): yield "word+2:{}" + sentence[i + 2].lower()
[ "def", "features", "(", "sentence", ",", "i", ")", ":", "word", "=", "sentence", "[", "i", "]", "yield", "\"word:{}\"", "+", "word", ".", "lower", "(", ")", "if", "word", "[", "0", "]", ".", "isupper", "(", ")", ":", "yield", "\"CAP\"", "if", "i", ">", "0", ":", "yield", "\"word-1:{}\"", "+", "sentence", "[", "i", "-", "1", "]", ".", "lower", "(", ")", "if", "i", ">", "1", ":", "yield", "\"word-2:{}\"", "+", "sentence", "[", "i", "-", "2", "]", ".", "lower", "(", ")", "if", "i", "+", "1", "<", "len", "(", "sentence", ")", ":", "yield", "\"word+1:{}\"", "+", "sentence", "[", "i", "+", "1", "]", ".", "lower", "(", ")", "if", "i", "+", "2", "<", "len", "(", "sentence", ")", ":", "yield", "\"word+2:{}\"", "+", "sentence", "[", "i", "+", "2", "]", ".", "lower", "(", ")" ]
Features for i'th token in sentence. Currently baseline named-entity recognition features, but these can easily be changed to do POS tagging or chunking.
[ "Features", "for", "i", "th", "token", "in", "sentence", "." ]
python
train
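A usage sketch collecting the features yielded for one token position; note the literal '{}' in each feature string comes from the record's own concatenation ("word:{}" + word.lower()), kept as-is here:
sentence = ['The', 'EU', 'rejects', 'German', 'call']
feats = list(features(sentence, 1))
# 'EU' is capitalized and has one left and two right context words at i=1
assert 'word:{}eu' in feats and 'CAP' in feats and 'word-1:{}the' in feats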
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1470-L1478
def count_many(self, views, include=None): """ Return many ViewCounts. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param views: iterable of View or view ids """ return self._get(self._build_url(self.endpoint(count_many=views, include=include)))
[ "def", "count_many", "(", "self", ",", "views", ",", "include", "=", "None", ")", ":", "return", "self", ".", "_get", "(", "self", ".", "_build_url", "(", "self", ".", "endpoint", "(", "count_many", "=", "views", ",", "include", "=", "include", ")", ")", ")" ]
Return many ViewCounts. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param views: iterable of View or view ids
[ "Return", "many", "ViewCounts", "." ]
python
train
pyviz/imagen
imagen/colorspaces.py
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/colorspaces.py#L435-L442
def _swaplch(LCH): "Reverse the order of an LCH numpy dstack or tuple for analysis." try: # Numpy array L,C,H = np.dsplit(LCH,3) return np.dstack((H,C,L)) except: # Tuple L,C,H = LCH return H,C,L
[ "def", "_swaplch", "(", "LCH", ")", ":", "try", ":", "# Numpy array", "L", ",", "C", ",", "H", "=", "np", ".", "dsplit", "(", "LCH", ",", "3", ")", "return", "np", ".", "dstack", "(", "(", "H", ",", "C", ",", "L", ")", ")", "except", ":", "# Tuple", "L", ",", "C", ",", "H", "=", "LCH", "return", "H", ",", "C", ",", "L" ]
Reverse the order of an LCH numpy dstack or tuple for analysis.
[ "Reverse", "the", "order", "of", "an", "LCH", "numpy", "dstack", "or", "tuple", "for", "analysis", "." ]
python
train
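A quick check of _swaplch's numpy branch: a depth-stacked L, C, H image comes back with H as the first plane:
import numpy as np

LCH = np.dstack((np.full((2, 2), 50.0),   # L
                 np.full((2, 2), 20.0),   # C
                 np.full((2, 2), 0.5)))   # H
HCL = _swaplch(LCH)
assert HCL.shape == (2, 2, 3) and HCL[0, 0, 0] == 0.5  # H is now first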
cozy/python_cozy_management
cozy_management/couchdb.py
https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/couchdb.py#L50-L59
def get_admin(): ''' Return the actual admin from token file ''' if os.path.isfile(LOGIN_FILENAME): with open(LOGIN_FILENAME, 'r') as token_file: old_login, old_password = token_file.read().splitlines()[:2] return old_login, old_password else: return None, None
[ "def", "get_admin", "(", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "LOGIN_FILENAME", ")", ":", "with", "open", "(", "LOGIN_FILENAME", ",", "'r'", ")", "as", "token_file", ":", "old_login", ",", "old_password", "=", "token_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "[", ":", "2", "]", "return", "old_login", ",", "old_password", "else", ":", "return", "None", ",", "None" ]
Return the actual admin from token file
[ "Return", "the", "actual", "admin", "from", "token", "file" ]
python
train
Azure/blobxfer
blobxfer/retry.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/retry.py#L103-L184
def _should_retry(self, context): # type: (ExponentialRetryWithMaxWait, # azure.storage.common.models.RetryContext) -> bool """Determine if retry should happen or not :param ExponentialRetryWithMaxWait self: this :param azure.storage.common.models.RetryContext context: retry context :rtype: bool :return: True if retry should happen, False otherwise """ # do not retry if max attempts equal or exceeded if context.count >= self.max_attempts: return False # get response status status = None if context.response and context.response.status: status = context.response.status # if there is no response status, then handle the exception # appropriately from the lower layer if status is None: exc = context.exception # default to not retry in unknown/unhandled exception case ret = False # requests timeout, retry if isinstance(exc, requests.Timeout): ret = True elif isinstance(exc, requests.exceptions.ContentDecodingError): ret = True elif (isinstance(exc, requests.exceptions.ConnectionError) or isinstance(exc, requests.exceptions.ChunkedEncodingError)): # newer versions of requests do not expose errno on the # args[0] reason object; manually string parse if isinstance(exc.args[0], urllib3.exceptions.MaxRetryError): try: msg = exc.args[0].reason.args[0] except (AttributeError, IndexError): # unexpected/malformed exception hierarchy, don't retry pass else: if any(x in msg for x in _RETRYABLE_ERRNO_MAXRETRY): ret = True elif isinstance(exc.args[0], urllib3.exceptions.ProtocolError): try: msg = exc.args[0].args[0] except (AttributeError, IndexError): # unexpected/malformed exception hierarchy, don't retry pass else: if any(x in msg for x in _RETRYABLE_ERRNO_PROTOCOL): ret = True # fallback to string search if not ret: msg = str(exc).lower() if any(x in msg for x in _RETRYABLE_STRING_FALLBACK): ret = True return ret elif 200 <= status < 300: # failure during respond body download or parsing, so success # codes should be retried return True elif 300 <= status < 500: # response code 404 should be retried if secondary was used if (status == 404 and context.location_mode == azure.storage.common.models.LocationMode.SECONDARY): return True # response code 408 is a timeout and should be retried # response code 429 is too many requests (throttle) # TODO use "Retry-After" header for backoff amount if status == 408 or status == 429: return True return False elif status >= 500: # response codes above 500 should be retried except for # 501 (not implemented) and 505 (version not supported) if status == 501 or status == 505: return False return True else: # noqa # this should be unreachable, retry anyway return True
[ "def", "_should_retry", "(", "self", ",", "context", ")", ":", "# type: (ExponentialRetryWithMaxWait,", "# azure.storage.common.models.RetryContext) -> bool", "# do not retry if max attempts equal or exceeded", "if", "context", ".", "count", ">=", "self", ".", "max_attempts", ":", "return", "False", "# get response status", "status", "=", "None", "if", "context", ".", "response", "and", "context", ".", "response", ".", "status", ":", "status", "=", "context", ".", "response", ".", "status", "# if there is no response status, then handle the exception", "# appropriately from the lower layer", "if", "status", "is", "None", ":", "exc", "=", "context", ".", "exception", "# default to not retry in unknown/unhandled exception case", "ret", "=", "False", "# requests timeout, retry", "if", "isinstance", "(", "exc", ",", "requests", ".", "Timeout", ")", ":", "ret", "=", "True", "elif", "isinstance", "(", "exc", ",", "requests", ".", "exceptions", ".", "ContentDecodingError", ")", ":", "ret", "=", "True", "elif", "(", "isinstance", "(", "exc", ",", "requests", ".", "exceptions", ".", "ConnectionError", ")", "or", "isinstance", "(", "exc", ",", "requests", ".", "exceptions", ".", "ChunkedEncodingError", ")", ")", ":", "# newer versions of requests do not expose errno on the", "# args[0] reason object; manually string parse", "if", "isinstance", "(", "exc", ".", "args", "[", "0", "]", ",", "urllib3", ".", "exceptions", ".", "MaxRetryError", ")", ":", "try", ":", "msg", "=", "exc", ".", "args", "[", "0", "]", ".", "reason", ".", "args", "[", "0", "]", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "# unexpected/malformed exception hierarchy, don't retry", "pass", "else", ":", "if", "any", "(", "x", "in", "msg", "for", "x", "in", "_RETRYABLE_ERRNO_MAXRETRY", ")", ":", "ret", "=", "True", "elif", "isinstance", "(", "exc", ".", "args", "[", "0", "]", ",", "urllib3", ".", "exceptions", ".", "ProtocolError", ")", ":", "try", ":", "msg", "=", "exc", ".", "args", "[", "0", "]", ".", "args", "[", "0", "]", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "# unexpected/malformed exception hierarchy, don't retry", "pass", "else", ":", "if", "any", "(", "x", "in", "msg", "for", "x", "in", "_RETRYABLE_ERRNO_PROTOCOL", ")", ":", "ret", "=", "True", "# fallback to string search", "if", "not", "ret", ":", "msg", "=", "str", "(", "exc", ")", ".", "lower", "(", ")", "if", "any", "(", "x", "in", "msg", "for", "x", "in", "_RETRYABLE_STRING_FALLBACK", ")", ":", "ret", "=", "True", "return", "ret", "elif", "200", "<=", "status", "<", "300", ":", "# failure during respond body download or parsing, so success", "# codes should be retried", "return", "True", "elif", "300", "<=", "status", "<", "500", ":", "# response code 404 should be retried if secondary was used", "if", "(", "status", "==", "404", "and", "context", ".", "location_mode", "==", "azure", ".", "storage", ".", "common", ".", "models", ".", "LocationMode", ".", "SECONDARY", ")", ":", "return", "True", "# response code 408 is a timeout and should be retried", "# response code 429 is too many requests (throttle)", "# TODO use \"Retry-After\" header for backoff amount", "if", "status", "==", "408", "or", "status", "==", "429", ":", "return", "True", "return", "False", "elif", "status", ">=", "500", ":", "# response codes above 500 should be retried except for", "# 501 (not implemented) and 505 (version not supported)", "if", "status", "==", "501", "or", "status", "==", "505", ":", "return", "False", "return", "True", "else", ":", "# 
noqa", "# this should be unreachable, retry anyway", "return", "True" ]
Determine if retry should happen or not :param ExponentialRetryWithMaxWait self: this :param azure.storage.common.models.RetryContext context: retry context :rtype: bool :return: True if retry should happen, False otherwise
[ "Determine", "if", "retry", "should", "happen", "or", "not", ":", "param", "ExponentialRetryWithMaxWait", "self", ":", "this", ":", "param", "azure", ".", "storage", ".", "common", ".", "models", ".", "RetryContext", "context", ":", "retry", "context", ":", "rtype", ":", "bool", ":", "return", ":", "True", "if", "retry", "should", "happen", "False", "otherwise" ]
python
train
Huong-nt/flask-rak
flask_rak/core.py
https://github.com/Huong-nt/flask-rak/blob/ffe16b0fc3d49e83c1d220c445ce14632219f69d/flask_rak/core.py#L155-L172
def intent(self, intent_name): """Decorator routes a Rogo IntentRequest. Functions decorated as an intent are registered as the view function for the Intent's URL, and provide the backend responses to give your Skill its functionality. @ask.intent('WeatherIntent') def weather(city): return statement('I predict great weather for {}'.format(city)) Arguments: intent_name {str} -- Name of the intent request to be mapped to the decorated function """ def decorator(f): self._intent_view_funcs[intent_name] = f @wraps(f) def wrapper(*args, **kw): self._flask_view_func(*args, **kw) return f return decorator
[ "def", "intent", "(", "self", ",", "intent_name", ")", ":", "def", "decorator", "(", "f", ")", ":", "self", ".", "_intent_view_funcs", "[", "intent_name", "]", "=", "f", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "self", ".", "_flask_view_func", "(", "*", "args", ",", "*", "*", "kw", ")", "return", "f", "return", "decorator" ]
Decorator routes a Rogo IntentRequest. Functions decorated as an intent are registered as the view function for the Intent's URL, and provide the backend responses to give your Skill its functionality. @ask.intent('WeatherIntent') def weather(city): return statement('I predict great weather for {}'.format(city)) Arguments: intent_name {str} -- Name of the intent request to be mapped to the decorated function
[ "Decorator", "routes", "a", "Rogo", "IntentRequest", ".", "Functions", "decorated", "as", "an", "intent", "are", "registered", "as", "the", "view", "function", "for", "the", "Intent", "s", "URL", "and", "provide", "the", "backend", "responses", "to", "give", "your", "Skill", "its", "functionality", "." ]
python
train
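The decorator above is a plain registration pattern: store the view function in a dict keyed by intent name and hand the original function back. A toy, framework-free sketch of the same idea (`TinySkill` is hypothetical and not part of Flask-RAK):

```python
class TinySkill:
    """Hypothetical registry mimicking the intent() pattern above."""
    def __init__(self):
        self._intent_view_funcs = {}

    def intent(self, intent_name):
        def decorator(f):
            self._intent_view_funcs[intent_name] = f  # register under the name
            return f   # like Flask-RAK, return the original callable unchanged
        return decorator

    def dispatch(self, intent_name, *args, **kw):
        return self._intent_view_funcs[intent_name](*args, **kw)

skill = TinySkill()

@skill.intent('WeatherIntent')
def weather(city):
    return 'I predict great weather for {}'.format(city)

assert skill.dispatch('WeatherIntent', 'Paris') == 'I predict great weather for Paris'
```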
aio-libs/aiohttp-sse
aiohttp_sse/__init__.py
https://github.com/aio-libs/aiohttp-sse/blob/5148d087f9df75ecea61f574d3c768506680e5dc/aiohttp_sse/__init__.py#L113-L121
async def wait(self): """EventSourceResponse object is used for streaming data to the client, this method returns a future, so we can wait until the connection is closed or another task explicitly calls the ``stop_streaming`` method. """ if self._ping_task is None: raise RuntimeError('Response is not started') with contextlib.suppress(asyncio.CancelledError): await self._ping_task
[ "async", "def", "wait", "(", "self", ")", ":", "if", "self", ".", "_ping_task", "is", "None", ":", "raise", "RuntimeError", "(", "'Response is not started'", ")", "with", "contextlib", ".", "suppress", "(", "asyncio", ".", "CancelledError", ")", ":", "await", "self", ".", "_ping_task" ]
EventSourceResponse object is used for streaming data to the client, this method returns a future, so we can wait until the connection is closed or another task explicitly calls the ``stop_streaming`` method.
[ "EventSourceResponse", "object", "is", "used", "for", "streaming", "data", "to", "the", "client", "this", "method", "returns", "a", "future", "so", "we", "can", "wait", "until", "the", "connection", "is", "closed", "or", "another", "task", "explicitly", "calls", "the", "stop_streaming", "method", "." ]
python
train
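`wait()` above is the standard "park until a background task ends" idiom: await the task and swallow the `CancelledError` raised when someone cancels it. A runnable, aiohttp-free sketch (`TinyStream` is a hypothetical stand-in, not the real `EventSourceResponse`):

```python
import asyncio
import contextlib

class TinyStream:
    """Hypothetical stand-in reproducing the wait()/stop pattern above."""
    def __init__(self):
        self._ping_task = None

    async def start(self):
        self._ping_task = asyncio.create_task(self._ping())

    async def _ping(self):
        while True:                      # stands in for periodic SSE pings
            await asyncio.sleep(0.01)

    def stop_streaming(self):
        self._ping_task.cancel()

    async def wait(self):
        if self._ping_task is None:
            raise RuntimeError('Response is not started')
        with contextlib.suppress(asyncio.CancelledError):
            await self._ping_task

async def main():
    s = TinyStream()
    await s.start()
    asyncio.get_running_loop().call_later(0.05, s.stop_streaming)
    await s.wait()                       # returns once the ping task is cancelled

asyncio.run(main())
```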
dmlc/gluon-nlp
scripts/word_embeddings/evaluation.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/evaluation.py#L262-L280
def log_similarity_result(logfile, result): """Log a similarity evaluation result dictionary as TSV to logfile.""" assert result['task'] == 'similarity' if not logfile: return with open(logfile, 'a') as f: f.write('\t'.join([ str(result['global_step']), result['task'], result['dataset_name'], json.dumps(result['dataset_kwargs']), result['similarity_function'], str(result['spearmanr']), str(result['num_dropped']), ])) f.write('\n')
[ "def", "log_similarity_result", "(", "logfile", ",", "result", ")", ":", "assert", "result", "[", "'task'", "]", "==", "'similarity'", "if", "not", "logfile", ":", "return", "with", "open", "(", "logfile", ",", "'a'", ")", "as", "f", ":", "f", ".", "write", "(", "'\\t'", ".", "join", "(", "[", "str", "(", "result", "[", "'global_step'", "]", ")", ",", "result", "[", "'task'", "]", ",", "result", "[", "'dataset_name'", "]", ",", "json", ".", "dumps", "(", "result", "[", "'dataset_kwargs'", "]", ")", ",", "result", "[", "'similarity_function'", "]", ",", "str", "(", "result", "[", "'spearmanr'", "]", ")", ",", "str", "(", "result", "[", "'num_dropped'", "]", ")", ",", "]", ")", ")", "f", ".", "write", "(", "'\\n'", ")" ]
Log a similarity evaluation result dictionary as TSV to logfile.
[ "Log", "a", "similarity", "evaluation", "result", "dictionary", "as", "TSV", "to", "logfile", "." ]
python
train
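The logger above keeps one row per evaluation, with the kwargs dict JSON-encoded so it stays a single TSV column. A minimal stand-alone version with the column order copied from the function above (paths and values here are made up for the demo):

```python
import json, os, tempfile

def log_row(logfile, result):
    """Stand-alone version of the TSV logger above (same column order)."""
    with open(logfile, 'a') as f:
        f.write('\t'.join([
            str(result['global_step']),
            result['task'],
            result['dataset_name'],
            json.dumps(result['dataset_kwargs']),  # dict stays one TSV column
            result['similarity_function'],
            str(result['spearmanr']),
            str(result['num_dropped']),
        ]) + '\n')

path = os.path.join(tempfile.mkdtemp(), 'eval.tsv')
log_row(path, {'global_step': 100, 'task': 'similarity',
               'dataset_name': 'WS353', 'dataset_kwargs': {'segment': 'all'},
               'similarity_function': 'CosineSimilarity',
               'spearmanr': 0.71, 'num_dropped': 2})
row = open(path).read().rstrip('\n').split('\t')
assert row[0] == '100' and json.loads(row[3]) == {'segment': 'all'}
```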
ansible/molecule
molecule/command/lint.py
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/command/lint.py#L63-L80
def execute(self): """ Execute the actions necessary to perform a `molecule lint` and return None. :return: None """ self.print_info() linters = [ l for l in [ self._config.lint, self._config.verifier.lint, self._config.provisioner.lint, ] if l ] for l in linters: l.execute()
[ "def", "execute", "(", "self", ")", ":", "self", ".", "print_info", "(", ")", "linters", "=", "[", "l", "for", "l", "in", "[", "self", ".", "_config", ".", "lint", ",", "self", ".", "_config", ".", "verifier", ".", "lint", ",", "self", ".", "_config", ".", "provisioner", ".", "lint", ",", "]", "if", "l", "]", "for", "l", "in", "linters", ":", "l", ".", "execute", "(", ")" ]
Execute the actions necessary to perform a `molecule lint` and return None. :return: None
[ "Execute", "the", "actions", "necessary", "to", "perform", "a", "molecule", "lint", "and", "return", "None", "." ]
python
train
pennlabs/penn-sdk-python
penn/wharton.py
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/wharton.py#L186-L189
def get_wharton_gsrs_formatted(self, sessionid, date=None): """ Return the wharton GSR listing formatted in studyspaces format. """ gsrs = self.get_wharton_gsrs(sessionid, date) return self.switch_format(gsrs)
[ "def", "get_wharton_gsrs_formatted", "(", "self", ",", "sessionid", ",", "date", "=", "None", ")", ":", "gsrs", "=", "self", ".", "get_wharton_gsrs", "(", "sessionid", ",", "date", ")", "return", "self", ".", "switch_format", "(", "gsrs", ")" ]
Return the wharton GSR listing formatted in studyspaces format.
[ "Return", "the", "wharton", "GSR", "listing", "formatted", "in", "studyspaces", "format", "." ]
python
train
deifyed/vault
libconman/target.py
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/target.py#L51-L68
def delete(self): ''' Deletes link from vault and removes database information ''' if not self._id: verbose('This target does not have an id') return False # Removes link from vault directory verbose('Removing link from vault directory') os.remove(self.vault_path) verbose('Removing information from database') # Removing information from database self.db.removeTarget(self._id) self._id = -1 return True
[ "def", "delete", "(", "self", ")", ":", "if", "not", "self", ".", "_id", ":", "verbose", "(", "'This target does not have an id'", ")", "return", "False", "# Removes link from vault directory", "verbose", "(", "'Removing link from vault directory'", ")", "os", ".", "remove", "(", "self", ".", "vault_path", ")", "verbose", "(", "'Removing information from database'", ")", "# Removing information from database", "self", ".", "db", ".", "removeTarget", "(", "self", ".", "_id", ")", "self", ".", "_id", "=", "-", "1", "return", "True" ]
Deletes link from vault and removes database information
[ "Deletes", "link", "from", "vault", "and", "removes", "database", "information" ]
python
train
eng-tools/sfsimodels
sfsimodels/models/foundations.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L347-L357
def pad_position_w(self, i): """ Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return: """ if i >= self.n_pads_w: raise ModelError("pad index out-of-bounds") return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2
[ "def", "pad_position_w", "(", "self", ",", "i", ")", ":", "if", "i", ">=", "self", ".", "n_pads_w", ":", "raise", "ModelError", "(", "\"pad index out-of-bounds\"", ")", "return", "(", "self", ".", "width", "-", "self", ".", "pad_width", ")", "/", "(", "self", ".", "n_pads_w", "-", "1", ")", "*", "i", "+", "self", ".", "pad_width", "/", "2" ]
Determines the position of the ith pad in the width direction. Assumes equally spaced pads. :param i: ith number of pad in width direction (0-indexed) :return:
[ "Determines", "the", "position", "of", "the", "ith", "pad", "in", "the", "width", "direction", ".", "Assumes", "equally", "spaced", "pads", "." ]
python
train
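The return expression above places pad centres so the first and last pads sit flush with the foundation edges, with equal spacing between them. A quick numeric check of that formula (note it assumes n_pads_w > 1, otherwise the division is by zero):

```python
def pad_position(i, width, pad_width, n_pads):
    """Centre of the i-th equally spaced pad, as in pad_position_w above."""
    if i >= n_pads:
        raise IndexError("pad index out-of-bounds")
    return (width - pad_width) / (n_pads - 1) * i + pad_width / 2

# width=10, pad_width=1, 4 pads: centres run from 0.5 to 9.5 in equal steps
centres = [pad_position(i, 10.0, 1.0, 4) for i in range(4)]
assert centres == [0.5, 3.5, 6.5, 9.5]
# first/last pads sit flush with the foundation edges:
assert centres[0] == 1.0 / 2 and centres[-1] == 10.0 - 1.0 / 2
```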
saltstack/salt
salt/modules/solarispkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solarispkg.py#L201-L385
def install(name=None, sources=None, saltenv='base', **kwargs): ''' Install the passed package. Can install packages from the following sources: * Locally (package already exists on the minion * HTTP/HTTPS server * FTP server * Salt master Returns a dict containing the new package names and versions: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Examples: .. code-block:: bash # Installing a data stream pkg that already exists on the minion salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' # Installing a data stream pkg that exists on the salt master salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]' CLI Example: .. code-block:: bash # Installing a data stream pkg that exists on a HTTP server salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]' If working with solaris zones and you want to install a package only in the global zone you can pass 'current_zone_only=True' to salt to have the package only installed in the global zone. (Behind the scenes this is passing '-G' to the pkgadd command.) Solaris default when installing a package in the global zone is to install it in all zones. This overrides that and installs the package only in the global. CLI Example: .. code-block:: bash # Installing a data stream package only in the global zone: salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True By default salt automatically provides an adminfile, to automate package installation, with these options set:: email= instance=quit partial=nocheck runlevel=nocheck idepend=nocheck rdepend=nocheck space=nocheck setuid=nocheck conflict=nocheck action=nocheck basedir=default You can override any of these options in two ways. First you can optionally pass any of the options as a kwarg to the module/state to override the default value or you can optionally pass the 'admin_source' option providing your own adminfile to the minions. Note: You can find all of the possible options to provide to the adminfile by reading the admin man page: .. code-block:: bash man -s 4 admin CLI Example: .. code-block:: bash # Overriding the 'instance' adminfile option when calling the module directly salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite" SLS Example: .. code-block:: yaml # Overriding the 'instance' adminfile option when used in a state SMClgcc346: pkg.installed: - sources: - SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg - instance: overwrite .. note:: The ID declaration is ignored, as the package name is read from the ``sources`` parameter. CLI Example: .. code-block:: bash # Providing your own adminfile when calling the module directly salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>' # Providing your own adminfile when using states <pkg name>: pkg.installed: - sources: - <pkg name>: salt://pkgs/<pkg filename> - admin_source: salt://pkgs/<adminfile filename> .. note:: The ID declaration is ignored, as the package name is read from the ``sources`` parameter. ''' if salt.utils.data.is_true(kwargs.get('refresh')): log.warning('\'refresh\' argument not implemented for solarispkg ' 'module') # pkgs is not supported, but must be passed here for API compatibility pkgs = kwargs.pop('pkgs', None) try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, sources, **kwargs ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} if not sources: log.error('"sources" param required for solaris pkg_add installs') return {} try: if 'admin_source' in kwargs: adminfile = __salt__['cp.cache_file'](kwargs['admin_source'], saltenv) else: adminfile = _write_adminfile(kwargs) old = list_pkgs() cmd_prefix = ['/usr/sbin/pkgadd', '-n', '-a', adminfile] # Only makes sense in a global zone but works fine in non-globals. if kwargs.get('current_zone_only') == 'True': cmd_prefix += '-G ' errors = [] for pkg in pkg_params: cmd = cmd_prefix + ['-d', pkg, 'all'] # Install the package{s} out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if out['retcode'] != 0 and out['stderr']: errors.append(out['stderr']) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) finally: # Remove the temp adminfile if 'admin_source' not in kwargs: try: os.remove(adminfile) except (NameError, OSError): pass return ret
[ "def", "install", "(", "name", "=", "None", ",", "sources", "=", "None", ",", "saltenv", "=", "'base'", ",", "*", "*", "kwargs", ")", ":", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "kwargs", ".", "get", "(", "'refresh'", ")", ")", ":", "log", ".", "warning", "(", "'\\'refresh\\' argument not implemented for solarispkg '", "'module'", ")", "# pkgs is not supported, but must be passed here for API compatibility", "pkgs", "=", "kwargs", ".", "pop", "(", "'pkgs'", ",", "None", ")", "try", ":", "pkg_params", ",", "pkg_type", "=", "__salt__", "[", "'pkg_resource.parse_targets'", "]", "(", "name", ",", "pkgs", ",", "sources", ",", "*", "*", "kwargs", ")", "except", "MinionError", "as", "exc", ":", "raise", "CommandExecutionError", "(", "exc", ")", "if", "not", "pkg_params", ":", "return", "{", "}", "if", "not", "sources", ":", "log", ".", "error", "(", "'\"sources\" param required for solaris pkg_add installs'", ")", "return", "{", "}", "try", ":", "if", "'admin_source'", "in", "kwargs", ":", "adminfile", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "kwargs", "[", "'admin_source'", "]", ",", "saltenv", ")", "else", ":", "adminfile", "=", "_write_adminfile", "(", "kwargs", ")", "old", "=", "list_pkgs", "(", ")", "cmd_prefix", "=", "[", "'/usr/sbin/pkgadd'", ",", "'-n'", ",", "'-a'", ",", "adminfile", "]", "# Only makes sense in a global zone but works fine in non-globals.", "if", "kwargs", ".", "get", "(", "'current_zone_only'", ")", "==", "'True'", ":", "cmd_prefix", "+=", "'-G '", "errors", "=", "[", "]", "for", "pkg", "in", "pkg_params", ":", "cmd", "=", "cmd_prefix", "+", "[", "'-d'", ",", "pkg", ",", "'all'", "]", "# Install the package{s}", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "output_loglevel", "=", "'trace'", ",", "python_shell", "=", "False", ")", "if", "out", "[", "'retcode'", "]", "!=", "0", "and", "out", "[", "'stderr'", "]", ":", "errors", ".", "append", "(", "out", "[", "'stderr'", "]", ")", "__context__", ".", "pop", "(", "'pkg.list_pkgs'", ",", "None", ")", "new", "=", "list_pkgs", "(", ")", "ret", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")", "if", "errors", ":", "raise", "CommandExecutionError", "(", "'Problem encountered installing package(s)'", ",", "info", "=", "{", "'errors'", ":", "errors", ",", "'changes'", ":", "ret", "}", ")", "finally", ":", "# Remove the temp adminfile", "if", "'admin_source'", "not", "in", "kwargs", ":", "try", ":", "os", ".", "remove", "(", "adminfile", ")", "except", "(", "NameError", ",", "OSError", ")", ":", "pass", "return", "ret" ]
Install the passed package. Can install packages from the following sources: * Locally (package already exists on the minion * HTTP/HTTPS server * FTP server * Salt master Returns a dict containing the new package names and versions: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Examples: .. code-block:: bash # Installing a data stream pkg that already exists on the minion salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' # Installing a data stream pkg that exists on the salt master salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]' CLI Example: .. code-block:: bash # Installing a data stream pkg that exists on a HTTP server salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]' If working with solaris zones and you want to install a package only in the global zone you can pass 'current_zone_only=True' to salt to have the package only installed in the global zone. (Behind the scenes this is passing '-G' to the pkgadd command.) Solaris default when installing a package in the global zone is to install it in all zones. This overrides that and installs the package only in the global. CLI Example: .. code-block:: bash # Installing a data stream package only in the global zone: salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True By default salt automatically provides an adminfile, to automate package installation, with these options set:: email= instance=quit partial=nocheck runlevel=nocheck idepend=nocheck rdepend=nocheck space=nocheck setuid=nocheck conflict=nocheck action=nocheck basedir=default You can override any of these options in two ways. First you can optionally pass any of the options as a kwarg to the module/state to override the default value or you can optionally pass the 'admin_source' option providing your own adminfile to the minions. Note: You can find all of the possible options to provide to the adminfile by reading the admin man page: .. code-block:: bash man -s 4 admin CLI Example: .. code-block:: bash # Overriding the 'instance' adminfile option when calling the module directly salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite" SLS Example: .. code-block:: yaml # Overriding the 'instance' adminfile option when used in a state SMClgcc346: pkg.installed: - sources: - SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg - instance: overwrite .. note:: The ID declaration is ignored, as the package name is read from the ``sources`` parameter. CLI Example: .. code-block:: bash # Providing your own adminfile when calling the module directly salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>' # Providing your own adminfile when using states <pkg name>: pkg.installed: - sources: - <pkg name>: salt://pkgs/<pkg filename> - admin_source: salt://pkgs/<adminfile filename> .. note:: The ID declaration is ignored, as the package name is read from the ``sources`` parameter.
[ "Install", "the", "passed", "package", ".", "Can", "install", "packages", "from", "the", "following", "sources", ":" ]
python
train
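One subtlety in the code above: `cmd_prefix` is a list, and `cmd_prefix += '-G '` extends it with the individual characters '-', 'G', ' ' rather than adding one argv element. A minimal demonstration of the pitfall and the list-safe spelling:

```python
# += on a list with a *string* iterates the string, character by character.
cmd = ['/usr/sbin/pkgadd', '-n', '-a', 'adminfile']
cmd += '-G '                       # extends with '-', 'G', ' '
assert cmd[-3:] == ['-', 'G', ' ']

# The list-safe way: append one element (or += a one-element list).
cmd = ['/usr/sbin/pkgadd', '-n', '-a', 'adminfile']
cmd.append('-G')                   # correct: a single argv element
cmd += ['-d', 'pkg.pkg', 'all']
assert cmd == ['/usr/sbin/pkgadd', '-n', '-a', 'adminfile',
               '-G', '-d', 'pkg.pkg', 'all']
```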
bwohlberg/sporco
sporco/plot.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/plot.py#L76-L171
def attach_zoom(ax, scaling=2.0): """ Attach an event handler that supports zooming within a plot using the mouse scroll wheel. Parameters ---------- ax : :class:`matplotlib.axes.Axes` object Axes to which event handling is to be attached scaling : float, optional (default 2.0) Scaling factor for zooming in and out Returns ------- zoom : function Mouse scroll wheel event handler function """ # See https://stackoverflow.com/questions/11551049 def zoom(event): # Get the current x and y limits cur_xlim = ax.get_xlim() cur_ylim = ax.get_ylim() # Get event location xdata = event.xdata ydata = event.ydata # Return if cursor is not over valid region of plot if xdata is None or ydata is None: return if event.button == 'up': # Deal with zoom in scale_factor = 1.0 / scaling elif event.button == 'down': # Deal with zoom out scale_factor = scaling # Get distance from the cursor to the edge of the figure frame x_left = xdata - cur_xlim[0] x_right = cur_xlim[1] - xdata y_top = ydata - cur_ylim[0] y_bottom = cur_ylim[1] - ydata # Calculate new x and y limits new_xlim = (xdata - x_left * scale_factor, xdata + x_right * scale_factor) new_ylim = (ydata - y_top * scale_factor, ydata + y_bottom * scale_factor) # Ensure that x limit range is no larger than that of the reference if np.diff(new_xlim) > np.diff(zoom.xlim_ref): new_xlim *= np.diff(zoom.xlim_ref) / np.diff(new_xlim) # Ensure that lower x limit is not less than that of the reference if new_xlim[0] < zoom.xlim_ref[0]: new_xlim += np.array(zoom.xlim_ref[0] - new_xlim[0]) # Ensure that upper x limit is not greater than that of the reference if new_xlim[1] > zoom.xlim_ref[1]: new_xlim -= np.array(new_xlim[1] - zoom.xlim_ref[1]) # Ensure that ylim tuple has the smallest value first if zoom.ylim_ref[1] < zoom.ylim_ref[0]: ylim_ref = zoom.ylim_ref[::-1] new_ylim = new_ylim[::-1] else: ylim_ref = zoom.ylim_ref # Ensure that y limit range is no larger than that of the reference if np.diff(new_ylim) > np.diff(ylim_ref): new_ylim *= np.diff(ylim_ref) / np.diff(new_ylim) # Ensure that lower y limit is not less than that of the reference if new_ylim[0] < ylim_ref[0]: new_ylim += np.array(ylim_ref[0] - new_ylim[0]) # Ensure that upper y limit is not greater than that of the reference if new_ylim[1] > ylim_ref[1]: new_ylim -= np.array(new_ylim[1] - ylim_ref[1]) # Return the ylim tuple to its original order if zoom.ylim_ref[1] < zoom.ylim_ref[0]: new_ylim = new_ylim[::-1] # Set new x and y limits ax.set_xlim(new_xlim) ax.set_ylim(new_ylim) # Force redraw ax.figure.canvas.draw() # Record reference x and y limits prior to any zooming zoom.xlim_ref = ax.get_xlim() zoom.ylim_ref = ax.get_ylim() # Get figure for specified axes and attach the event handler fig = ax.get_figure() fig.canvas.mpl_connect('scroll_event', zoom) return zoom
[ "def", "attach_zoom", "(", "ax", ",", "scaling", "=", "2.0", ")", ":", "# See https://stackoverflow.com/questions/11551049", "def", "zoom", "(", "event", ")", ":", "# Get the current x and y limits", "cur_xlim", "=", "ax", ".", "get_xlim", "(", ")", "cur_ylim", "=", "ax", ".", "get_ylim", "(", ")", "# Get event location", "xdata", "=", "event", ".", "xdata", "ydata", "=", "event", ".", "ydata", "# Return if cursor is not over valid region of plot", "if", "xdata", "is", "None", "or", "ydata", "is", "None", ":", "return", "if", "event", ".", "button", "==", "'up'", ":", "# Deal with zoom in", "scale_factor", "=", "1.0", "/", "scaling", "elif", "event", ".", "button", "==", "'down'", ":", "# Deal with zoom out", "scale_factor", "=", "scaling", "# Get distance from the cursor to the edge of the figure frame", "x_left", "=", "xdata", "-", "cur_xlim", "[", "0", "]", "x_right", "=", "cur_xlim", "[", "1", "]", "-", "xdata", "y_top", "=", "ydata", "-", "cur_ylim", "[", "0", "]", "y_bottom", "=", "cur_ylim", "[", "1", "]", "-", "ydata", "# Calculate new x and y limits", "new_xlim", "=", "(", "xdata", "-", "x_left", "*", "scale_factor", ",", "xdata", "+", "x_right", "*", "scale_factor", ")", "new_ylim", "=", "(", "ydata", "-", "y_top", "*", "scale_factor", ",", "ydata", "+", "y_bottom", "*", "scale_factor", ")", "# Ensure that x limit range is no larger than that of the reference", "if", "np", ".", "diff", "(", "new_xlim", ")", ">", "np", ".", "diff", "(", "zoom", ".", "xlim_ref", ")", ":", "new_xlim", "*=", "np", ".", "diff", "(", "zoom", ".", "xlim_ref", ")", "/", "np", ".", "diff", "(", "new_xlim", ")", "# Ensure that lower x limit is not less than that of the reference", "if", "new_xlim", "[", "0", "]", "<", "zoom", ".", "xlim_ref", "[", "0", "]", ":", "new_xlim", "+=", "np", ".", "array", "(", "zoom", ".", "xlim_ref", "[", "0", "]", "-", "new_xlim", "[", "0", "]", ")", "# Ensure that upper x limit is not greater than that of the reference", "if", "new_xlim", "[", "1", "]", ">", "zoom", ".", "xlim_ref", "[", "1", "]", ":", "new_xlim", "-=", "np", ".", "array", "(", "new_xlim", "[", "1", "]", "-", "zoom", ".", "xlim_ref", "[", "1", "]", ")", "# Ensure that ylim tuple has the smallest value first", "if", "zoom", ".", "ylim_ref", "[", "1", "]", "<", "zoom", ".", "ylim_ref", "[", "0", "]", ":", "ylim_ref", "=", "zoom", ".", "ylim_ref", "[", ":", ":", "-", "1", "]", "new_ylim", "=", "new_ylim", "[", ":", ":", "-", "1", "]", "else", ":", "ylim_ref", "=", "zoom", ".", "ylim_ref", "# Ensure that y limit range is no larger than that of the reference", "if", "np", ".", "diff", "(", "new_ylim", ")", ">", "np", ".", "diff", "(", "ylim_ref", ")", ":", "new_ylim", "*=", "np", ".", "diff", "(", "ylim_ref", ")", "/", "np", ".", "diff", "(", "new_ylim", ")", "# Ensure that lower y limit is not less than that of the reference", "if", "new_ylim", "[", "0", "]", "<", "ylim_ref", "[", "0", "]", ":", "new_ylim", "+=", "np", ".", "array", "(", "ylim_ref", "[", "0", "]", "-", "new_ylim", "[", "0", "]", ")", "# Ensure that upper y limit is not greater than that of the reference", "if", "new_ylim", "[", "1", "]", ">", "ylim_ref", "[", "1", "]", ":", "new_ylim", "-=", "np", ".", "array", "(", "new_ylim", "[", "1", "]", "-", "ylim_ref", "[", "1", "]", ")", "# Return the ylim tuple to its original order", "if", "zoom", ".", "ylim_ref", "[", "1", "]", "<", "zoom", ".", "ylim_ref", "[", "0", "]", ":", "new_ylim", "=", "new_ylim", "[", ":", ":", "-", "1", "]", "# Set new x and y limits", "ax", ".", "set_xlim", "(", 
"new_xlim", ")", "ax", ".", "set_ylim", "(", "new_ylim", ")", "# Force redraw", "ax", ".", "figure", ".", "canvas", ".", "draw", "(", ")", "# Record reference x and y limits prior to any zooming", "zoom", ".", "xlim_ref", "=", "ax", ".", "get_xlim", "(", ")", "zoom", ".", "ylim_ref", "=", "ax", ".", "get_ylim", "(", ")", "# Get figure for specified axes and attach the event handler", "fig", "=", "ax", ".", "get_figure", "(", ")", "fig", ".", "canvas", ".", "mpl_connect", "(", "'scroll_event'", ",", "zoom", ")", "return", "zoom" ]
Attach an event handler that supports zooming within a plot using the mouse scroll wheel. Parameters ---------- ax : :class:`matplotlib.axes.Axes` object Axes to which event handling is to be attached scaling : float, optional (default 2.0) Scaling factor for zooming in and out Returns ------- zoom : function Mouse scroll wheel event handler function
[ "Attach", "an", "event", "handler", "that", "supports", "zooming", "within", "a", "plot", "using", "the", "mouse", "scroll", "wheel", "." ]
python
train
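The bulk of the handler above is clamping the zoomed interval back inside the reference limits. The same arithmetic as a pure function, simplified from the numpy-array version (the width rescale multiplies both endpoints, mirroring the code's `*=` on the limit pair; matplotlib is not needed to check it):

```python
# Pure-arithmetic core of the clamping above: keep a zoomed interval inside
# the reference interval without exceeding its width.
def clamp_interval(lo, hi, ref_lo, ref_hi):
    width, ref_width = hi - lo, ref_hi - ref_lo
    if width > ref_width:                 # never zoom out past the reference
        scale = ref_width / width
        lo, hi = lo * scale, hi * scale   # endpoint rescale, as in the code
    if lo < ref_lo:                       # shift right if past the left edge
        hi += ref_lo - lo
        lo = ref_lo
    if hi > ref_hi:                       # shift left if past the right edge
        lo -= hi - ref_hi
        hi = ref_hi
    return lo, hi

assert clamp_interval(-1.0, 3.0, 0.0, 10.0) == (0.0, 4.0)
assert clamp_interval(8.0, 13.0, 0.0, 10.0) == (5.0, 10.0)
```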
LionelR/pyair
pyair/xair.py
https://github.com/LionelR/pyair/blob/467e8a843ca9f882f8bb2958805b7293591996ad/pyair/xair.py#L519-L570
def get_manuelles(self, site, code_parametre, debut, fin, court=False): """ Recupération des mesures manuelles (labo) pour un site site: numéro du site (voir fonction liste_sites_prelevement) code_parametre: code ISO du paramètre à rechercher (C6H6=V4) debut: date de début du premier prélèvement fin: date de fin du dernier prélèvement court: Renvoie un tableau au format court ou long (colonnes) """ condition = "WHERE MESLA.NOPOL='%s' " % code_parametre condition += "AND SITMETH.NSIT=%s " % site condition += "AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') " % debut condition += "AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') " % fin if court == False: select = """SELECT MESLA.LIBELLE AS MESURE, METH.LIBELLE AS METHODE, ANA.VALEUR AS VALEUR, MESLA.UNITE AS UNITE, ANA.CODE_QUALITE AS CODE_QUALITE, ANA.DATE_ANA AS DATE_ANALYSE, ANA.ID_LABO AS LABO, PRELEV.DATE_DEB AS DEBUT, PRELEV.DATE_FIN AS FIN, ANA.COMMENTAIRE AS COMMENTAIRE, SITE.LIBELLE AS SITE, SITE.AXE AS ADRESSE, COM.NOM_COMMUNE AS COMMUNE""" else: select = """SELECT MESLA.LIBELLE AS MESURE, ANA.VALEUR AS VALEUR, MESLA.UNITE AS UNITE, ANA.CODE_QUALITE AS CODE_QUALITE, PRELEV.DATE_DEB AS DEBUT, PRELEV.DATE_FIN AS FIN, SITE.AXE AS ADRESSE, COM.NOM_COMMUNE AS COMMUNE""" _sql = """%s FROM ANALYSE ANA INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP) INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP) INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP) INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P) INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT) INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE) %s ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB""" % (select, condition) return psql.read_sql(_sql, self.conn)
[ "def", "get_manuelles", "(", "self", ",", "site", ",", "code_parametre", ",", "debut", ",", "fin", ",", "court", "=", "False", ")", ":", "condition", "=", "\"WHERE MESLA.NOPOL='%s' \"", "%", "code_parametre", "condition", "+=", "\"AND SITMETH.NSIT=%s \"", "%", "site", "condition", "+=", "\"AND PRELEV.DATE_DEB>=TO_DATE('%s', 'YYYY-MM-DD') \"", "%", "debut", "condition", "+=", "\"AND PRELEV.DATE_FIN<=TO_DATE('%s', 'YYYY-MM-DD') \"", "%", "fin", "if", "court", "==", "False", ":", "select", "=", "\"\"\"SELECT\n MESLA.LIBELLE AS MESURE,\n METH.LIBELLE AS METHODE,\n ANA.VALEUR AS VALEUR,\n MESLA.UNITE AS UNITE,\n ANA.CODE_QUALITE AS CODE_QUALITE,\n ANA.DATE_ANA AS DATE_ANALYSE,\n ANA.ID_LABO AS LABO,\n PRELEV.DATE_DEB AS DEBUT,\n PRELEV.DATE_FIN AS FIN,\n ANA.COMMENTAIRE AS COMMENTAIRE,\n SITE.LIBELLE AS SITE,\n SITE.AXE AS ADRESSE,\n COM.NOM_COMMUNE AS COMMUNE\"\"\"", "else", ":", "select", "=", "\"\"\"SELECT\n MESLA.LIBELLE AS MESURE,\n ANA.VALEUR AS VALEUR,\n MESLA.UNITE AS UNITE,\n ANA.CODE_QUALITE AS CODE_QUALITE,\n PRELEV.DATE_DEB AS DEBUT,\n PRELEV.DATE_FIN AS FIN,\n SITE.AXE AS ADRESSE,\n COM.NOM_COMMUNE AS COMMUNE\"\"\"", "_sql", "=", "\"\"\"%s\n FROM ANALYSE ANA\n INNER JOIN PRELEVEMENT PRELEV ON (ANA.CODE_PRELEV=PRELEV.CODE_PRELEV AND ANA.CODE_SMP=PRELEV.CODE_SMP)\n INNER JOIN MESURE_LABO MESLA ON (ANA.CODE_MES_LABO=MESLA.CODE_MES_LABO AND ANA.CODE_SMP=MESLA.CODE_SMP)\n INNER JOIN SITE_METH_PRELEV SITMETH ON (ANA.CODE_SMP=SITMETH.CODE_SMP)\n INNER JOIN METH_PRELEVEMENT METH ON (SITMETH.CODE_METH_P=METH.CODE_METH_P)\n INNER JOIN SITE_PRELEVEMENT SITE ON (SITE.NSIT=SITMETH.NSIT)\n INNER JOIN COMMUNE COM ON (COM.NINSEE=SITE.NINSEE)\n %s\n ORDER BY MESLA.NOPOL,MESLA.LIBELLE,PRELEV.DATE_DEB\"\"\"", "%", "(", "select", ",", "condition", ")", "return", "psql", ".", "read_sql", "(", "_sql", ",", "self", ".", "conn", ")" ]
Retrieve manual (lab) measurements for a site site: site number (see the liste_sites_prelevement function) code_parametre: ISO code of the parameter to look up (C6H6=V4) debut: start date of the first sampling fin: end date of the last sampling court: return the table in short or long format (columns)
[ "Retrieve", "manual", "(", "lab", ")", "measurements", "for", "a", "site" ]
python
valid
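The function above assembles its WHERE clause with % string formatting, which is fragile if the inputs are not trusted. A hedged sketch of the bound-parameter alternative, using an in-memory sqlite3 table as a stand-in for the Oracle connection (the toy schema is invented for the demo):

```python
import sqlite3

# Toy table standing in for the ANALYSE join above; the point is only the
# parameter binding, which avoids interpolating values into the SQL text.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE analyse (nopol TEXT, nsit INTEGER, valeur REAL)')
conn.execute("INSERT INTO analyse VALUES ('V4', 12, 1.7)")

nopol, nsit = 'V4', 12
rows = conn.execute(
    'SELECT valeur FROM analyse WHERE nopol = ? AND nsit = ?',
    (nopol, nsit),          # values are bound, never formatted into the query
).fetchall()
assert rows == [(1.7,)]
```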
RLBot/RLBot
src/main/python/rlbot/gui/qt_root.py
https://github.com/RLBot/RLBot/blob/3f9b6bec8b9baf4dcfff0f6cf3103c8744ac6234/src/main/python/rlbot/gui/qt_root.py#L321-L341
def team_settings_edit_event(self, value=None): """ Handles the events when editing a value regarding the team settings :param value: the new value to store in the config :return: """ sender = self.sender() if value is None: value = sender.text() if sender is self.blue_name_lineedit: self.overall_config.set_value(TEAM_CONFIGURATION_HEADER, "Team Blue Name", value) elif sender is self.orange_name_lineedit: self.overall_config.set_value(TEAM_CONFIGURATION_HEADER, "Team Orange Name", value) elif sender is self.blue_color_spinbox: self.overall_config.set_value(TEAM_CONFIGURATION_HEADER, "Team Blue Color", value) elif sender is self.orange_color_spinbox: self.overall_config.set_value(TEAM_CONFIGURATION_HEADER, "Team Orange Color", value) if self.cfg_autosave_checkbutton.isChecked() and os.path.isfile(self.overall_config_path): self.save_overall_config(10)
[ "def", "team_settings_edit_event", "(", "self", ",", "value", "=", "None", ")", ":", "sender", "=", "self", ".", "sender", "(", ")", "if", "value", "is", "None", ":", "value", "=", "sender", ".", "text", "(", ")", "if", "sender", "is", "self", ".", "blue_name_lineedit", ":", "self", ".", "overall_config", ".", "set_value", "(", "TEAM_CONFIGURATION_HEADER", ",", "\"Team Blue Name\"", ",", "value", ")", "elif", "sender", "is", "self", ".", "orange_name_lineedit", ":", "self", ".", "overall_config", ".", "set_value", "(", "TEAM_CONFIGURATION_HEADER", ",", "\"Team Orange Name\"", ",", "value", ")", "elif", "sender", "is", "self", ".", "blue_color_spinbox", ":", "self", ".", "overall_config", ".", "set_value", "(", "TEAM_CONFIGURATION_HEADER", ",", "\"Team Blue Color\"", ",", "value", ")", "elif", "sender", "is", "self", ".", "orange_color_spinbox", ":", "self", ".", "overall_config", ".", "set_value", "(", "TEAM_CONFIGURATION_HEADER", ",", "\"Team Orange Color\"", ",", "value", ")", "if", "self", ".", "cfg_autosave_checkbutton", ".", "isChecked", "(", ")", "and", "os", ".", "path", ".", "isfile", "(", "self", ".", "overall_config_path", ")", ":", "self", ".", "save_overall_config", "(", "10", ")" ]
Handles the events when editing a value regarding the team settings :param value: the new value to store in the config :return:
[ "Handles", "the", "events", "when", "editing", "a", "value", "regarding", "the", "team", "settings", ":", "param", "value", ":", "the", "new", "value", "to", "store", "in", "the", "config", ":", "return", ":" ]
python
train
thewca/wca-regulations-compiler
wrc/parse/lexer.py
https://github.com/thewca/wca-regulations-compiler/blob/3ebbd8fe8fec7c9167296f59b2677696fe61a954/wrc/parse/lexer.py#L55-L61
def t_LABELDECL(self, token): ur'-\s<label>\s*\[(?P<label>.+?)\]\s*(?P<text>.+?)\n' label = token.lexer.lexmatch.group("label").decode("utf8") text = token.lexer.lexmatch.group("text").decode("utf8") token.value = (label, text) token.lexer.lineno += 1 return token
[ "def", "t_LABELDECL", "(", "self", ",", "token", ")", ":", "label", "=", "token", ".", "lexer", ".", "lexmatch", ".", "group", "(", "\"label\"", ")", ".", "decode", "(", "\"utf8\"", ")", "text", "=", "token", ".", "lexer", ".", "lexmatch", ".", "group", "(", "\"text\"", ")", ".", "decode", "(", "\"utf8\"", ")", "token", ".", "value", "=", "(", "label", ",", "text", ")", "token", ".", "lexer", ".", "lineno", "+=", "1", "return", "token" ]
ur'-\s<label>\s*\[(?P<label>.+?)\]\s*(?P<text>.+?)\n'
[ "ur", "-", "\\", "s<label", ">", "\\", "s", "*", "\\", "[", "(", "?P<label", ">", ".", "+", "?", ")", "\\", "]", "\\", "s", "*", "(", "?P<text", ">", ".", "+", "?", ")", "\\", "n" ]
python
train
napalm-automation/napalm
napalm/junos/junos.py
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/junos/junos.py#L249-L252
def load_merge_candidate(self, filename=None, config=None): """Open the candidate config and merge.""" self.config_replace = False self._load_candidate(filename, config, False)
[ "def", "load_merge_candidate", "(", "self", ",", "filename", "=", "None", ",", "config", "=", "None", ")", ":", "self", ".", "config_replace", "=", "False", "self", ".", "_load_candidate", "(", "filename", ",", "config", ",", "False", ")" ]
Open the candidate config and merge.
[ "Open", "the", "candidate", "config", "and", "merge", "." ]
python
train
inasafe/inasafe
safe/gui/tools/metadata_converter_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/metadata_converter_dialog.py#L297-L325
def select_output_directory(self): """Select output directory""" # Input layer input_layer_path = self.layer.source() input_file_name = os.path.basename(input_layer_path) input_extension = os.path.splitext(input_file_name)[1] # Get current path current_file_path = self.output_path_line_edit.text() if not current_file_path or not os.path.exists(current_file_path): current_file_path = input_layer_path # Filtering based on input layer extension_mapping = { '.shp': tr('Shapefile (*.shp);;'), '.geojson': tr('GeoJSON (*.geojson);;'), '.tif': tr('Raster TIF/TIFF (*.tif, *.tiff);;'), '.tiff': tr('Raster TIF/TIFF (*.tiff, *.tiff);;'), '.asc': tr('Raster ASCII File (*.asc);;'), } # Open File Dialog file_path, __ = QFileDialog.getSaveFileName( self, tr('Output File'), current_file_path, extension_mapping[input_extension] ) if file_path: self.output_path_line_edit.setText(file_path)
[ "def", "select_output_directory", "(", "self", ")", ":", "# Input layer", "input_layer_path", "=", "self", ".", "layer", ".", "source", "(", ")", "input_file_name", "=", "os", ".", "path", ".", "basename", "(", "input_layer_path", ")", "input_extension", "=", "os", ".", "path", ".", "splitext", "(", "input_file_name", ")", "[", "1", "]", "# Get current path", "current_file_path", "=", "self", ".", "output_path_line_edit", ".", "text", "(", ")", "if", "not", "current_file_path", "or", "not", "os", ".", "path", ".", "exists", "(", "current_file_path", ")", ":", "current_file_path", "=", "input_layer_path", "# Filtering based on input layer", "extension_mapping", "=", "{", "'.shp'", ":", "tr", "(", "'Shapefile (*.shp);;'", ")", ",", "'.geojson'", ":", "tr", "(", "'GeoJSON (*.geojson);;'", ")", ",", "'.tif'", ":", "tr", "(", "'Raster TIF/TIFF (*.tif, *.tiff);;'", ")", ",", "'.tiff'", ":", "tr", "(", "'Raster TIF/TIFF (*.tiff, *.tiff);;'", ")", ",", "'.asc'", ":", "tr", "(", "'Raster ASCII File (*.asc);;'", ")", ",", "}", "# Open File Dialog", "file_path", ",", "__", "=", "QFileDialog", ".", "getSaveFileName", "(", "self", ",", "tr", "(", "'Output File'", ")", ",", "current_file_path", ",", "extension_mapping", "[", "input_extension", "]", ")", "if", "file_path", ":", "self", ".", "output_path_line_edit", ".", "setText", "(", "file_path", ")" ]
Select output directory
[ "Select", "output", "directory" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v3/nbbase.py#L158-L173
def new_notebook(name=None, metadata=None, worksheets=None): """Create a notebook from a name, metadata and a list of worksheets.""" nb = NotebookNode() nb.nbformat = nbformat nb.nbformat_minor = nbformat_minor if worksheets is None: nb.worksheets = [] else: nb.worksheets = list(worksheets) if metadata is None: nb.metadata = new_metadata() else: nb.metadata = NotebookNode(metadata) if name is not None: nb.metadata.name = unicode(name) return nb
[ "def", "new_notebook", "(", "name", "=", "None", ",", "metadata", "=", "None", ",", "worksheets", "=", "None", ")", ":", "nb", "=", "NotebookNode", "(", ")", "nb", ".", "nbformat", "=", "nbformat", "nb", ".", "nbformat_minor", "=", "nbformat_minor", "if", "worksheets", "is", "None", ":", "nb", ".", "worksheets", "=", "[", "]", "else", ":", "nb", ".", "worksheets", "=", "list", "(", "worksheets", ")", "if", "metadata", "is", "None", ":", "nb", ".", "metadata", "=", "new_metadata", "(", ")", "else", ":", "nb", ".", "metadata", "=", "NotebookNode", "(", "metadata", ")", "if", "name", "is", "not", "None", ":", "nb", ".", "metadata", ".", "name", "=", "unicode", "(", "name", ")", "return", "nb" ]
Create a notebook from a name, metadata and a list of worksheets.
[ "Create", "a", "notebook", "from", "a", "name", "metadata", "and", "a", "list", "of", "worksheets", "." ]
python
test
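The builder above just normalises optional arguments into attributes of a node object. A Python 3 sketch of the same shape (`SimpleNamespace` is a stand-in for `NotebookNode`, which is a dict with attribute access, and the version numbers are placeholders):

```python
from types import SimpleNamespace

def new_notebook(name=None, metadata=None, worksheets=None):
    """Toy builder with the same normalisation logic as above."""
    nb = SimpleNamespace()
    nb.nbformat, nb.nbformat_minor = 3, 0     # assumed version constants
    nb.worksheets = list(worksheets) if worksheets is not None else []
    nb.metadata = SimpleNamespace(**(metadata or {}))
    if name is not None:
        nb.metadata.name = str(name)          # py3: str replaces unicode()
    return nb

nb = new_notebook(name='demo', metadata={'language': 'python'})
assert nb.metadata.name == 'demo' and nb.worksheets == []
```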
awslabs/sockeye
sockeye/coverage.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/coverage.py#L218-L265
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable: """ Returns callable to be used for updating coverage vectors in a sequence decoder. :param source: Shape: (batch_size, seq_len, encoder_num_hidden). :param source_length: Shape: (batch_size,). :param source_seq_len: Maximum length of source sequences. :return: Coverage callable. """ def update_coverage(prev_hidden: mx.sym.Symbol, attention_prob_scores: mx.sym.Symbol, prev_coverage: mx.sym.Symbol): """ :param prev_hidden: Previous hidden decoder state. Shape: (batch_size, decoder_num_hidden). :param attention_prob_scores: Current attention scores. Shape: (batch_size, source_seq_len). :param prev_coverage: Shape: (batch_size, source_seq_len, coverage_num_hidden). :return: Updated coverage matrix . Shape: (batch_size, source_seq_len, coverage_num_hidden). """ # (batch_size, source_seq_len, decoder_num_hidden) expanded_decoder = mx.sym.broadcast_axis( data=mx.sym.expand_dims(data=prev_hidden, axis=1, name="%sexpand_decoder" % self.prefix), axis=1, size=source_seq_len, name="%sbroadcast_decoder" % self.prefix) # (batch_size, source_seq_len, 1) expanded_att_scores = mx.sym.expand_dims(data=attention_prob_scores, axis=2, name="%sexpand_attention_scores" % self.prefix) # (batch_size, source_seq_len, encoder_num_hidden + decoder_num_hidden + 1) # +1 for the attention_prob_score for the source word concat_input = mx.sym.concat(source, expanded_decoder, expanded_att_scores, dim=2, name="%sconcat_inputs" % self.prefix) # (batch_size * source_seq_len, encoder_num_hidden + decoder_num_hidden + 1) flat_input = mx.sym.reshape(concat_input, shape=(-3, -1), name="%sflatten_inputs") # coverage: (batch_size * seq_len, coverage_num_hidden) coverage = mx.sym.reshape(data=prev_coverage, shape=(-3, -1)) updated_coverage, _ = self.gru(flat_input, states=[coverage]) # coverage: (batch_size, seq_len, coverage_num_hidden) coverage = mx.sym.reshape(updated_coverage, shape=(-1, source_seq_len, self.num_hidden)) return mask_coverage(coverage, source_length) return update_coverage
[ "def", "on", "(", "self", ",", "source", ":", "mx", ".", "sym", ".", "Symbol", ",", "source_length", ":", "mx", ".", "sym", ".", "Symbol", ",", "source_seq_len", ":", "int", ")", "->", "Callable", ":", "def", "update_coverage", "(", "prev_hidden", ":", "mx", ".", "sym", ".", "Symbol", ",", "attention_prob_scores", ":", "mx", ".", "sym", ".", "Symbol", ",", "prev_coverage", ":", "mx", ".", "sym", ".", "Symbol", ")", ":", "\"\"\"\n :param prev_hidden: Previous hidden decoder state. Shape: (batch_size, decoder_num_hidden).\n :param attention_prob_scores: Current attention scores. Shape: (batch_size, source_seq_len).\n :param prev_coverage: Shape: (batch_size, source_seq_len, coverage_num_hidden).\n :return: Updated coverage matrix . Shape: (batch_size, source_seq_len, coverage_num_hidden).\n \"\"\"", "# (batch_size, source_seq_len, decoder_num_hidden)", "expanded_decoder", "=", "mx", ".", "sym", ".", "broadcast_axis", "(", "data", "=", "mx", ".", "sym", ".", "expand_dims", "(", "data", "=", "prev_hidden", ",", "axis", "=", "1", ",", "name", "=", "\"%sexpand_decoder\"", "%", "self", ".", "prefix", ")", ",", "axis", "=", "1", ",", "size", "=", "source_seq_len", ",", "name", "=", "\"%sbroadcast_decoder\"", "%", "self", ".", "prefix", ")", "# (batch_size, source_seq_len, 1)", "expanded_att_scores", "=", "mx", ".", "sym", ".", "expand_dims", "(", "data", "=", "attention_prob_scores", ",", "axis", "=", "2", ",", "name", "=", "\"%sexpand_attention_scores\"", "%", "self", ".", "prefix", ")", "# (batch_size, source_seq_len, encoder_num_hidden + decoder_num_hidden + 1)", "# +1 for the attention_prob_score for the source word", "concat_input", "=", "mx", ".", "sym", ".", "concat", "(", "source", ",", "expanded_decoder", ",", "expanded_att_scores", ",", "dim", "=", "2", ",", "name", "=", "\"%sconcat_inputs\"", "%", "self", ".", "prefix", ")", "# (batch_size * source_seq_len, encoder_num_hidden + decoder_num_hidden + 1)", "flat_input", "=", "mx", ".", "sym", ".", "reshape", "(", "concat_input", ",", "shape", "=", "(", "-", "3", ",", "-", "1", ")", ",", "name", "=", "\"%sflatten_inputs\"", ")", "# coverage: (batch_size * seq_len, coverage_num_hidden)", "coverage", "=", "mx", ".", "sym", ".", "reshape", "(", "data", "=", "prev_coverage", ",", "shape", "=", "(", "-", "3", ",", "-", "1", ")", ")", "updated_coverage", ",", "_", "=", "self", ".", "gru", "(", "flat_input", ",", "states", "=", "[", "coverage", "]", ")", "# coverage: (batch_size, seq_len, coverage_num_hidden)", "coverage", "=", "mx", ".", "sym", ".", "reshape", "(", "updated_coverage", ",", "shape", "=", "(", "-", "1", ",", "source_seq_len", ",", "self", ".", "num_hidden", ")", ")", "return", "mask_coverage", "(", "coverage", ",", "source_length", ")", "return", "update_coverage" ]
Returns callable to be used for updating coverage vectors in a sequence decoder. :param source: Shape: (batch_size, seq_len, encoder_num_hidden). :param source_length: Shape: (batch_size,). :param source_seq_len: Maximum length of source sequences. :return: Coverage callable.
[ "Returns", "callable", "to", "be", "used", "for", "updating", "coverage", "vectors", "in", "a", "sequence", "decoder", "." ]
python
train
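The key shape bookkeeping above is MXNet's `reshape(shape=(-3, -1))`, which merges the batch and sequence axes so the GRU can update every source position at once, then splits them again afterwards. The equivalent, spelled out in numpy:

```python
import numpy as np

# Merge the first two axes, as MXNet's reshape(shape=(-3, -1)) does.
batch, seq_len, hidden = 2, 5, 4
coverage = np.zeros((batch, seq_len, hidden))

flat = coverage.reshape(batch * seq_len, hidden)   # (-3, -1) in MXNet terms
assert flat.shape == (10, 4)

# ...the per-position GRU update would run on this flat view...

restored = flat.reshape(batch, seq_len, hidden)    # split the axes back out
assert restored.shape == (2, 5, 4)
```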
Kozea/pygal
pygal/graph/bar.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/bar.py#L106-L136
def bar(self, serie, rescale=False): """Draw a bar graph for a serie""" serie_node = self.svg.serie(serie) bars = self.svg.node(serie_node['plot'], class_="bars") if rescale and self.secondary_series: points = self._rescale(serie.points) else: points = serie.points for i, (x, y) in enumerate(points): if None in (x, y) or (self.logarithmic and y <= 0): continue metadata = serie.metadata.get(i) val = self._format(serie, i) bar = decorate( self.svg, self.svg.node(bars, class_='bar'), metadata ) x_, y_, width, height = self._bar( serie, bar, x, y, i, self.zero, secondary=rescale ) self._confidence_interval( serie_node['overlay'], x_ + width / 2, y_, serie.values[i], metadata ) self._tooltip_and_print_values( serie_node, serie, bar, i, val, metadata, x_, y_, width, height )
[ "def", "bar", "(", "self", ",", "serie", ",", "rescale", "=", "False", ")", ":", "serie_node", "=", "self", ".", "svg", ".", "serie", "(", "serie", ")", "bars", "=", "self", ".", "svg", ".", "node", "(", "serie_node", "[", "'plot'", "]", ",", "class_", "=", "\"bars\"", ")", "if", "rescale", "and", "self", ".", "secondary_series", ":", "points", "=", "self", ".", "_rescale", "(", "serie", ".", "points", ")", "else", ":", "points", "=", "serie", ".", "points", "for", "i", ",", "(", "x", ",", "y", ")", "in", "enumerate", "(", "points", ")", ":", "if", "None", "in", "(", "x", ",", "y", ")", "or", "(", "self", ".", "logarithmic", "and", "y", "<=", "0", ")", ":", "continue", "metadata", "=", "serie", ".", "metadata", ".", "get", "(", "i", ")", "val", "=", "self", ".", "_format", "(", "serie", ",", "i", ")", "bar", "=", "decorate", "(", "self", ".", "svg", ",", "self", ".", "svg", ".", "node", "(", "bars", ",", "class_", "=", "'bar'", ")", ",", "metadata", ")", "x_", ",", "y_", ",", "width", ",", "height", "=", "self", ".", "_bar", "(", "serie", ",", "bar", ",", "x", ",", "y", ",", "i", ",", "self", ".", "zero", ",", "secondary", "=", "rescale", ")", "self", ".", "_confidence_interval", "(", "serie_node", "[", "'overlay'", "]", ",", "x_", "+", "width", "/", "2", ",", "y_", ",", "serie", ".", "values", "[", "i", "]", ",", "metadata", ")", "self", ".", "_tooltip_and_print_values", "(", "serie_node", ",", "serie", ",", "bar", ",", "i", ",", "val", ",", "metadata", ",", "x_", ",", "y_", ",", "width", ",", "height", ")" ]
Draw a bar graph for a serie
[ "Draw", "a", "bar", "graph", "for", "a", "serie" ]
python
train
restran/mountains
mountains/django/model.py
https://github.com/restran/mountains/blob/a97fee568b112f4e10d878f815d0db3dd0a98d74/mountains/django/model.py#L36-L45
def set_dict_none_default(dict_item, default_value): """ For values in the dict that are None, reset them to the default value :param dict_item: :param default_value: :return: """ for (k, v) in iteritems(dict_item): if v is None: dict_item[k] = default_value
[ "def", "set_dict_none_default", "(", "dict_item", ",", "default_value", ")", ":", "for", "(", "k", ",", "v", ")", "in", "iteritems", "(", "dict_item", ")", ":", "if", "v", "is", "None", ":", "dict_item", "[", "k", "]", "=", "default_value" ]
For values in the dict that are None, reset them to the default value :param dict_item: :param default_value: :return:
[ "For", "values", "in", "the", "dict", "that", "are", "None", "reset", "them", "to", "the", "default", "value", ":", "param", "dict_item", ":", ":", "param", "default_value", ":", ":", "return", ":" ]
python
train
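`iteritems` is the six/py2 spelling of `dict.items()`; on Python 3 the same helper and a quick usage example look like this:

```python
def set_dict_none_default(dict_item, default_value):
    """Replace every None value in dict_item with default_value, in place."""
    for k, v in dict_item.items():     # py3 equivalent of iteritems()
        if v is None:
            dict_item[k] = default_value

d = {'host': 'localhost', 'port': None, 'timeout': None}
set_dict_none_default(d, 0)
assert d == {'host': 'localhost', 'port': 0, 'timeout': 0}
```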
PMBio/limix-backup
limix/mtSet/core/plink_reader.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/mtSet/core/plink_reader.py#L36-L42
def readFAM(basefilename,usecols=None): """ Helper method for speeding up reading of a FAM file """ fam = basefilename+'.fam' fam = SP.loadtxt(fam,dtype=bytes,usecols=usecols) return fam
[ "def", "readFAM", "(", "basefilename", ",", "usecols", "=", "None", ")", ":", "fam", "=", "basefilename", "+", "'.fam'", "fam", "=", "SP", ".", "loadtxt", "(", "fam", ",", "dtype", "=", "bytes", ",", "usecols", "=", "usecols", ")", "return", "fam" ]
Helper method for speeding up reading of a FAM file
[ "Helper", "method", "for", "speeding", "up", "reading", "of", "a", "FAM", "file" ]
python
train
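The speed-up above comes from `loadtxt`'s `usecols`, which skips unneeded columns while parsing. A small in-memory demonstration of the same call (the six-column layout follows the standard PLINK .fam format; the sample values are invented):

```python
import io
import numpy as np

# A .fam-style table: FID IID father mother sex phenotype.
fam_text = io.StringIO('fam1 ind1 0 0 1 -9\nfam1 ind2 0 0 2 -9\n')

# Load only the family and individual IDs (columns 0 and 1).
ids = np.loadtxt(fam_text, dtype=str, usecols=(0, 1))
assert ids.tolist() == [['fam1', 'ind1'], ['fam1', 'ind2']]
```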