Unnamed: 0 | repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url |
---|---|---|---|---|---|---|---|---|---|---|---|
2,000 | yamcs/yamcs-python | yamcs-client/yamcs/archive/client.py | ArchiveClient.list_packets | def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False):
"""
Reads packet information between the specified start and stop
time.
Packets are sorted by generation time and sequence number.
:param ~datetime.datetime start: Minimum generation time of the returned
packets (inclusive)
:param ~datetime.datetime stop: Maximum generation time of the returned
packets (exclusive)
:param int page_size: Page size of underlying requests. Higher values imply
less overhead, but risk hitting the maximum message size limit.
:param bool descending: If set to ``True`` packets are fetched in reverse
order (most recent first).
:rtype: ~collections.Iterable[.Packet]
"""
params = {
'order': 'desc' if descending else 'asc',
}
if name is not None:
params['name'] = name
if page_size is not None:
params['limit'] = page_size
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
return pagination.Iterator(
client=self._client,
path='/archive/{}/packets'.format(self._instance),
params=params,
response_class=rest_pb2.ListPacketsResponse,
items_key='packet',
item_mapper=Packet,
) | python | def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False):
"""
Reads packet information between the specified start and stop
time.
Packets are sorted by generation time and sequence number.
:param ~datetime.datetime start: Minimum generation time of the returned
packets (inclusive)
:param ~datetime.datetime stop: Maximum generation time of the returned
packets (exclusive)
:param int page_size: Page size of underlying requests. Higher values imply
less overhead, but risk hitting the maximum message size limit.
:param bool descending: If set to ``True`` packets are fetched in reverse
order (most recent first).
:rtype: ~collections.Iterable[.Packet]
"""
params = {
'order': 'desc' if descending else 'asc',
}
if name is not None:
params['name'] = name
if page_size is not None:
params['limit'] = page_size
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
return pagination.Iterator(
client=self._client,
path='/archive/{}/packets'.format(self._instance),
params=params,
response_class=rest_pb2.ListPacketsResponse,
items_key='packet',
item_mapper=Packet,
) | ['def', 'list_packets', '(', 'self', ',', 'name', '=', 'None', ',', 'start', '=', 'None', ',', 'stop', '=', 'None', ',', 'page_size', '=', '500', ',', 'descending', '=', 'False', ')', ':', 'params', '=', '{', "'order'", ':', "'desc'", 'if', 'descending', 'else', "'asc'", ',', '}', 'if', 'name', 'is', 'not', 'None', ':', 'params', '[', "'name'", ']', '=', 'name', 'if', 'page_size', 'is', 'not', 'None', ':', 'params', '[', "'limit'", ']', '=', 'page_size', 'if', 'start', 'is', 'not', 'None', ':', 'params', '[', "'start'", ']', '=', 'to_isostring', '(', 'start', ')', 'if', 'stop', 'is', 'not', 'None', ':', 'params', '[', "'stop'", ']', '=', 'to_isostring', '(', 'stop', ')', 'return', 'pagination', '.', 'Iterator', '(', 'client', '=', 'self', '.', '_client', ',', 'path', '=', "'/archive/{}/packets'", '.', 'format', '(', 'self', '.', '_instance', ')', ',', 'params', '=', 'params', ',', 'response_class', '=', 'rest_pb2', '.', 'ListPacketsResponse', ',', 'items_key', '=', "'packet'", ',', 'item_mapper', '=', 'Packet', ',', ')'] | Reads packet information between the specified start and stop
time.
Packets are sorted by generation time and sequence number.
:param ~datetime.datetime start: Minimum generation time of the returned
packets (inclusive)
:param ~datetime.datetime stop: Maximum generation time of the returned
packets (exclusive)
:param int page_size: Page size of underlying requests. Higher values imply
less overhead, but risk hitting the maximum message size limit.
:param bool descending: If set to ``True`` packets are fetched in reverse
order (most recent first).
:rtype: ~collections.Iterable[.Packet] | ['Reads', 'packet', 'information', 'between', 'the', 'specified', 'start', 'and', 'stop', 'time', '.'] | train | https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L222-L258 |
2,001 | globocom/GloboNetworkAPI-client-python | networkapiclient/ApiV4Equipment.py | ApiV4Equipment.delete | def delete(self, ids):
"""
Method to delete equipments by their id's
:param ids: Identifiers of equipments
:return: None
"""
url = build_uri_with_ids('api/v4/equipment/%s/', ids)
return super(ApiV4Equipment, self).delete(url) | python | def delete(self, ids):
"""
Method to delete equipments by their id's
:param ids: Identifiers of equipments
:return: None
"""
url = build_uri_with_ids('api/v4/equipment/%s/', ids)
return super(ApiV4Equipment, self).delete(url) | ['def', 'delete', '(', 'self', ',', 'ids', ')', ':', 'url', '=', 'build_uri_with_ids', '(', "'api/v4/equipment/%s/'", ',', 'ids', ')', 'return', 'super', '(', 'ApiV4Equipment', ',', 'self', ')', '.', 'delete', '(', 'url', ')'] | Method to delete equipments by their id's
:param ids: Identifiers of equipments
:return: None | ['Method', 'to', 'delete', 'equipments', 'by', 'their', 'id', 's'] | train | https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiV4Equipment.py#L65-L73 |
2,002 | brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py | brocade_firmware.logical_chassis_fwdl_status_output_overall_status | def logical_chassis_fwdl_status_output_overall_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
overall_status = ET.SubElement(output, "overall-status")
overall_status.text = kwargs.pop('overall_status')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def logical_chassis_fwdl_status_output_overall_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
overall_status = ET.SubElement(output, "overall-status")
overall_status.text = kwargs.pop('overall_status')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'logical_chassis_fwdl_status_output_overall_status', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'logical_chassis_fwdl_status', '=', 'ET', '.', 'Element', '(', '"logical_chassis_fwdl_status"', ')', 'config', '=', 'logical_chassis_fwdl_status', 'output', '=', 'ET', '.', 'SubElement', '(', 'logical_chassis_fwdl_status', ',', '"output"', ')', 'overall_status', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"overall-status"', ')', 'overall_status', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'overall_status'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L840-L851 |
2,003 | inasafe/inasafe | safe/gui/widgets/message_viewer.py | MessageViewer.generate_pdf | def generate_pdf(self):
"""Generate a PDF from the displayed content."""
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
printer.setPageSize(QtGui.QPrinter.A4)
printer.setColorMode(QtGui.QPrinter.Color)
printer.setOutputFormat(QtGui.QPrinter.PdfFormat)
report_path = unique_filename(suffix='.pdf')
printer.setOutputFileName(report_path)
self.print_(printer)
url = QtCore.QUrl.fromLocalFile(report_path)
# noinspection PyTypeChecker,PyCallByClass,PyArgumentList
QtGui.QDesktopServices.openUrl(url) | python | def generate_pdf(self):
"""Generate a PDF from the displayed content."""
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
printer.setPageSize(QtGui.QPrinter.A4)
printer.setColorMode(QtGui.QPrinter.Color)
printer.setOutputFormat(QtGui.QPrinter.PdfFormat)
report_path = unique_filename(suffix='.pdf')
printer.setOutputFileName(report_path)
self.print_(printer)
url = QtCore.QUrl.fromLocalFile(report_path)
# noinspection PyTypeChecker,PyCallByClass,PyArgumentList
QtGui.QDesktopServices.openUrl(url) | ['def', 'generate_pdf', '(', 'self', ')', ':', 'printer', '=', 'QtGui', '.', 'QPrinter', '(', 'QtGui', '.', 'QPrinter', '.', 'HighResolution', ')', 'printer', '.', 'setPageSize', '(', 'QtGui', '.', 'QPrinter', '.', 'A4', ')', 'printer', '.', 'setColorMode', '(', 'QtGui', '.', 'QPrinter', '.', 'Color', ')', 'printer', '.', 'setOutputFormat', '(', 'QtGui', '.', 'QPrinter', '.', 'PdfFormat', ')', 'report_path', '=', 'unique_filename', '(', 'suffix', '=', "'.pdf'", ')', 'printer', '.', 'setOutputFileName', '(', 'report_path', ')', 'self', '.', 'print_', '(', 'printer', ')', 'url', '=', 'QtCore', '.', 'QUrl', '.', 'fromLocalFile', '(', 'report_path', ')', '# noinspection PyTypeChecker,PyCallByClass,PyArgumentList', 'QtGui', '.', 'QDesktopServices', '.', 'openUrl', '(', 'url', ')'] | Generate a PDF from the displayed content. | ['Generate', 'a', 'PDF', 'from', 'the', 'displayed', 'content', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/message_viewer.py#L378-L389 |
2,004 | hydpy-dev/hydpy | hydpy/models/lland/lland_derived.py | KB.update | def update(self):
"""Update |KB| based on |EQB| and |TInd|.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqb(10.0)
>>> tind.value = 10.0
>>> derived.kb.update()
>>> derived.kb
kb(100.0)
"""
con = self.subpars.pars.control
self(con.eqb*con.tind) | python | def update(self):
"""Update |KB| based on |EQB| and |TInd|.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqb(10.0)
>>> tind.value = 10.0
>>> derived.kb.update()
>>> derived.kb
kb(100.0)
"""
con = self.subpars.pars.control
self(con.eqb*con.tind) | ['def', 'update', '(', 'self', ')', ':', 'con', '=', 'self', '.', 'subpars', '.', 'pars', '.', 'control', 'self', '(', 'con', '.', 'eqb', '*', 'con', '.', 'tind', ')'] | Update |KB| based on |EQB| and |TInd|.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqb(10.0)
>>> tind.value = 10.0
>>> derived.kb.update()
>>> derived.kb
kb(100.0) | ['Update', '|KB|', 'based', 'on', '|EQB|', 'and', '|TInd|', '.'] | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_derived.py#L112-L124 |
2,005 | CivicSpleen/ckcache | ckcache/s3.py | S3Cache.list | def list(self, path=None, with_metadata=False, include_partitions=False):
'''Get a list of all of bundle files in the cache. Does not return partition files'''
import json
sub_path = self.prefix + '/' + path.strip('/') if path else self.prefix
l = {}
for e in self.bucket.list(sub_path):
path = e.name.replace(self.prefix, '', 1).strip('/')
if path.startswith('_') or path.startswith('meta'):
continue
# TODO 'include_partitions' doesn't make any sense outside of ambry
if not include_partitions and path.count('/') > 1:
continue # partition files
if with_metadata:
d = self.metadata(path)
if d and 'identity' in d:
d['identity'] = json.loads(d['identity'])
else:
d = {}
d['caches'] = [self.repo_id]
if path:
l[path] = d
return l | python | def list(self, path=None, with_metadata=False, include_partitions=False):
'''Get a list of all of bundle files in the cache. Does not return partition files'''
import json
sub_path = self.prefix + '/' + path.strip('/') if path else self.prefix
l = {}
for e in self.bucket.list(sub_path):
path = e.name.replace(self.prefix, '', 1).strip('/')
if path.startswith('_') or path.startswith('meta'):
continue
# TODO 'include_partitions' doesn't make any sense outside of ambry
if not include_partitions and path.count('/') > 1:
continue # partition files
if with_metadata:
d = self.metadata(path)
if d and 'identity' in d:
d['identity'] = json.loads(d['identity'])
else:
d = {}
d['caches'] = [self.repo_id]
if path:
l[path] = d
return l | ['def', 'list', '(', 'self', ',', 'path', '=', 'None', ',', 'with_metadata', '=', 'False', ',', 'include_partitions', '=', 'False', ')', ':', 'import', 'json', 'sub_path', '=', 'self', '.', 'prefix', '+', "'/'", '+', 'path', '.', 'strip', '(', "'/'", ')', 'if', 'path', 'else', 'self', '.', 'prefix', 'l', '=', '{', '}', 'for', 'e', 'in', 'self', '.', 'bucket', '.', 'list', '(', 'sub_path', ')', ':', 'path', '=', 'e', '.', 'name', '.', 'replace', '(', 'self', '.', 'prefix', ',', "''", ',', '1', ')', '.', 'strip', '(', "'/'", ')', 'if', 'path', '.', 'startswith', '(', "'_'", ')', 'or', 'path', '.', 'startswith', '(', "'meta'", ')', ':', 'continue', "# TODO 'include_partitions' doesn't make any sense outside of ambry", 'if', 'not', 'include_partitions', 'and', 'path', '.', 'count', '(', "'/'", ')', '>', '1', ':', 'continue', '# partition files', 'if', 'with_metadata', ':', 'd', '=', 'self', '.', 'metadata', '(', 'path', ')', 'if', 'd', 'and', "'identity'", 'in', 'd', ':', 'd', '[', "'identity'", ']', '=', 'json', '.', 'loads', '(', 'd', '[', "'identity'", ']', ')', 'else', ':', 'd', '=', '{', '}', 'd', '[', "'caches'", ']', '=', '[', 'self', '.', 'repo_id', ']', 'if', 'path', ':', 'l', '[', 'path', ']', '=', 'd', 'return', 'l'] | Get a list of all of bundle files in the cache. Does not return partition files | ['Get', 'a', 'list', 'of', 'all', 'of', 'bundle', 'files', 'in', 'the', 'cache', '.', 'Does', 'not', 'return', 'partition', 'files'] | train | https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/s3.py#L440-L470 |
2,006 | crocs-muni/roca | roca/detect.py | try_get_dn_string | def try_get_dn_string(subject, shorten=False):
"""
Returns DN as a string
:param subject:
:param shorten:
:return:
"""
try:
from cryptography.x509.oid import NameOID
from cryptography.x509 import ObjectIdentifier
oid_names = {
getattr(NameOID, 'COMMON_NAME', ObjectIdentifier("2.5.4.3")): "CN",
getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier("2.5.4.6")): "C",
getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier("2.5.4.7")): "L",
getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier("2.5.4.8")): "ST",
getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier("2.5.4.9")): "St",
getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier("2.5.4.10")): "O",
getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier("2.5.4.11")): "OU",
getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier("2.5.4.5")): "SN",
getattr(NameOID, 'USER_ID', ObjectIdentifier("0.9.2342.19200300.100.1.1")): "userID",
getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier("0.9.2342.19200300.100.1.25")): "domainComponent",
getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier("1.2.840.113549.1.9.1")): "emailAddress",
getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier("2.5.4.17")): "ZIP",
}
ret = []
try:
for attribute in subject:
oid = attribute.oid
dot = oid.dotted_string
oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name
val = attribute.value
ret.append('%s: %s' % (oid_name, val))
except:
pass
return ', '.join(ret)
except Exception as e:
logger.warning('Unexpected error: %s' % e)
return 'N/A' | python | def try_get_dn_string(subject, shorten=False):
"""
Returns DN as a string
:param subject:
:param shorten:
:return:
"""
try:
from cryptography.x509.oid import NameOID
from cryptography.x509 import ObjectIdentifier
oid_names = {
getattr(NameOID, 'COMMON_NAME', ObjectIdentifier("2.5.4.3")): "CN",
getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier("2.5.4.6")): "C",
getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier("2.5.4.7")): "L",
getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier("2.5.4.8")): "ST",
getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier("2.5.4.9")): "St",
getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier("2.5.4.10")): "O",
getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier("2.5.4.11")): "OU",
getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier("2.5.4.5")): "SN",
getattr(NameOID, 'USER_ID', ObjectIdentifier("0.9.2342.19200300.100.1.1")): "userID",
getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier("0.9.2342.19200300.100.1.25")): "domainComponent",
getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier("1.2.840.113549.1.9.1")): "emailAddress",
getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier("2.5.4.17")): "ZIP",
}
ret = []
try:
for attribute in subject:
oid = attribute.oid
dot = oid.dotted_string
oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name
val = attribute.value
ret.append('%s: %s' % (oid_name, val))
except:
pass
return ', '.join(ret)
except Exception as e:
logger.warning('Unexpected error: %s' % e)
return 'N/A' | ['def', 'try_get_dn_string', '(', 'subject', ',', 'shorten', '=', 'False', ')', ':', 'try', ':', 'from', 'cryptography', '.', 'x509', '.', 'oid', 'import', 'NameOID', 'from', 'cryptography', '.', 'x509', 'import', 'ObjectIdentifier', 'oid_names', '=', '{', 'getattr', '(', 'NameOID', ',', "'COMMON_NAME'", ',', 'ObjectIdentifier', '(', '"2.5.4.3"', ')', ')', ':', '"CN"', ',', 'getattr', '(', 'NameOID', ',', "'COUNTRY_NAME'", ',', 'ObjectIdentifier', '(', '"2.5.4.6"', ')', ')', ':', '"C"', ',', 'getattr', '(', 'NameOID', ',', "'LOCALITY_NAME'", ',', 'ObjectIdentifier', '(', '"2.5.4.7"', ')', ')', ':', '"L"', ',', 'getattr', '(', 'NameOID', ',', "'STATE_OR_PROVINCE_NAME'", ',', 'ObjectIdentifier', '(', '"2.5.4.8"', ')', ')', ':', '"ST"', ',', 'getattr', '(', 'NameOID', ',', "'STREET_ADDRESS'", ',', 'ObjectIdentifier', '(', '"2.5.4.9"', ')', ')', ':', '"St"', ',', 'getattr', '(', 'NameOID', ',', "'ORGANIZATION_NAME'", ',', 'ObjectIdentifier', '(', '"2.5.4.10"', ')', ')', ':', '"O"', ',', 'getattr', '(', 'NameOID', ',', "'ORGANIZATIONAL_UNIT_NAME'", ',', 'ObjectIdentifier', '(', '"2.5.4.11"', ')', ')', ':', '"OU"', ',', 'getattr', '(', 'NameOID', ',', "'SERIAL_NUMBER'", ',', 'ObjectIdentifier', '(', '"2.5.4.5"', ')', ')', ':', '"SN"', ',', 'getattr', '(', 'NameOID', ',', "'USER_ID'", ',', 'ObjectIdentifier', '(', '"0.9.2342.19200300.100.1.1"', ')', ')', ':', '"userID"', ',', 'getattr', '(', 'NameOID', ',', "'DOMAIN_COMPONENT'", ',', 'ObjectIdentifier', '(', '"0.9.2342.19200300.100.1.25"', ')', ')', ':', '"domainComponent"', ',', 'getattr', '(', 'NameOID', ',', "'EMAIL_ADDRESS'", ',', 'ObjectIdentifier', '(', '"1.2.840.113549.1.9.1"', ')', ')', ':', '"emailAddress"', ',', 'getattr', '(', 'NameOID', ',', "'POSTAL_CODE'", ',', 'ObjectIdentifier', '(', '"2.5.4.17"', ')', ')', ':', '"ZIP"', ',', '}', 'ret', '=', '[', ']', 'try', ':', 'for', 'attribute', 'in', 'subject', ':', 'oid', '=', 'attribute', '.', 'oid', 'dot', '=', 'oid', '.', 'dotted_string', 'oid_name', '=', 'oid_names', '[', 'oid', ']', 'if', 'shorten', 'and', 'oid', 'in', 'oid_names', 'else', 'oid', '.', '_name', 'val', '=', 'attribute', '.', 'value', 'ret', '.', 'append', '(', "'%s: %s'", '%', '(', 'oid_name', ',', 'val', ')', ')', 'except', ':', 'pass', 'return', "', '", '.', 'join', '(', 'ret', ')', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'warning', '(', "'Unexpected error: %s'", '%', 'e', ')', 'return', "'N/A'"] | Returns DN as a string
:param subject:
:param shorten:
:return: | ['Returns', 'DN', 'as', 'a', 'string', ':', 'param', 'subject', ':', ':', 'param', 'shorten', ':', ':', 'return', ':'] | train | https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L237-L276 |
2,007 | mwouts/jupytext | jupytext/cell_reader.py | LightScriptCellReader.find_region_end | def find_region_end(self, lines):
"""Find the end of the region started with start and end markers"""
if self.metadata and 'cell_type' in self.metadata:
self.cell_type = self.metadata.pop('cell_type')
else:
self.cell_type = 'code'
parser = StringParser(self.language or self.default_language)
for i, line in enumerate(lines):
# skip cell header
if self.metadata is not None and i == 0:
continue
if parser.is_quoted():
parser.read_line(line)
continue
parser.read_line(line)
# New code region
# Simple code pattern in LightScripts must be preceded with a blank line
if self.start_code_re.match(line) or (
self.simple_start_code_re and self.simple_start_code_re.match(line) and
(self.cell_marker_start or i == 0 or _BLANK_LINE.match(lines[i - 1]))):
if self.explicit_end_marker_required:
# Metadata here was conditioned on finding an explicit end marker
# before the next start marker. So we dismiss it.
self.metadata = None
self.language = None
if i > 0 and _BLANK_LINE.match(lines[i - 1]):
if i > 1 and _BLANK_LINE.match(lines[i - 2]):
return i - 2, i, False
return i - 1, i, False
return i, i, False
if not self.ignore_end_marker and self.end_code_re:
if self.end_code_re.match(line):
return i, i + 1, True
elif _BLANK_LINE.match(line):
if not next_code_is_indented(lines[i:]):
if i > 0:
return i, i + 1, False
if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
return 1, 1, False
return 1, 2, False
return len(lines), len(lines), False | python | def find_region_end(self, lines):
"""Find the end of the region started with start and end markers"""
if self.metadata and 'cell_type' in self.metadata:
self.cell_type = self.metadata.pop('cell_type')
else:
self.cell_type = 'code'
parser = StringParser(self.language or self.default_language)
for i, line in enumerate(lines):
# skip cell header
if self.metadata is not None and i == 0:
continue
if parser.is_quoted():
parser.read_line(line)
continue
parser.read_line(line)
# New code region
# Simple code pattern in LightScripts must be preceded with a blank line
if self.start_code_re.match(line) or (
self.simple_start_code_re and self.simple_start_code_re.match(line) and
(self.cell_marker_start or i == 0 or _BLANK_LINE.match(lines[i - 1]))):
if self.explicit_end_marker_required:
# Metadata here was conditioned on finding an explicit end marker
# before the next start marker. So we dismiss it.
self.metadata = None
self.language = None
if i > 0 and _BLANK_LINE.match(lines[i - 1]):
if i > 1 and _BLANK_LINE.match(lines[i - 2]):
return i - 2, i, False
return i - 1, i, False
return i, i, False
if not self.ignore_end_marker and self.end_code_re:
if self.end_code_re.match(line):
return i, i + 1, True
elif _BLANK_LINE.match(line):
if not next_code_is_indented(lines[i:]):
if i > 0:
return i, i + 1, False
if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
return 1, 1, False
return 1, 2, False
return len(lines), len(lines), False | ['def', 'find_region_end', '(', 'self', ',', 'lines', ')', ':', 'if', 'self', '.', 'metadata', 'and', "'cell_type'", 'in', 'self', '.', 'metadata', ':', 'self', '.', 'cell_type', '=', 'self', '.', 'metadata', '.', 'pop', '(', "'cell_type'", ')', 'else', ':', 'self', '.', 'cell_type', '=', "'code'", 'parser', '=', 'StringParser', '(', 'self', '.', 'language', 'or', 'self', '.', 'default_language', ')', 'for', 'i', ',', 'line', 'in', 'enumerate', '(', 'lines', ')', ':', '# skip cell header', 'if', 'self', '.', 'metadata', 'is', 'not', 'None', 'and', 'i', '==', '0', ':', 'continue', 'if', 'parser', '.', 'is_quoted', '(', ')', ':', 'parser', '.', 'read_line', '(', 'line', ')', 'continue', 'parser', '.', 'read_line', '(', 'line', ')', '# New code region', '# Simple code pattern in LightScripts must be preceded with a blank line', 'if', 'self', '.', 'start_code_re', '.', 'match', '(', 'line', ')', 'or', '(', 'self', '.', 'simple_start_code_re', 'and', 'self', '.', 'simple_start_code_re', '.', 'match', '(', 'line', ')', 'and', '(', 'self', '.', 'cell_marker_start', 'or', 'i', '==', '0', 'or', '_BLANK_LINE', '.', 'match', '(', 'lines', '[', 'i', '-', '1', ']', ')', ')', ')', ':', 'if', 'self', '.', 'explicit_end_marker_required', ':', '# Metadata here was conditioned on finding an explicit end marker', '# before the next start marker. So we dismiss it.', 'self', '.', 'metadata', '=', 'None', 'self', '.', 'language', '=', 'None', 'if', 'i', '>', '0', 'and', '_BLANK_LINE', '.', 'match', '(', 'lines', '[', 'i', '-', '1', ']', ')', ':', 'if', 'i', '>', '1', 'and', '_BLANK_LINE', '.', 'match', '(', 'lines', '[', 'i', '-', '2', ']', ')', ':', 'return', 'i', '-', '2', ',', 'i', ',', 'False', 'return', 'i', '-', '1', ',', 'i', ',', 'False', 'return', 'i', ',', 'i', ',', 'False', 'if', 'not', 'self', '.', 'ignore_end_marker', 'and', 'self', '.', 'end_code_re', ':', 'if', 'self', '.', 'end_code_re', '.', 'match', '(', 'line', ')', ':', 'return', 'i', ',', 'i', '+', '1', ',', 'True', 'elif', '_BLANK_LINE', '.', 'match', '(', 'line', ')', ':', 'if', 'not', 'next_code_is_indented', '(', 'lines', '[', 'i', ':', ']', ')', ':', 'if', 'i', '>', '0', ':', 'return', 'i', ',', 'i', '+', '1', ',', 'False', 'if', 'len', '(', 'lines', ')', '>', '1', 'and', 'not', '_BLANK_LINE', '.', 'match', '(', 'lines', '[', '1', ']', ')', ':', 'return', '1', ',', '1', ',', 'False', 'return', '1', ',', '2', ',', 'False', 'return', 'len', '(', 'lines', ')', ',', 'len', '(', 'lines', ')', ',', 'False'] | Find the end of the region started with start and end markers | ['Find', 'the', 'end', 'of', 'the', 'region', 'started', 'with', 'start', 'and', 'end', 'markers'] | train | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_reader.py#L509-L557 |
2,008 | edx/pa11ycrawler | pa11ycrawler/pipelines/pa11y.py | ignore_rules_for_url | def ignore_rules_for_url(spider, url):
"""
Returns a list of ignore rules from the given spider,
that are relevant to the given URL.
"""
ignore_rules = getattr(spider, "pa11y_ignore_rules", {}) or {}
return itertools.chain.from_iterable(
rule_list
for url_glob, rule_list
in ignore_rules.items()
if fnmatch.fnmatch(url, url_glob)
) | python | def ignore_rules_for_url(spider, url):
"""
Returns a list of ignore rules from the given spider,
that are relevant to the given URL.
"""
ignore_rules = getattr(spider, "pa11y_ignore_rules", {}) or {}
return itertools.chain.from_iterable(
rule_list
for url_glob, rule_list
in ignore_rules.items()
if fnmatch.fnmatch(url, url_glob)
) | ['def', 'ignore_rules_for_url', '(', 'spider', ',', 'url', ')', ':', 'ignore_rules', '=', 'getattr', '(', 'spider', ',', '"pa11y_ignore_rules"', ',', '{', '}', ')', 'or', '{', '}', 'return', 'itertools', '.', 'chain', '.', 'from_iterable', '(', 'rule_list', 'for', 'url_glob', ',', 'rule_list', 'in', 'ignore_rules', '.', 'items', '(', ')', 'if', 'fnmatch', '.', 'fnmatch', '(', 'url', ',', 'url_glob', ')', ')'] | Returns a list of ignore rules from the given spider,
that are relevant to the given URL. | ['Returns', 'a', 'list', 'of', 'ignore', 'rules', 'from', 'the', 'given', 'spider', 'that', 'are', 'relevant', 'to', 'the', 'given', 'URL', '.'] | train | https://github.com/edx/pa11ycrawler/blob/fc672d4524463bc050ade4c7c97801c0d5bf8c9e/pa11ycrawler/pipelines/pa11y.py#L21-L32 |
2,009 | a1ezzz/wasp-general | wasp_general/task/dependency.py | WTaskDependencyRegistryStorage.started_tasks | def started_tasks(self, task_registry_id=None, task_cls=None):
""" Return tasks that was started. Result way be filtered by the given arguments.
:param task_registry_id: if it is specified, then try to return single task which id is the same as \
this value.
:param task_cls: if it is specified then result will be consists of this subclass only
:return: None or WTask or tuple of WTask
"""
if task_registry_id is not None:
task = None
for registered_task in self.__started:
if registered_task.__registry_tag__ == task_registry_id:
task = registered_task
if task_cls is not None and task is not None:
if isinstance(task, task_cls) is True:
return task
return None
return task
result = filter(lambda x: x is not None, self.__started)
if task_cls is not None:
result = filter(lambda x: isinstance(x, task_cls), result)
return tuple(result) | python | def started_tasks(self, task_registry_id=None, task_cls=None):
""" Return tasks that was started. Result way be filtered by the given arguments.
:param task_registry_id: if it is specified, then try to return single task which id is the same as \
this value.
:param task_cls: if it is specified then result will be consists of this subclass only
:return: None or WTask or tuple of WTask
"""
if task_registry_id is not None:
task = None
for registered_task in self.__started:
if registered_task.__registry_tag__ == task_registry_id:
task = registered_task
if task_cls is not None and task is not None:
if isinstance(task, task_cls) is True:
return task
return None
return task
result = filter(lambda x: x is not None, self.__started)
if task_cls is not None:
result = filter(lambda x: isinstance(x, task_cls), result)
return tuple(result) | ['def', 'started_tasks', '(', 'self', ',', 'task_registry_id', '=', 'None', ',', 'task_cls', '=', 'None', ')', ':', 'if', 'task_registry_id', 'is', 'not', 'None', ':', 'task', '=', 'None', 'for', 'registered_task', 'in', 'self', '.', '__started', ':', 'if', 'registered_task', '.', '__registry_tag__', '==', 'task_registry_id', ':', 'task', '=', 'registered_task', 'if', 'task_cls', 'is', 'not', 'None', 'and', 'task', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'task', ',', 'task_cls', ')', 'is', 'True', ':', 'return', 'task', 'return', 'None', 'return', 'task', 'result', '=', 'filter', '(', 'lambda', 'x', ':', 'x', 'is', 'not', 'None', ',', 'self', '.', '__started', ')', 'if', 'task_cls', 'is', 'not', 'None', ':', 'result', '=', 'filter', '(', 'lambda', 'x', ':', 'isinstance', '(', 'x', ',', 'task_cls', ')', ',', 'result', ')', 'return', 'tuple', '(', 'result', ')'] | Return tasks that was started. Result way be filtered by the given arguments.
:param task_registry_id: if it is specified, then try to return single task which id is the same as \
this value.
:param task_cls: if it is specified then result will be consists of this subclass only
:return: None or WTask or tuple of WTask | ['Return', 'tasks', 'that', 'was', 'started', '.', 'Result', 'way', 'be', 'filtered', 'by', 'the', 'given', 'arguments', '.'] | train | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/dependency.py#L153-L178 |
2,010 | xapple/plumbing | plumbing/common.py | sanitize_text | def sanitize_text(text):
"""Make a safe representation of a string.
Note: the `\s` special character matches any whitespace character.
This is equivalent to the set [\t\n\r\f\v] as well as ` ` (whitespace)."""
# First replace characters that have specific effects with their repr #
text = re.sub("(\s)", lambda m: repr(m.group(0)).strip("'"), text)
# Make it a unicode string (the try supports python 2 and 3) #
try: text = text.decode('utf-8')
except AttributeError: pass
# Normalize it “
text = unicodedata.normalize('NFC', text)
return text | python | def sanitize_text(text):
"""Make a safe representation of a string.
Note: the `\s` special character matches any whitespace character.
This is equivalent to the set [\t\n\r\f\v] as well as ` ` (whitespace)."""
# First replace characters that have specific effects with their repr #
text = re.sub("(\s)", lambda m: repr(m.group(0)).strip("'"), text)
# Make it a unicode string (the try supports python 2 and 3) #
try: text = text.decode('utf-8')
except AttributeError: pass
# Normalize it “
text = unicodedata.normalize('NFC', text)
return text | ['def', 'sanitize_text', '(', 'text', ')', ':', '# First replace characters that have specific effects with their repr #', 'text', '=', 're', '.', 'sub', '(', '"(\\s)"', ',', 'lambda', 'm', ':', 'repr', '(', 'm', '.', 'group', '(', '0', ')', ')', '.', 'strip', '(', '"\'"', ')', ',', 'text', ')', '# Make it a unicode string (the try supports python 2 and 3) #', 'try', ':', 'text', '=', 'text', '.', 'decode', '(', "'utf-8'", ')', 'except', 'AttributeError', ':', 'pass', '# Normalize it “', 'text', '=', 'unicodedata', '.', 'normalize', '(', "'NFC'", ',', 'text', ')', 'return', 'text'] | Make a safe representation of a string.
Note: the `\s` special character matches any whitespace character.
This is equivalent to the set [\t\n\r\f\v] as well as ` ` (whitespace). | ['Make', 'a', 'safe', 'representation', 'of', 'a', 'string', '.', 'Note', ':', 'the', '\\', 's', 'special', 'character', 'matches', 'any', 'whitespace', 'character', '.', 'This', 'is', 'equivalent', 'to', 'the', 'set', '[', '\\', 't', '\\', 'n', '\\', 'r', '\\', 'f', '\\', 'v', ']', 'as', 'well', 'as', '(', 'whitespace', ')', '.'] | train | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/common.py#L30-L41 |
2,011 | benedictpaten/sonLib | tree.py | normaliseWV | def normaliseWV(wV, normFac=1.0):
"""
make char probs divisible by one
"""
f = sum(wV) / normFac
return [ i/f for i in wV ] | python | def normaliseWV(wV, normFac=1.0):
"""
make char probs divisible by one
"""
f = sum(wV) / normFac
return [ i/f for i in wV ] | ['def', 'normaliseWV', '(', 'wV', ',', 'normFac', '=', '1.0', ')', ':', 'f', '=', 'sum', '(', 'wV', ')', '/', 'normFac', 'return', '[', 'i', '/', 'f', 'for', 'i', 'in', 'wV', ']'] | make char probs divisible by one | ['make', 'char', 'probs', 'divisible', 'by', 'one'] | train | https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/tree.py#L180-L185 |
2,012 | erijo/tellcore-py | tellcore/telldus.py | DeviceFactory | def DeviceFactory(id, lib=None):
"""Create the correct device instance based on device type and return it.
:return: a :class:`Device` or :class:`DeviceGroup` instance.
"""
lib = lib or Library()
if lib.tdGetDeviceType(id) == const.TELLSTICK_TYPE_GROUP:
return DeviceGroup(id, lib=lib)
return Device(id, lib=lib) | python | def DeviceFactory(id, lib=None):
"""Create the correct device instance based on device type and return it.
:return: a :class:`Device` or :class:`DeviceGroup` instance.
"""
lib = lib or Library()
if lib.tdGetDeviceType(id) == const.TELLSTICK_TYPE_GROUP:
return DeviceGroup(id, lib=lib)
return Device(id, lib=lib) | ['def', 'DeviceFactory', '(', 'id', ',', 'lib', '=', 'None', ')', ':', 'lib', '=', 'lib', 'or', 'Library', '(', ')', 'if', 'lib', '.', 'tdGetDeviceType', '(', 'id', ')', '==', 'const', '.', 'TELLSTICK_TYPE_GROUP', ':', 'return', 'DeviceGroup', '(', 'id', ',', 'lib', '=', 'lib', ')', 'return', 'Device', '(', 'id', ',', 'lib', '=', 'lib', ')'] | Create the correct device instance based on device type and return it.
:return: a :class:`Device` or :class:`DeviceGroup` instance. | ['Create', 'the', 'correct', 'device', 'instance', 'based', 'on', 'device', 'type', 'and', 'return', 'it', '.'] | train | https://github.com/erijo/tellcore-py/blob/7a1eb53e12ef039a2350933e502633df7560f6a8/tellcore/telldus.py#L266-L274 |
2,013 | vsoch/helpme | helpme/main/base/__init__.py | HelperBase.start | def start(self, positionals=None):
'''start the helper flow. We check helper system configurations to
determine components that should be collected for the submission.
This is where the client can also pass on any extra (positional)
arguments in a list from the user.
'''
bot.info('[helpme|%s]' %(self.name))
self.speak()
self._start(positionals) | python | def start(self, positionals=None):
'''start the helper flow. We check helper system configurations to
determine components that should be collected for the submission.
This is where the client can also pass on any extra (positional)
arguments in a list from the user.
'''
bot.info('[helpme|%s]' %(self.name))
self.speak()
self._start(positionals) | ['def', 'start', '(', 'self', ',', 'positionals', '=', 'None', ')', ':', 'bot', '.', 'info', '(', "'[helpme|%s]'", '%', '(', 'self', '.', 'name', ')', ')', 'self', '.', 'speak', '(', ')', 'self', '.', '_start', '(', 'positionals', ')'] | start the helper flow. We check helper system configurations to
determine components that should be collected for the submission.
This is where the client can also pass on any extra (positional)
arguments in a list from the user. | ['start', 'the', 'helper', 'flow', '.', 'We', 'check', 'helper', 'system', 'configurations', 'to', 'determine', 'components', 'that', 'should', 'be', 'collected', 'for', 'the', 'submission', '.', 'This', 'is', 'where', 'the', 'client', 'can', 'also', 'pass', 'on', 'any', 'extra', '(', 'positional', ')', 'arguments', 'in', 'a', 'list', 'from', 'the', 'user', '.'] | train | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/main/base/__init__.py#L105-L113 |
2,014 | pmacosta/pcsv | pcsv/dsort.py | dsort | def dsort(fname, order, has_header=True, frow=0, ofname=None):
r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer of float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
ofname = fname if ofname is None else ofname
obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
obj.dsort(order)
obj.write(fname=ofname, header=has_header, append=False) | python | def dsort(fname, order, has_header=True, frow=0, ofname=None):
r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer of float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
ofname = fname if ofname is None else ofname
obj = CsvFile(fname=fname, has_header=has_header, frow=frow)
obj.dsort(order)
obj.write(fname=ofname, header=has_header, append=False) | ['def', 'dsort', '(', 'fname', ',', 'order', ',', 'has_header', '=', 'True', ',', 'frow', '=', '0', ',', 'ofname', '=', 'None', ')', ':', 'ofname', '=', 'fname', 'if', 'ofname', 'is', 'None', 'else', 'ofname', 'obj', '=', 'CsvFile', '(', 'fname', '=', 'fname', ',', 'has_header', '=', 'has_header', ',', 'frow', '=', 'frow', ')', 'obj', '.', 'dsort', '(', 'order', ')', 'obj', '.', 'write', '(', 'fname', '=', 'ofname', ',', 'header', '=', 'has_header', ',', 'append', '=', 'False', ')'] | r"""
Sort file data.
:param fname: Name of the comma-separated values file to sort
:type fname: FileNameExists_
:param order: Sort order
:type order: :ref:`CsvColFilter`
:param has_header: Flag that indicates whether the comma-separated
values file to sort has column headers in its first line
(True) or not (False)
:type has_header: boolean
:param frow: First data row (starting from 1). If 0 the row where data
starts is auto-detected as the first row that has a number
(integer of float) in at least one of its columns
:type frow: NonNegativeInteger_
:param ofname: Name of the output comma-separated values file, the file
that will contain the sorted data. If None the sorting is
done "in place"
:type ofname: FileName_ or None
.. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for pcsv.dsort.dsort
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frow\` is not valid)
* RuntimeError (Argument \`has_header\` is not valid)
* RuntimeError (Argument \`ofname\` is not valid)
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Column headers are not unique in file *[fname]*)
* RuntimeError (File *[fname]* has no valid data)
* RuntimeError (File *[fname]* is empty)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]] | ['r', 'Sort', 'file', 'data', '.'] | train | https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/dsort.py#L37-L93 |
2,015 | secdev/scapy | scapy/asn1/mib.py | _mib_register | def _mib_register(ident, value, the_mib, unresolved):
"""Internal function used to register an OID and its name in a MIBDict"""
if ident in the_mib or ident in unresolved:
return ident in the_mib
resval = []
not_resolved = 0
for v in value:
if _mib_re_integer.match(v):
resval.append(v)
else:
v = fixname(plain_str(v))
if v not in the_mib:
not_resolved = 1
if v in the_mib:
v = the_mib[v]
elif v in unresolved:
v = unresolved[v]
if isinstance(v, list):
resval += v
else:
resval.append(v)
if not_resolved:
unresolved[ident] = resval
return False
else:
the_mib[ident] = resval
keys = list(unresolved)
i = 0
while i < len(keys):
k = keys[i]
if _mib_register(k, unresolved[k], the_mib, {}):
del(unresolved[k])
del(keys[i])
i = 0
else:
i += 1
return True | python | def _mib_register(ident, value, the_mib, unresolved):
"""Internal function used to register an OID and its name in a MIBDict"""
if ident in the_mib or ident in unresolved:
return ident in the_mib
resval = []
not_resolved = 0
for v in value:
if _mib_re_integer.match(v):
resval.append(v)
else:
v = fixname(plain_str(v))
if v not in the_mib:
not_resolved = 1
if v in the_mib:
v = the_mib[v]
elif v in unresolved:
v = unresolved[v]
if isinstance(v, list):
resval += v
else:
resval.append(v)
if not_resolved:
unresolved[ident] = resval
return False
else:
the_mib[ident] = resval
keys = list(unresolved)
i = 0
while i < len(keys):
k = keys[i]
if _mib_register(k, unresolved[k], the_mib, {}):
del(unresolved[k])
del(keys[i])
i = 0
else:
i += 1
return True | ['def', '_mib_register', '(', 'ident', ',', 'value', ',', 'the_mib', ',', 'unresolved', ')', ':', 'if', 'ident', 'in', 'the_mib', 'or', 'ident', 'in', 'unresolved', ':', 'return', 'ident', 'in', 'the_mib', 'resval', '=', '[', ']', 'not_resolved', '=', '0', 'for', 'v', 'in', 'value', ':', 'if', '_mib_re_integer', '.', 'match', '(', 'v', ')', ':', 'resval', '.', 'append', '(', 'v', ')', 'else', ':', 'v', '=', 'fixname', '(', 'plain_str', '(', 'v', ')', ')', 'if', 'v', 'not', 'in', 'the_mib', ':', 'not_resolved', '=', '1', 'if', 'v', 'in', 'the_mib', ':', 'v', '=', 'the_mib', '[', 'v', ']', 'elif', 'v', 'in', 'unresolved', ':', 'v', '=', 'unresolved', '[', 'v', ']', 'if', 'isinstance', '(', 'v', ',', 'list', ')', ':', 'resval', '+=', 'v', 'else', ':', 'resval', '.', 'append', '(', 'v', ')', 'if', 'not_resolved', ':', 'unresolved', '[', 'ident', ']', '=', 'resval', 'return', 'False', 'else', ':', 'the_mib', '[', 'ident', ']', '=', 'resval', 'keys', '=', 'list', '(', 'unresolved', ')', 'i', '=', '0', 'while', 'i', '<', 'len', '(', 'keys', ')', ':', 'k', '=', 'keys', '[', 'i', ']', 'if', '_mib_register', '(', 'k', ',', 'unresolved', '[', 'k', ']', ',', 'the_mib', ',', '{', '}', ')', ':', 'del', '(', 'unresolved', '[', 'k', ']', ')', 'del', '(', 'keys', '[', 'i', ']', ')', 'i', '=', '0', 'else', ':', 'i', '+=', '1', 'return', 'True'] | Internal function used to register an OID and its name in a MIBDict | ['Internal', 'function', 'used', 'to', 'register', 'an', 'OID', 'and', 'its', 'name', 'in', 'a', 'MIBDict'] | train | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/asn1/mib.py#L91-L128 |
2,016 | OpenHydrology/floodestimation | floodestimation/analysis.py | GrowthCurveAnalysis.find_donor_catchments | def find_donor_catchments(self, include_subject_catchment='auto'):
"""
Find list of suitable donor cachments, ranked by hydrological similarity distance measure. This method is
implicitly called when calling the :meth:`.growth_curve` method unless the attribute :attr:`.donor_catchments`
is set manually.
The results are stored in :attr:`.donor_catchments`. The (list of)
:class:`floodestimation.entities.Catchment` will have an additional attribute :attr:`similarity_dist`.
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext2000
< 0.03
- `force`: always include subject catchment
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str
"""
# Only if we have access to db with gauged catchment data
if self.gauged_cachments:
self.donor_catchments = self.gauged_cachments. \
most_similar_catchments(subject_catchment=self.catchment,
similarity_dist_function=lambda c1, c2: self._similarity_distance(c1, c2),
include_subject_catchment=include_subject_catchment)
else:
self.donor_catchments = [] | python | def find_donor_catchments(self, include_subject_catchment='auto'):
"""
Find list of suitable donor cachments, ranked by hydrological similarity distance measure. This method is
implicitly called when calling the :meth:`.growth_curve` method unless the attribute :attr:`.donor_catchments`
is set manually.
The results are stored in :attr:`.donor_catchments`. The (list of)
:class:`floodestimation.entities.Catchment` will have an additional attribute :attr:`similarity_dist`.
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext2000
< 0.03
- `force`: always include subject catchment
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str
"""
# Only if we have access to db with gauged catchment data
if self.gauged_cachments:
self.donor_catchments = self.gauged_cachments. \
most_similar_catchments(subject_catchment=self.catchment,
similarity_dist_function=lambda c1, c2: self._similarity_distance(c1, c2),
include_subject_catchment=include_subject_catchment)
else:
self.donor_catchments = [] | ['def', 'find_donor_catchments', '(', 'self', ',', 'include_subject_catchment', '=', "'auto'", ')', ':', '# Only if we have access to db with gauged catchment data', 'if', 'self', '.', 'gauged_cachments', ':', 'self', '.', 'donor_catchments', '=', 'self', '.', 'gauged_cachments', '.', 'most_similar_catchments', '(', 'subject_catchment', '=', 'self', '.', 'catchment', ',', 'similarity_dist_function', '=', 'lambda', 'c1', ',', 'c2', ':', 'self', '.', '_similarity_distance', '(', 'c1', ',', 'c2', ')', ',', 'include_subject_catchment', '=', 'include_subject_catchment', ')', 'else', ':', 'self', '.', 'donor_catchments', '=', '[', ']'] | Find list of suitable donor cachments, ranked by hydrological similarity distance measure. This method is
implicitly called when calling the :meth:`.growth_curve` method unless the attribute :attr:`.donor_catchments`
is set manually.
The results are stored in :attr:`.donor_catchments`. The (list of)
:class:`floodestimation.entities.Catchment` will have an additional attribute :attr:`similarity_dist`.
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext2000
< 0.03
- `force`: always include subject catchment
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str | ['Find', 'list', 'of', 'suitable', 'donor', 'cachments', 'ranked', 'by', 'hydrological', 'similarity', 'distance', 'measure', '.', 'This', 'method', 'is', 'implicitly', 'called', 'when', 'calling', 'the', ':', 'meth', ':', '.', 'growth_curve', 'method', 'unless', 'the', 'attribute', ':', 'attr', ':', '.', 'donor_catchments', 'is', 'set', 'manually', '.'] | train | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L897-L920 |
2,017 | Duke-QCD/hic | hic/flow.py | Sampler.sample | def sample(self, multiplicity):
r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles.
"""
if self._n is None:
return self._uniform_phi(multiplicity)
# Since the flow PDF does not have an analytic inverse CDF, I use a
# simple accept-reject sampling algorithm. This is reasonably
# efficient since for normal-sized vn, the PDF is close to flat. Now
# due to the overhead of Python functions, it's desirable to minimize
# the number of calls to the random number generator. Therefore I
# sample numbers in chunks; most of the time only one or two chunks
# should be needed. Eventually, I might rewrite this with Cython, but
# it's fast enough for now.
N = 0 # number of phi that have been sampled
phi = np.empty(multiplicity) # allocate array for phi
pdf_max = 1 + 2*self._vn.sum() # sampling efficiency ~ 1/pdf_max
while N < multiplicity:
n_remaining = multiplicity - N
n_to_sample = int(1.03*pdf_max*n_remaining)
phi_chunk = self._uniform_phi(n_to_sample)
phi_chunk = phi_chunk[self._pdf(phi_chunk) >
np.random.uniform(0, pdf_max, n_to_sample)]
K = min(phi_chunk.size, n_remaining) # number of phi to take
phi[N:N+K] = phi_chunk[:K]
N += K
return phi | python | def sample(self, multiplicity):
r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles.
"""
if self._n is None:
return self._uniform_phi(multiplicity)
# Since the flow PDF does not have an analytic inverse CDF, I use a
# simple accept-reject sampling algorithm. This is reasonably
# efficient since for normal-sized vn, the PDF is close to flat. Now
# due to the overhead of Python functions, it's desirable to minimize
# the number of calls to the random number generator. Therefore I
# sample numbers in chunks; most of the time only one or two chunks
# should be needed. Eventually, I might rewrite this with Cython, but
# it's fast enough for now.
N = 0 # number of phi that have been sampled
phi = np.empty(multiplicity) # allocate array for phi
pdf_max = 1 + 2*self._vn.sum() # sampling efficiency ~ 1/pdf_max
while N < multiplicity:
n_remaining = multiplicity - N
n_to_sample = int(1.03*pdf_max*n_remaining)
phi_chunk = self._uniform_phi(n_to_sample)
phi_chunk = phi_chunk[self._pdf(phi_chunk) >
np.random.uniform(0, pdf_max, n_to_sample)]
K = min(phi_chunk.size, n_remaining) # number of phi to take
phi[N:N+K] = phi_chunk[:K]
N += K
return phi | ['def', 'sample', '(', 'self', ',', 'multiplicity', ')', ':', 'if', 'self', '.', '_n', 'is', 'None', ':', 'return', 'self', '.', '_uniform_phi', '(', 'multiplicity', ')', '# Since the flow PDF does not have an analytic inverse CDF, I use a', '# simple accept-reject sampling algorithm. This is reasonably', '# efficient since for normal-sized vn, the PDF is close to flat. Now', "# due to the overhead of Python functions, it's desirable to minimize", '# the number of calls to the random number generator. Therefore I', '# sample numbers in chunks; most of the time only one or two chunks', '# should be needed. Eventually, I might rewrite this with Cython, but', "# it's fast enough for now.", 'N', '=', '0', '# number of phi that have been sampled', 'phi', '=', 'np', '.', 'empty', '(', 'multiplicity', ')', '# allocate array for phi', 'pdf_max', '=', '1', '+', '2', '*', 'self', '.', '_vn', '.', 'sum', '(', ')', '# sampling efficiency ~ 1/pdf_max', 'while', 'N', '<', 'multiplicity', ':', 'n_remaining', '=', 'multiplicity', '-', 'N', 'n_to_sample', '=', 'int', '(', '1.03', '*', 'pdf_max', '*', 'n_remaining', ')', 'phi_chunk', '=', 'self', '.', '_uniform_phi', '(', 'n_to_sample', ')', 'phi_chunk', '=', 'phi_chunk', '[', 'self', '.', '_pdf', '(', 'phi_chunk', ')', '>', 'np', '.', 'random', '.', 'uniform', '(', '0', ',', 'pdf_max', ',', 'n_to_sample', ')', ']', 'K', '=', 'min', '(', 'phi_chunk', '.', 'size', ',', 'n_remaining', ')', '# number of phi to take', 'phi', '[', 'N', ':', 'N', '+', 'K', ']', '=', 'phi_chunk', '[', ':', 'K', ']', 'N', '+=', 'K', 'return', 'phi'] | r"""
Randomly sample azimuthal angles `\phi`.
:param int multiplicity: Number to sample.
:returns: Array of sampled angles. | ['r', 'Randomly', 'sample', 'azimuthal', 'angles', '\\', 'phi', '.'] | train | https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L319-L354 |
2,018 | Dallinger/Dallinger | dallinger/db.py | init_db | def init_db(drop_all=False, bind=engine):
"""Initialize the database, optionally dropping existing tables."""
try:
if drop_all:
Base.metadata.drop_all(bind=bind)
Base.metadata.create_all(bind=bind)
except OperationalError as err:
msg = 'password authentication failed for user "dallinger"'
if msg in err.message:
sys.stderr.write(db_user_warning)
raise
return session | python | def init_db(drop_all=False, bind=engine):
"""Initialize the database, optionally dropping existing tables."""
try:
if drop_all:
Base.metadata.drop_all(bind=bind)
Base.metadata.create_all(bind=bind)
except OperationalError as err:
msg = 'password authentication failed for user "dallinger"'
if msg in err.message:
sys.stderr.write(db_user_warning)
raise
return session | ['def', 'init_db', '(', 'drop_all', '=', 'False', ',', 'bind', '=', 'engine', ')', ':', 'try', ':', 'if', 'drop_all', ':', 'Base', '.', 'metadata', '.', 'drop_all', '(', 'bind', '=', 'bind', ')', 'Base', '.', 'metadata', '.', 'create_all', '(', 'bind', '=', 'bind', ')', 'except', 'OperationalError', 'as', 'err', ':', 'msg', '=', '\'password authentication failed for user "dallinger"\'', 'if', 'msg', 'in', 'err', '.', 'message', ':', 'sys', '.', 'stderr', '.', 'write', '(', 'db_user_warning', ')', 'raise', 'return', 'session'] | Initialize the database, optionally dropping existing tables. | ['Initialize', 'the', 'database', 'optionally', 'dropping', 'existing', 'tables', '.'] | train | https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/db.py#L104-L116 |
2,019 | sdispater/eloquent | eloquent/migrations/migration_creator.py | MigrationCreator._populate_stub | def _populate_stub(self, name, stub, table):
"""
Populate the placeholders in the migration stub.
:param name: The name of the migration
:type name: str
:param stub: The stub
:type stub: str
:param table: The table name
:type table: str
:rtype: str
"""
stub = stub.replace('DummyClass', self._get_class_name(name))
if table is not None:
stub = stub.replace('dummy_table', table)
return stub | python | def _populate_stub(self, name, stub, table):
"""
Populate the placeholders in the migration stub.
:param name: The name of the migration
:type name: str
:param stub: The stub
:type stub: str
:param table: The table name
:type table: str
:rtype: str
"""
stub = stub.replace('DummyClass', self._get_class_name(name))
if table is not None:
stub = stub.replace('dummy_table', table)
return stub | ['def', '_populate_stub', '(', 'self', ',', 'name', ',', 'stub', ',', 'table', ')', ':', 'stub', '=', 'stub', '.', 'replace', '(', "'DummyClass'", ',', 'self', '.', '_get_class_name', '(', 'name', ')', ')', 'if', 'table', 'is', 'not', 'None', ':', 'stub', '=', 'stub', '.', 'replace', '(', "'dummy_table'", ',', 'table', ')', 'return', 'stub'] | Populate the placeholders in the migration stub.
:param name: The name of the migration
:type name: str
:param stub: The stub
:type stub: str
:param table: The table name
:type table: str
:rtype: str | ['Populate', 'the', 'placeholders', 'in', 'the', 'migration', 'stub', '.'] | train | https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/migrations/migration_creator.py#L70-L90 |
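To make the placeholder substitution above concrete, a small illustration; the stub text here is invented, not the package's bundled stub.

stub = (
    "class DummyClass(Migration):\n"
    "    def up(self):\n"
    "        with self.schema.create('dummy_table') as table:\n"
    "            table.increments('id')\n"
)
populated = stub.replace('DummyClass', 'CreateUsersTable').replace('dummy_table', 'users')
print(populated)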
2,020 | nickmasster/xsmtplib | xsmtplib/xsmtplib.py | SMTP.connect_proxy | def connect_proxy(self, proxy_host='localhost', proxy_port=0, proxy_type=socks.HTTP,
host='localhost', port=0):
"""Connect to a host on a given port via proxy server
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host and proxy server are
specified during instantiation.
:param proxy_host: Hostname of proxy server
:type proxy_host: string
:param proxy_port: Port of proxy server, by default port for specified proxy type is used
:type proxy_port: int
:param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
:type proxy_type: int
:param host: Hostname of SMTP server
:type host: string
:param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
:type port: int
:return: Tuple of (code, msg)
:rtype: tuple
"""
if proxy_type not in socks.DEFAULT_PORTS.keys():
raise NotSupportedProxyType
(proxy_host, proxy_port) = self._parse_host(host=proxy_host, port=proxy_port)
if not proxy_port:
proxy_port = socks.DEFAULT_PORTS[proxy_type]
(host, port) = self._parse_host(host=host, port=port)
if self.debuglevel > 0:
self._print_debug('connect: via proxy', proxy_host, proxy_port)
s = socks.socksocket()
s.set_proxy(proxy_type=proxy_type, addr=proxy_host, port=proxy_port)
s.settimeout(self.timeout)
if self.source_address is not None:
s.bind(self.source_address)
s.connect((host, port))
# todo
# Send CRLF in order to get first response from destination server.
# Probably it's needed only for HTTP proxies. Further investigation required.
s.sendall(bCRLF)
self.sock = s
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return code, msg | python | def connect_proxy(self, proxy_host='localhost', proxy_port=0, proxy_type=socks.HTTP,
host='localhost', port=0):
"""Connect to a host on a given port via proxy server
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host and proxy server are
specified during instantiation.
:param proxy_host: Hostname of proxy server
:type proxy_host: string
:param proxy_port: Port of proxy server, by default port for specified proxy type is used
:type proxy_port: int
:param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
:type proxy_type: int
:param host: Hostname of SMTP server
:type host: string
:param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
:type port: int
:return: Tuple of (code, msg)
:rtype: tuple
"""
if proxy_type not in socks.DEFAULT_PORTS.keys():
raise NotSupportedProxyType
(proxy_host, proxy_port) = self._parse_host(host=proxy_host, port=proxy_port)
if not proxy_port:
proxy_port = socks.DEFAULT_PORTS[proxy_type]
(host, port) = self._parse_host(host=host, port=port)
if self.debuglevel > 0:
self._print_debug('connect: via proxy', proxy_host, proxy_port)
s = socks.socksocket()
s.set_proxy(proxy_type=proxy_type, addr=proxy_host, port=proxy_port)
s.settimeout(self.timeout)
if self.source_address is not None:
s.bind(self.source_address)
s.connect((host, port))
# todo
# Send CRLF in order to get first response from destination server.
# Probably it's needed only for HTTP proxies. Further investigation required.
s.sendall(bCRLF)
self.sock = s
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return code, msg | ['def', 'connect_proxy', '(', 'self', ',', 'proxy_host', '=', "'localhost'", ',', 'proxy_port', '=', '0', ',', 'proxy_type', '=', 'socks', '.', 'HTTP', ',', 'host', '=', "'localhost'", ',', 'port', '=', '0', ')', ':', 'if', 'proxy_type', 'not', 'in', 'socks', '.', 'DEFAULT_PORTS', '.', 'keys', '(', ')', ':', 'raise', 'NotSupportedProxyType', '(', 'proxy_host', ',', 'proxy_port', ')', '=', 'self', '.', '_parse_host', '(', 'host', '=', 'proxy_host', ',', 'port', '=', 'proxy_port', ')', 'if', 'not', 'proxy_port', ':', 'proxy_port', '=', 'socks', '.', 'DEFAULT_PORTS', '[', 'proxy_type', ']', '(', 'host', ',', 'port', ')', '=', 'self', '.', '_parse_host', '(', 'host', '=', 'host', ',', 'port', '=', 'port', ')', 'if', 'self', '.', 'debuglevel', '>', '0', ':', 'self', '.', '_print_debug', '(', "'connect: via proxy'", ',', 'proxy_host', ',', 'proxy_port', ')', 's', '=', 'socks', '.', 'socksocket', '(', ')', 's', '.', 'set_proxy', '(', 'proxy_type', '=', 'proxy_type', ',', 'addr', '=', 'proxy_host', ',', 'port', '=', 'proxy_port', ')', 's', '.', 'settimeout', '(', 'self', '.', 'timeout', ')', 'if', 'self', '.', 'source_address', 'is', 'not', 'None', ':', 's', '.', 'bind', '(', 'self', '.', 'source_address', ')', 's', '.', 'connect', '(', '(', 'host', ',', 'port', ')', ')', '# todo', '# Send CRLF in order to get first response from destination server.', "# Probably it's needed only for HTTP proxies. Further investigation required.", 's', '.', 'sendall', '(', 'bCRLF', ')', 'self', '.', 'sock', '=', 's', '(', 'code', ',', 'msg', ')', '=', 'self', '.', 'getreply', '(', ')', 'if', 'self', '.', 'debuglevel', '>', '0', ':', 'self', '.', '_print_debug', '(', "'connect:'", ',', 'repr', '(', 'msg', ')', ')', 'return', 'code', ',', 'msg'] | Connect to a host on a given port via proxy server
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host and proxy server are
specified during instantiation.
:param proxy_host: Hostname of proxy server
:type proxy_host: string
:param proxy_port: Port of proxy server, by default port for specified proxy type is used
:type proxy_port: int
:param proxy_type: Proxy type to use (see socks.PROXY_TYPES for details)
:type proxy_type: int
:param host: Hostname of SMTP server
:type host: string
:param port: Port of SMTP server, by default smtplib.SMTP_PORT is used
:type port: int
:return: Tuple of (code, msg)
:rtype: tuple | ['Connect', 'to', 'a', 'host', 'on', 'a', 'given', 'port', 'via', 'proxy', 'server'] | train | https://github.com/nickmasster/xsmtplib/blob/0207f5c72f2fec03f3ebdb3acb3a56401805f32f/xsmtplib/xsmtplib.py#L132-L183 |
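The proxied connection above boils down to the following PySocks calls; the host names and ports here are placeholders.

import socks

s = socks.socksocket()
s.set_proxy(proxy_type=socks.HTTP, addr="proxy.example.com", port=8080)
s.settimeout(30)
s.connect(("smtp.example.com", 25))
s.sendall(b"\r\n")            # nudge the server so the first reply arrives
banner = s.recv(1024)         # e.g. b"220 smtp.example.com ESMTP ..."
s.close()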
2,021 | ScienceLogic/amiuploader | amiimporter/AWSUtilities.py | AWSUtils.validate_bucket | def validate_bucket(self):
"""
Do a quick check to see if the s3 bucket is valid
:return:
"""
s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project,
self.aws_regions[0])
print "Checking for s3 bucket"
try:
subprocess.check_output(shlex.split(s3_check_cmd))
except subprocess.CalledProcessError as e:
print "Error: {}".format(e)
print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\
.format(self.bucket_name)
sys.exit(5) | python | def validate_bucket(self):
"""
Do a quick check to see if the s3 bucket is valid
:return:
"""
s3_check_cmd = "aws s3 ls s3://{} --profile '{}' --region '{}'".format(self.bucket_name, self.aws_project,
self.aws_regions[0])
print "Checking for s3 bucket"
try:
subprocess.check_output(shlex.split(s3_check_cmd))
except subprocess.CalledProcessError as e:
print "Error: {}".format(e)
print "Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"\
.format(self.bucket_name)
sys.exit(5) | ['def', 'validate_bucket', '(', 'self', ')', ':', 's3_check_cmd', '=', '"aws s3 ls s3://{} --profile \'{}\' --region \'{}\'"', '.', 'format', '(', 'self', '.', 'bucket_name', ',', 'self', '.', 'aws_project', ',', 'self', '.', 'aws_regions', '[', '0', ']', ')', 'print', '"Checking for s3 bucket"', 'try', ':', 'subprocess', '.', 'check_output', '(', 'shlex', '.', 'split', '(', 's3_check_cmd', ')', ')', 'except', 'subprocess', '.', 'CalledProcessError', 'as', 'e', ':', 'print', '"Error: {}"', '.', 'format', '(', 'e', ')', 'print', '"Unable to query s3 bucket: {}. Validate that it exists, and your user has sufficient permissions"', '.', 'format', '(', 'self', '.', 'bucket_name', ')', 'sys', '.', 'exit', '(', '5', ')'] | Do a quick check to see if the s3 bucket is valid
:return: | ['Do', 'a', 'quick', 'check', 'to', 'see', 'if', 'the', 's3', 'bucket', 'is', 'valid', ':', 'return', ':'] | train | https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L83-L97 |
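The bucket probe above is a generic subprocess pattern; a minimal sketch with placeholder bucket, profile and region values.

import shlex
import subprocess

cmd = "aws s3 ls s3://my-bucket --profile 'default' --region 'us-east-1'"
try:
    subprocess.check_output(shlex.split(cmd))
except subprocess.CalledProcessError as err:
    print("Bucket check failed: {}".format(err))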
2,022 | addisonlynch/iexfinance | iexfinance/__init__.py | get_available_symbols | def get_available_symbols(**kwargs):
"""
MOVED to iexfinance.refdata.get_symbols
"""
import warnings
warnings.warn(WNG_MSG % ("get_available_symbols", "refdata.get_symbols"))
_ALL_SYMBOLS_URL = "https://api.iextrading.com/1.0/ref-data/symbols"
handler = _IEXBase(**kwargs)
response = handler._execute_iex_query(_ALL_SYMBOLS_URL)
if not response:
raise IEXQueryError("Could not download all symbols")
else:
return response | python | def get_available_symbols(**kwargs):
"""
MOVED to iexfinance.refdata.get_symbols
"""
import warnings
warnings.warn(WNG_MSG % ("get_available_symbols", "refdata.get_symbols"))
_ALL_SYMBOLS_URL = "https://api.iextrading.com/1.0/ref-data/symbols"
handler = _IEXBase(**kwargs)
response = handler._execute_iex_query(_ALL_SYMBOLS_URL)
if not response:
raise IEXQueryError("Could not download all symbols")
else:
return response | ['def', 'get_available_symbols', '(', '*', '*', 'kwargs', ')', ':', 'import', 'warnings', 'warnings', '.', 'warn', '(', 'WNG_MSG', '%', '(', '"get_available_symbols"', ',', '"refdata.get_symbols"', ')', ')', '_ALL_SYMBOLS_URL', '=', '"https://api.iextrading.com/1.0/ref-data/symbols"', 'handler', '=', '_IEXBase', '(', '*', '*', 'kwargs', ')', 'response', '=', 'handler', '.', '_execute_iex_query', '(', '_ALL_SYMBOLS_URL', ')', 'if', 'not', 'response', ':', 'raise', 'IEXQueryError', '(', '"Could not download all symbols"', ')', 'else', ':', 'return', 'response'] | MOVED to iexfinance.refdata.get_symbols | ['MOVED', 'to', 'iexfinance', '.', 'refdata', '.', 'get_symbols'] | train | https://github.com/addisonlynch/iexfinance/blob/40f0bdcc51b329031d06178020fd774494250456/iexfinance/__init__.py#L73-L85 |
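The deprecation note in the record points at iexfinance.refdata.get_symbols; a hedged usage sketch (the exact return shape depends on the installed iexfinance version).

from iexfinance.refdata import get_symbols

symbols = get_symbols()                      # list of symbol records
tickers = [s["symbol"] for s in symbols]     # assumes each record carries a "symbol" key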
2,023 | inveniosoftware/invenio-search | invenio_search/ext.py | _SearchState.flush_and_refresh | def flush_and_refresh(self, index):
"""Flush and refresh one or more indices.
.. warning::
Do not call this method unless you know what you are doing. This
method is only intended to be called during tests.
"""
self.client.indices.flush(wait_if_ongoing=True, index=index)
self.client.indices.refresh(index=index)
self.client.cluster.health(
wait_for_status='yellow', request_timeout=30)
return True | python | def flush_and_refresh(self, index):
"""Flush and refresh one or more indices.
.. warning::
Do not call this method unless you know what you are doing. This
method is only intended to be called during tests.
"""
self.client.indices.flush(wait_if_ongoing=True, index=index)
self.client.indices.refresh(index=index)
self.client.cluster.health(
wait_for_status='yellow', request_timeout=30)
return True | ['def', 'flush_and_refresh', '(', 'self', ',', 'index', ')', ':', 'self', '.', 'client', '.', 'indices', '.', 'flush', '(', 'wait_if_ongoing', '=', 'True', ',', 'index', '=', 'index', ')', 'self', '.', 'client', '.', 'indices', '.', 'refresh', '(', 'index', '=', 'index', ')', 'self', '.', 'client', '.', 'cluster', '.', 'health', '(', 'wait_for_status', '=', "'yellow'", ',', 'request_timeout', '=', '30', ')', 'return', 'True'] | Flush and refresh one or more indices.
.. warning::
Do not call this method unless you know what you are doing. This
method is only intended to be called during tests. | ['Flush', 'and', 'refresh', 'one', 'or', 'more', 'indices', '.'] | train | https://github.com/inveniosoftware/invenio-search/blob/19c073d608d4c811f1c5aecb6622402d39715228/invenio_search/ext.py#L221-L233 |
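The same flush/refresh/health sequence with a bare elasticsearch-py client; the index name and connection details are placeholders.

from elasticsearch import Elasticsearch

client = Elasticsearch(["http://localhost:9200"])
client.indices.flush(wait_if_ongoing=True, index="records")
client.indices.refresh(index="records")
client.cluster.health(wait_for_status="yellow", request_timeout=30)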
2,024 | seibert-media/Highton | highton/fields/field.py | Field.encode | def encode(self):
"""
Encodes the value of the field and put it in the element
also make the check for nil=true if there is one
:return: returns the encoded element
:rtype: xml.etree.ElementTree.Element
"""
element = ElementTree.Element(self.name)
element = self._set_nil(element, lambda value: str(value))
return element | python | def encode(self):
"""
Encodes the value of the field and put it in the element
also make the check for nil=true if there is one
:return: returns the encoded element
:rtype: xml.etree.ElementTree.Element
"""
element = ElementTree.Element(self.name)
element = self._set_nil(element, lambda value: str(value))
return element | ['def', 'encode', '(', 'self', ')', ':', 'element', '=', 'ElementTree', '.', 'Element', '(', 'self', '.', 'name', ')', 'element', '=', 'self', '.', '_set_nil', '(', 'element', ',', 'lambda', 'value', ':', 'str', '(', 'value', ')', ')', 'return', 'element'] | Encodes the value of the field and put it in the element
also make the check for nil=true if there is one
:return: returns the encoded element
:rtype: xml.etree.ElementTree.Element | ['Encodes', 'the', 'value', 'of', 'the', 'field', 'and', 'put', 'it', 'in', 'the', 'element', 'also', 'make', 'the', 'check', 'for', 'nil', '=', 'true', 'if', 'there', 'is', 'one'] | train | https://github.com/seibert-media/Highton/blob/1519e4fb105f62882c2e7bc81065d994649558d8/highton/fields/field.py#L18-L28 |
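A rough standard-library illustration of what encode() produces; the element name, the value and the nil-attribute handling are assumptions about Highrise-style XML, not Highton's exact output.

from xml.etree import ElementTree

value = 12345                                   # hypothetical field value
element = ElementTree.Element("subject_id")     # hypothetical field name
if value is None:
    element.attrib["nil"] = "true"
else:
    element.text = str(value)
print(ElementTree.tostring(element))            # b'<subject_id>12345</subject_id>'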
2,025 | etingof/pysnmp | pysnmp/smi/mibs/SNMPv2-SMI.py | MibTableColumn.destroyCommit | def destroyCommit(self, varBind, **context):
"""Destroy Managed Object Instance.
Implements the second of the multi-step workflow similar to the SNMP SET
command processing (:RFC:`1905#section-4.2.5`).
The goal of the second phase is to actually remove requested Managed
Object Instance from the MIB tree. When multiple Managed Objects Instances
are destroyed/modified at once (likely coming all in one SNMP PDU), each
of them has to run through the second (*commit*) phase successfully for
the system to transition to the third (*cleanup*) phase. If any single
*commit* step fails, the system transitions into the *undo* state for
each of Managed Objects Instances being processed at once.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to destroy
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
* `instances` (dict): user-supplied dict for temporarily holding
Managed Objects Instances being destroyed.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object.
"""
name, val = varBind
(debug.logger & debug.FLAG_INS and
debug.logger('%s: destroyCommit(%s, %r)' % (self, name, val)))
instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
idx = context['idx']
# NOTE: multiple names are possible in a single PDU, that could collide
# Therefore let's keep old object indexed by (negative) var-bind index
try:
instances[self.ST_DESTROY][-idx - 1] = self._vars.pop(name)
except KeyError:
pass
cbFun = context['cbFun']
cbFun(varBind, **context) | python | def destroyCommit(self, varBind, **context):
"""Destroy Managed Object Instance.
Implements the second of the multi-step workflow similar to the SNMP SET
command processing (:RFC:`1905#section-4.2.5`).
The goal of the second phase is to actually remove requested Managed
Object Instance from the MIB tree. When multiple Managed Objects Instances
are destroyed/modified at once (likely coming all in one SNMP PDU), each
of them has to run through the second (*commit*) phase successfully for
the system to transition to the third (*cleanup*) phase. If any single
*commit* step fails, the system transitions into the *undo* state for
each of Managed Objects Instances being processed at once.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to destroy
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
* `instances` (dict): user-supplied dict for temporarily holding
Managed Objects Instances being destroyed.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object.
"""
name, val = varBind
(debug.logger & debug.FLAG_INS and
debug.logger('%s: destroyCommit(%s, %r)' % (self, name, val)))
instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
idx = context['idx']
# NOTE: multiple names are possible in a single PDU, that could collide
# Therefore let's keep old object indexed by (negative) var-bind index
try:
instances[self.ST_DESTROY][-idx - 1] = self._vars.pop(name)
except KeyError:
pass
cbFun = context['cbFun']
cbFun(varBind, **context) | ['def', 'destroyCommit', '(', 'self', ',', 'varBind', ',', '*', '*', 'context', ')', ':', 'name', ',', 'val', '=', 'varBind', '(', 'debug', '.', 'logger', '&', 'debug', '.', 'FLAG_INS', 'and', 'debug', '.', 'logger', '(', "'%s: destroyCommit(%s, %r)'", '%', '(', 'self', ',', 'name', ',', 'val', ')', ')', ')', 'instances', '=', 'context', '[', "'instances'", ']', '.', 'setdefault', '(', 'self', '.', 'name', ',', '{', 'self', '.', 'ST_CREATE', ':', '{', '}', ',', 'self', '.', 'ST_DESTROY', ':', '{', '}', '}', ')', 'idx', '=', 'context', '[', "'idx'", ']', '# NOTE: multiple names are possible in a single PDU, that could collide', "# Therefore let's keep old object indexed by (negative) var-bind index", 'try', ':', 'instances', '[', 'self', '.', 'ST_DESTROY', ']', '[', '-', 'idx', '-', '1', ']', '=', 'self', '.', '_vars', '.', 'pop', '(', 'name', ')', 'except', 'KeyError', ':', 'pass', 'cbFun', '=', 'context', '[', "'cbFun'", ']', 'cbFun', '(', 'varBind', ',', '*', '*', 'context', ')'] | Destroy Managed Object Instance.
Implements the second of the multi-step workflow similar to the SNMP SET
command processing (:RFC:`1905#section-4.2.5`).
The goal of the second phase is to actually remove requested Managed
Object Instance from the MIB tree. When multiple Managed Objects Instances
are destroyed/modified at once (likely coming all in one SNMP PDU), each
of them has to run through the second (*commit*) phase successfully for
the system to transition to the third (*cleanup*) phase. If any single
*commit* step fails, the system transitions into the *undo* state for
each of Managed Objects Instances being processed at once.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to destroy
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
* `instances` (dict): user-supplied dict for temporarily holding
Managed Objects Instances being destroyed.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object. | ['Destroy', 'Managed', 'Object', 'Instance', '.'] | train | https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/mibs/SNMPv2-SMI.py#L2267-L2327 |
2,026 | 475Cumulus/TBone | tbone/data/models.py | ModelSerializer.serialize | async def serialize(self, native=False):
'''
Returns a serialized form of the model taking into account projection rules and ``@serialize`` decorated methods.
:param native:
Determines if data is serialized to Python native types or primitive form. Defaults to ``False``
'''
data = {}
# iterate through all fields
for field_name, field in self._fields.items():
# serialize field data
raw_data = self._data.get(field_name)
# add field's data to model data based on projection settings
if field._projection != None: # noqa E711
field_data = await field.serialize(raw_data, native)
if field_data:
data[field_name] = field_data
elif field._projection == True: # noqa E711
data[field_name] = None
# iterate through all export methods
for name, func in self._serialize_methods.items():
data[name] = await func(self)
return data | python | async def serialize(self, native=False):
'''
Returns a serialized form of the model taking into account projection rules and ``@serialize`` decorated methods.
:param native:
Determines if data is serialized to Python native types or primitive form. Defaults to ``False``
'''
data = {}
# iterate through all fields
for field_name, field in self._fields.items():
# serialize field data
raw_data = self._data.get(field_name)
# add field's data to model data based on projection settings
if field._projection != None: # noqa E711
field_data = await field.serialize(raw_data, native)
if field_data:
data[field_name] = field_data
elif field._projection == True: # noqa E711
data[field_name] = None
# iterate through all export methods
for name, func in self._serialize_methods.items():
data[name] = await func(self)
return data | ['async', 'def', 'serialize', '(', 'self', ',', 'native', '=', 'False', ')', ':', 'data', '=', '{', '}', '# iterate through all fields', 'for', 'field_name', ',', 'field', 'in', 'self', '.', '_fields', '.', 'items', '(', ')', ':', '# serialize field data', 'raw_data', '=', 'self', '.', '_data', '.', 'get', '(', 'field_name', ')', "# add field's data to model data based on projection settings", 'if', 'field', '.', '_projection', '!=', 'None', ':', '# noqa E711', 'field_data', '=', 'await', 'field', '.', 'serialize', '(', 'raw_data', ',', 'native', ')', 'if', 'field_data', ':', 'data', '[', 'field_name', ']', '=', 'field_data', 'elif', 'field', '.', '_projection', '==', 'True', ':', '# noqa E711', 'data', '[', 'field_name', ']', '=', 'None', '# iterate through all export methods', 'for', 'name', ',', 'func', 'in', 'self', '.', '_serialize_methods', '.', 'items', '(', ')', ':', 'data', '[', 'name', ']', '=', 'await', 'func', '(', 'self', ')', 'return', 'data'] | Returns a serialized from of the model taking into account projection rules and ``@serialize`` decorated methods.
:param native:
Deternines if data is serialized to Python native types or primitive form. Defaults to ``False`` | ['Returns', 'a', 'serialized', 'from', 'of', 'the', 'model', 'taking', 'into', 'account', 'projection', 'rules', 'and'] | train | https://github.com/475Cumulus/TBone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/data/models.py#L134-L157 |
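A hedged usage sketch for the coroutine above; the model instance and event-loop setup are assumptions, the record only guarantees that serialize() is awaitable.

import asyncio

async def dump(model):
    primitive = await model.serialize()            # primitive (JSON-ready) form
    native = await model.serialize(native=True)    # Python-native types
    return primitive, native

# asyncio.run(dump(some_model_instance))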
2,027 | saltstack/salt | salt/modules/vsphere.py | vsan_add_disks | def vsan_add_disks(host, username, password, protocol=None, port=None, host_names=None):
'''
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts need to add any VSAN-eligible disks to the host's
VSAN system.
If host_names is not provided, VSAN-eligible disks will be added to the host's
VSAN system for the ``host`` location instead. This is useful for when service
instance connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
response = _get_vsan_eligible_disks(service_instance, host, host_names)
ret = {}
for host_name, value in six.iteritems(response):
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None:
msg = 'VSAN System Config Manager is unset for host \'{0}\'. ' \
'VSAN configuration cannot be changed without a configured ' \
'VSAN System.'.format(host_name)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
else:
eligible = value.get('Eligible')
error = value.get('Error')
if eligible and isinstance(eligible, list):
# If we have eligible, matching disks, add them to VSAN.
try:
task = vsan_system.AddDisks(eligible)
salt.utils.vmware.wait_for_task(task, host_name, 'Adding disks to VSAN', sleep_seconds=3)
except vim.fault.InsufficientDisks as err:
log.debug(err.msg)
ret.update({host_name: {'Error': err.msg}})
continue
except Exception as err:
msg = '\'vsphere.vsan_add_disks\' failed for host {0}: {1}'.format(host_name, err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
log.debug(
'Successfully added disks to the VSAN system for host \'%s\'.',
host_name
)
# We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects.
disk_names = []
for disk in eligible:
disk_names.append(disk.canonicalName)
ret.update({host_name: {'Disks Added': disk_names}})
elif eligible and isinstance(eligible, six.string_types):
# If we have a string type in the eligible value, we don't
# have any VSAN-eligible disks. Pull the message through.
ret.update({host_name: {'Disks Added': eligible}})
elif error:
# If we hit an error, populate the Error return dict for state functions.
ret.update({host_name: {'Error': error}})
else:
# If we made it this far, we somehow have eligible disks, but they didn't
# match the disk list and just got an empty list of matching disks.
ret.update({host_name: {'Disks Added': 'No new VSAN-eligible disks were found to add.'}})
return ret | python | def vsan_add_disks(host, username, password, protocol=None, port=None, host_names=None):
'''
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts need to add any VSAN-eligible disks to the host's
VSAN system.
If host_names is not provided, VSAN-eligible disks will be added to the host's
VSAN system for the ``host`` location instead. This is useful for when service
instance connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
'''
service_instance = salt.utils.vmware.get_service_instance(host=host,
username=username,
password=password,
protocol=protocol,
port=port)
host_names = _check_hosts(service_instance, host, host_names)
response = _get_vsan_eligible_disks(service_instance, host, host_names)
ret = {}
for host_name, value in six.iteritems(response):
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None:
msg = 'VSAN System Config Manager is unset for host \'{0}\'. ' \
'VSAN configuration cannot be changed without a configured ' \
'VSAN System.'.format(host_name)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
else:
eligible = value.get('Eligible')
error = value.get('Error')
if eligible and isinstance(eligible, list):
# If we have eligible, matching disks, add them to VSAN.
try:
task = vsan_system.AddDisks(eligible)
salt.utils.vmware.wait_for_task(task, host_name, 'Adding disks to VSAN', sleep_seconds=3)
except vim.fault.InsufficientDisks as err:
log.debug(err.msg)
ret.update({host_name: {'Error': err.msg}})
continue
except Exception as err:
msg = '\'vsphere.vsan_add_disks\' failed for host {0}: {1}'.format(host_name, err)
log.debug(msg)
ret.update({host_name: {'Error': msg}})
continue
log.debug(
'Successfully added disks to the VSAN system for host \'%s\'.',
host_name
)
# We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects.
disk_names = []
for disk in eligible:
disk_names.append(disk.canonicalName)
ret.update({host_name: {'Disks Added': disk_names}})
elif eligible and isinstance(eligible, six.string_types):
# If we have a string type in the eligible value, we don't
# have any VSAN-eligible disks. Pull the message through.
ret.update({host_name: {'Disks Added': eligible}})
elif error:
# If we hit an error, populate the Error return dict for state functions.
ret.update({host_name: {'Error': error}})
else:
# If we made it this far, we somehow have eligible disks, but they didn't
# match the disk list and just got an empty list of matching disks.
ret.update({host_name: {'Disks Added': 'No new VSAN-eligible disks were found to add.'}})
return ret | ['def', 'vsan_add_disks', '(', 'host', ',', 'username', ',', 'password', ',', 'protocol', '=', 'None', ',', 'port', '=', 'None', ',', 'host_names', '=', 'None', ')', ':', 'service_instance', '=', 'salt', '.', 'utils', '.', 'vmware', '.', 'get_service_instance', '(', 'host', '=', 'host', ',', 'username', '=', 'username', ',', 'password', '=', 'password', ',', 'protocol', '=', 'protocol', ',', 'port', '=', 'port', ')', 'host_names', '=', '_check_hosts', '(', 'service_instance', ',', 'host', ',', 'host_names', ')', 'response', '=', '_get_vsan_eligible_disks', '(', 'service_instance', ',', 'host', ',', 'host_names', ')', 'ret', '=', '{', '}', 'for', 'host_name', ',', 'value', 'in', 'six', '.', 'iteritems', '(', 'response', ')', ':', 'host_ref', '=', '_get_host_ref', '(', 'service_instance', ',', 'host', ',', 'host_name', '=', 'host_name', ')', 'vsan_system', '=', 'host_ref', '.', 'configManager', '.', 'vsanSystem', '# We must have a VSAN Config in place before we can manipulate it.', 'if', 'vsan_system', 'is', 'None', ':', 'msg', '=', "'VSAN System Config Manager is unset for host \\'{0}\\'. '", "'VSAN configuration cannot be changed without a configured '", "'VSAN System.'", '.', 'format', '(', 'host_name', ')', 'log', '.', 'debug', '(', 'msg', ')', 'ret', '.', 'update', '(', '{', 'host_name', ':', '{', "'Error'", ':', 'msg', '}', '}', ')', 'else', ':', 'eligible', '=', 'value', '.', 'get', '(', "'Eligible'", ')', 'error', '=', 'value', '.', 'get', '(', "'Error'", ')', 'if', 'eligible', 'and', 'isinstance', '(', 'eligible', ',', 'list', ')', ':', '# If we have eligible, matching disks, add them to VSAN.', 'try', ':', 'task', '=', 'vsan_system', '.', 'AddDisks', '(', 'eligible', ')', 'salt', '.', 'utils', '.', 'vmware', '.', 'wait_for_task', '(', 'task', ',', 'host_name', ',', "'Adding disks to VSAN'", ',', 'sleep_seconds', '=', '3', ')', 'except', 'vim', '.', 'fault', '.', 'InsufficientDisks', 'as', 'err', ':', 'log', '.', 'debug', '(', 'err', '.', 'msg', ')', 'ret', '.', 'update', '(', '{', 'host_name', ':', '{', "'Error'", ':', 'err', '.', 'msg', '}', '}', ')', 'continue', 'except', 'Exception', 'as', 'err', ':', 'msg', '=', "'\\'vsphere.vsan_add_disks\\' failed for host {0}: {1}'", '.', 'format', '(', 'host_name', ',', 'err', ')', 'log', '.', 'debug', '(', 'msg', ')', 'ret', '.', 'update', '(', '{', 'host_name', ':', '{', "'Error'", ':', 'msg', '}', '}', ')', 'continue', 'log', '.', 'debug', '(', "'Successfully added disks to the VSAN system for host \\'%s\\'.'", ',', 'host_name', ')', "# We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects.", 'disk_names', '=', '[', ']', 'for', 'disk', 'in', 'eligible', ':', 'disk_names', '.', 'append', '(', 'disk', '.', 'canonicalName', ')', 'ret', '.', 'update', '(', '{', 'host_name', ':', '{', "'Disks Added'", ':', 'disk_names', '}', '}', ')', 'elif', 'eligible', 'and', 'isinstance', '(', 'eligible', ',', 'six', '.', 'string_types', ')', ':', "# If we have a string type in the eligible value, we don't", '# have any VSAN-eligible disks. 
Pull the message through.', 'ret', '.', 'update', '(', '{', 'host_name', ':', '{', "'Disks Added'", ':', 'eligible', '}', '}', ')', 'elif', 'error', ':', '# If we hit an error, populate the Error return dict for state functions.', 'ret', '.', 'update', '(', '{', 'host_name', ':', '{', "'Error'", ':', 'error', '}', '}', ')', 'else', ':', "# If we made it this far, we somehow have eligible disks, but they didn't", '# match the disk list and just got an empty list of matching disks.', 'ret', '.', 'update', '(', '{', 'host_name', ':', '{', "'Disks Added'", ':', "'No new VSAN-eligible disks were found to add.'", '}', '}', ')', 'return', 'ret'] | Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts need to add any VSAN-eligible disks to the host's
VSAN system.
If host_names is not provided, VSAN-eligible disks will be added to the host's
VSAN system for the ``host`` location instead. This is useful for when service
instance connection information is used for a single ESXi host.
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]' | ['Add', 'any', 'VSAN', '-', 'eligible', 'disks', 'to', 'the', 'VSAN', 'System', 'for', 'the', 'given', 'host', 'or', 'list', 'of', 'host_names', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L3367-L3469 |
2,028 | materialsproject/pymatgen | pymatgen/io/abinit/nodes.py | Node.isinstance | def isinstance(self, class_or_string):
"""
Check whether the node is an instance of `class_or_string`.
Unlike the standard isinstance builtin, the method accepts either a class or a string.
In the latter case, the string is compared with self.__class__.__name__ (case insensitive).
"""
if class_or_string is None:
return False
import inspect
if inspect.isclass(class_or_string):
return isinstance(self, class_or_string)
else:
return self.__class__.__name__.lower() == class_or_string.lower() | python | def isinstance(self, class_or_string):
"""
Check whether the node is an instance of `class_or_string`.
Unlike the standard isinstance builtin, the method accepts either a class or a string.
In the latter case, the string is compared with self.__class__.__name__ (case insensitive).
"""
if class_or_string is None:
return False
import inspect
if inspect.isclass(class_or_string):
return isinstance(self, class_or_string)
else:
return self.__class__.__name__.lower() == class_or_string.lower() | ['def', 'isinstance', '(', 'self', ',', 'class_or_string', ')', ':', 'if', 'class_or_string', 'is', 'None', ':', 'return', 'False', 'import', 'inspect', 'if', 'inspect', '.', 'isclass', '(', 'class_or_string', ')', ':', 'return', 'isinstance', '(', 'self', ',', 'class_or_string', ')', 'else', ':', 'return', 'self', '.', '__class__', '.', '__name__', '.', 'lower', '(', ')', '==', 'class_or_string', '.', 'lower', '(', ')'] | Check whether the node is a instance of `class_or_string`.
Unlike the standard isinstance builtin, the method accepts either a class or a string.
In the later case, the string is compared with self.__class__.__name__ (case insensitive). | ['Check', 'whether', 'the', 'node', 'is', 'a', 'instance', 'of', 'class_or_string', '.', 'Unlinke', 'the', 'standard', 'isinstance', 'builtin', 'the', 'method', 'accepts', 'either', 'a', 'class', 'or', 'a', 'string', '.', 'In', 'the', 'later', 'case', 'the', 'string', 'is', 'compared', 'with', 'self', '.', '__class__', '.', '__name__', '(', 'case', 'insensitive', ')', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/nodes.py#L537-L549 |
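A self-contained demo of the class-or-string dispatch used above; the Node and Task classes here are stand-ins, not pymatgen's real node types.

import inspect

class Node(object):
    def isinstance(self, class_or_string):
        if class_or_string is None:
            return False
        if inspect.isclass(class_or_string):
            return isinstance(self, class_or_string)
        return self.__class__.__name__.lower() == class_or_string.lower()

class Task(Node):
    pass

t = Task()
print(t.isinstance(Task))     # True  (class form, like the builtin)
print(t.isinstance("TASK"))   # True  (string form, case-insensitive)
print(t.isinstance("work"))   # False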
2,029 | Kensuke-Mitsuzawa/JapaneseTokenizers | JapaneseTokenizer/common/sever_handler.py | UnixProcessHandler.launch_process | def launch_process(self, command):
# type: (Union[bytes,text_type])->None
"""* What you can do
- It starts process and keep it.
"""
if not self.option is None:
command_plus_option = self.command + " " + self.option
else:
command_plus_option = self.command
if six.PY3:
if shutil.which(command) is None:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
else:
doc_command_string = "echo '' | {}".format(command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid | python | def launch_process(self, command):
# type: (Union[bytes,text_type])->None
"""* What you can do
- It starts process and keep it.
"""
if not self.option is None:
command_plus_option = self.command + " " + self.option
else:
command_plus_option = self.command
if six.PY3:
if shutil.which(command) is None:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid
else:
doc_command_string = "echo '' | {}".format(command)
command_check = os.system(doc_command_string)
if not command_check == 0:
raise Exception("No command at {}".format(command))
else:
self.process_analyzer = pexpect.spawnu(command_plus_option)
self.process_id = self.process_analyzer.pid | ['def', 'launch_process', '(', 'self', ',', 'command', ')', ':', '# type: (Union[bytes,text_type])->None', 'if', 'not', 'self', '.', 'option', 'is', 'None', ':', 'command_plus_option', '=', 'self', '.', 'command', '+', '" "', '+', 'self', '.', 'option', 'else', ':', 'command_plus_option', '=', 'self', '.', 'command', 'if', 'six', '.', 'PY3', ':', 'if', 'shutil', '.', 'which', '(', 'command', ')', 'is', 'None', ':', 'raise', 'Exception', '(', '"No command at {}"', '.', 'format', '(', 'command', ')', ')', 'else', ':', 'self', '.', 'process_analyzer', '=', 'pexpect', '.', 'spawnu', '(', 'command_plus_option', ')', 'self', '.', 'process_id', '=', 'self', '.', 'process_analyzer', '.', 'pid', 'else', ':', 'doc_command_string', '=', '"echo \'\' | {}"', '.', 'format', '(', 'command', ')', 'command_check', '=', 'os', '.', 'system', '(', 'doc_command_string', ')', 'if', 'not', 'command_check', '==', '0', ':', 'raise', 'Exception', '(', '"No command at {}"', '.', 'format', '(', 'command', ')', ')', 'else', ':', 'self', '.', 'process_analyzer', '=', 'pexpect', '.', 'spawnu', '(', 'command_plus_option', ')', 'self', '.', 'process_id', '=', 'self', '.', 'process_analyzer', '.', 'pid'] | * What you can do
- It starts process and keep it. | ['*', 'What', 'you', 'can', 'do', '-', 'It', 'starts', 'process', 'and', 'keep', 'it', '.'] | train | https://github.com/Kensuke-Mitsuzawa/JapaneseTokenizers/blob/3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c/JapaneseTokenizer/common/sever_handler.py#L44-L67 |
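The spawn-and-keep pattern above, stripped to its essentials; the analyzer command is a placeholder.

import shutil
import pexpect

command = "mecab"                                   # hypothetical analyzer binary
if shutil.which(command) is None:
    raise Exception("No command at {}".format(command))
process = pexpect.spawnu(command)
pid = process.pid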
2,030 | splunk/splunk-sdk-python | splunklib/binding.py | ResponseReader.read | def read(self, size = None):
"""Reads a given number of characters from the response.
:param size: The number of characters to read, or "None" to read the
entire response.
:type size: ``integer`` or "None"
"""
r = self._buffer
self._buffer = b''
if size is not None:
size -= len(r)
r = r + self._response.read(size)
return r | python | def read(self, size = None):
"""Reads a given number of characters from the response.
:param size: The number of characters to read, or "None" to read the
entire response.
:type size: ``integer`` or "None"
"""
r = self._buffer
self._buffer = b''
if size is not None:
size -= len(r)
r = r + self._response.read(size)
return r | ['def', 'read', '(', 'self', ',', 'size', '=', 'None', ')', ':', 'r', '=', 'self', '.', '_buffer', 'self', '.', '_buffer', '=', "b''", 'if', 'size', 'is', 'not', 'None', ':', 'size', '-=', 'len', '(', 'r', ')', 'r', '=', 'r', '+', 'self', '.', '_response', '.', 'read', '(', 'size', ')', 'return', 'r'] | Reads a given number of characters from the response.
:param size: The number of characters to read, or "None" to read the
entire response.
:type size: ``integer`` or "None" | ['Reads', 'a', 'given', 'number', 'of', 'characters', 'from', 'the', 'response', '.'] | train | https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/binding.py#L1303-L1316 |
2,031 | woolfson-group/isambard | isambard/ampal/protein.py | Polypeptide.valid_backbone_bond_lengths | def valid_backbone_bond_lengths(self, atol=0.1):
"""True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths.
"""
bond_lengths = self.backbone_bond_lengths
a1 = numpy.allclose(bond_lengths['n_ca'],
[ideal_backbone_bond_lengths['n_ca']] * len(self),
atol=atol)
a2 = numpy.allclose(bond_lengths['ca_c'],
[ideal_backbone_bond_lengths['ca_c']] * len(self),
atol=atol)
a3 = numpy.allclose(bond_lengths['c_o'],
[ideal_backbone_bond_lengths['c_o']] * len(self),
atol=atol)
a4 = numpy.allclose(bond_lengths['c_n'],
[ideal_backbone_bond_lengths['c_n']] *
(len(self) - 1),
atol=atol)
return all([a1, a2, a3, a4]) | python | def valid_backbone_bond_lengths(self, atol=0.1):
"""True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths.
"""
bond_lengths = self.backbone_bond_lengths
a1 = numpy.allclose(bond_lengths['n_ca'],
[ideal_backbone_bond_lengths['n_ca']] * len(self),
atol=atol)
a2 = numpy.allclose(bond_lengths['ca_c'],
[ideal_backbone_bond_lengths['ca_c']] * len(self),
atol=atol)
a3 = numpy.allclose(bond_lengths['c_o'],
[ideal_backbone_bond_lengths['c_o']] * len(self),
atol=atol)
a4 = numpy.allclose(bond_lengths['c_n'],
[ideal_backbone_bond_lengths['c_n']] *
(len(self) - 1),
atol=atol)
return all([a1, a2, a3, a4]) | ['def', 'valid_backbone_bond_lengths', '(', 'self', ',', 'atol', '=', '0.1', ')', ':', 'bond_lengths', '=', 'self', '.', 'backbone_bond_lengths', 'a1', '=', 'numpy', '.', 'allclose', '(', 'bond_lengths', '[', "'n_ca'", ']', ',', '[', 'ideal_backbone_bond_lengths', '[', "'n_ca'", ']', ']', '*', 'len', '(', 'self', ')', ',', 'atol', '=', 'atol', ')', 'a2', '=', 'numpy', '.', 'allclose', '(', 'bond_lengths', '[', "'ca_c'", ']', ',', '[', 'ideal_backbone_bond_lengths', '[', "'ca_c'", ']', ']', '*', 'len', '(', 'self', ')', ',', 'atol', '=', 'atol', ')', 'a3', '=', 'numpy', '.', 'allclose', '(', 'bond_lengths', '[', "'c_o'", ']', ',', '[', 'ideal_backbone_bond_lengths', '[', "'c_o'", ']', ']', '*', 'len', '(', 'self', ')', ',', 'atol', '=', 'atol', ')', 'a4', '=', 'numpy', '.', 'allclose', '(', 'bond_lengths', '[', "'c_n'", ']', ',', '[', 'ideal_backbone_bond_lengths', '[', "'c_n'", ']', ']', '*', '(', 'len', '(', 'self', ')', '-', '1', ')', ',', 'atol', '=', 'atol', ')', 'return', 'all', '(', '[', 'a1', ',', 'a2', ',', 'a3', ',', 'a4', ']', ')'] | True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths. | ['True', 'if', 'all', 'backbone', 'bonds', 'are', 'within', 'atol', 'Angstroms', 'of', 'the', 'expected', 'distance', '.'] | train | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/protein.py#L891-L923 |
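The tolerance test above reduces to numpy.allclose per bond type; the numbers below are illustrative, not taken from a real structure.

import numpy

measured_n_ca = [1.47, 1.46, 1.48]                  # hypothetical N-CA bond lengths
ideal_n_ca = 1.47                                   # assumed ideal value for the demo
ok = numpy.allclose(measured_n_ca, [ideal_n_ca] * len(measured_n_ca), atol=0.1)
print(ok)                                           # True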
2,032 | saltstack/salt | salt/utils/schedule.py | Schedule.run_job | def run_job(self, name):
'''
Run a schedule job now
'''
data = self._get_schedule().get(name, {})
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.error(
'Invalid function: %s in scheduled job %s.',
_func, name
)
if 'name' not in data:
data['name'] = name
log.info('Running Job: %s', name)
# Grab run, assume True
run = data.get('run', True)
if run:
self._run_job(_func, data) | python | def run_job(self, name):
'''
Run a schedule job now
'''
data = self._get_schedule().get(name, {})
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.error(
'Invalid function: %s in scheduled job %s.',
_func, name
)
if 'name' not in data:
data['name'] = name
log.info('Running Job: %s', name)
# Grab run, assume True
run = data.get('run', True)
if run:
self._run_job(_func, data) | ['def', 'run_job', '(', 'self', ',', 'name', ')', ':', 'data', '=', 'self', '.', '_get_schedule', '(', ')', '.', 'get', '(', 'name', ',', '{', '}', ')', 'if', "'function'", 'in', 'data', ':', 'func', '=', 'data', '[', "'function'", ']', 'elif', "'func'", 'in', 'data', ':', 'func', '=', 'data', '[', "'func'", ']', 'elif', "'fun'", 'in', 'data', ':', 'func', '=', 'data', '[', "'fun'", ']', 'else', ':', 'func', '=', 'None', 'if', 'not', 'isinstance', '(', 'func', ',', 'list', ')', ':', 'func', '=', '[', 'func', ']', 'for', '_func', 'in', 'func', ':', 'if', '_func', 'not', 'in', 'self', '.', 'functions', ':', 'log', '.', 'error', '(', "'Invalid function: %s in scheduled job %s.'", ',', '_func', ',', 'name', ')', 'if', "'name'", 'not', 'in', 'data', ':', 'data', '[', "'name'", ']', '=', 'name', 'log', '.', 'info', '(', "'Running Job: %s'", ',', 'name', ')', '# Grab run, assume True', 'run', '=', 'data', '.', 'get', '(', "'run'", ',', 'True', ')', 'if', 'run', ':', 'self', '.', '_run_job', '(', '_func', ',', 'data', ')'] | Run a schedule job now | ['Run', 'a', 'schedule', 'job', 'now'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L440-L470 |
2,033 | numenta/htmresearch | projects/l2_pooling/continuous_location.py | runBasic | def runBasic(noiseLevel=None, profile=False):
"""
Runs a basic experiment on continuous locations, learning a few locations on
four basic objects, and inferring one of them.
This experiment is mostly used for testing the pipeline, as the learned
locations are too random and sparse to actually perform inference.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"basic_continuous",
numCorticalColumns=2
)
objects = createObjectMachine(
machineType="continuous",
numInputBits=21,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=2,
)
objects.addObject(Sphere(radius=20), name="sphere")
objects.addObject(Cylinder(height=50, radius=20), name="cylinder")
objects.addObject(Box(dimensions=[10, 20, 30,]), name="box")
objects.addObject(Cube(width=20), name="cube")
learnConfig = {
"sphere": [("surface", 10)],
# the two learning config below will be exactly the same
"box": [("face", 5), ("edge", 5), ("vertex", 5)],
"cube": [(feature, 5) for feature in objects["cube"].getFeatures()],
"cylinder": [(feature, 5) for feature in objects["cylinder"].getFeatures()]
}
exp.learnObjects(
objects.provideObjectsToLearn(learnConfig, plot=True),
reset=True
)
if profile:
exp.printProfile()
inferConfig = {
"numSteps": 4,
"noiseLevel": noiseLevel,
"objectName": "cube",
"pairs": {
0: ["face", "face", "edge", "edge"],
1: ["edge", "face", "face", "edge"]
}
}
exp.infer(
objects.provideObjectToInfer(inferConfig, plot=True),
objectName="cube",
reset=True
)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
) | python | def runBasic(noiseLevel=None, profile=False):
"""
Runs a basic experiment on continuous locations, learning a few locations on
four basic objects, and inferring one of them.
This experiment is mostly used for testing the pipeline, as the learned
locations are too random and sparse to actually perform inference.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference
"""
exp = L4L2Experiment(
"basic_continuous",
numCorticalColumns=2
)
objects = createObjectMachine(
machineType="continuous",
numInputBits=21,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=2,
)
objects.addObject(Sphere(radius=20), name="sphere")
objects.addObject(Cylinder(height=50, radius=20), name="cylinder")
objects.addObject(Box(dimensions=[10, 20, 30,]), name="box")
objects.addObject(Cube(width=20), name="cube")
learnConfig = {
"sphere": [("surface", 10)],
# the two learning config below will be exactly the same
"box": [("face", 5), ("edge", 5), ("vertex", 5)],
"cube": [(feature, 5) for feature in objects["cube"].getFeatures()],
"cylinder": [(feature, 5) for feature in objects["cylinder"].getFeatures()]
}
exp.learnObjects(
objects.provideObjectsToLearn(learnConfig, plot=True),
reset=True
)
if profile:
exp.printProfile()
inferConfig = {
"numSteps": 4,
"noiseLevel": noiseLevel,
"objectName": "cube",
"pairs": {
0: ["face", "face", "edge", "edge"],
1: ["edge", "face", "face", "edge"]
}
}
exp.infer(
objects.provideObjectToInfer(inferConfig, plot=True),
objectName="cube",
reset=True
)
if profile:
exp.printProfile()
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
) | ['def', 'runBasic', '(', 'noiseLevel', '=', 'None', ',', 'profile', '=', 'False', ')', ':', 'exp', '=', 'L4L2Experiment', '(', '"basic_continuous"', ',', 'numCorticalColumns', '=', '2', ')', 'objects', '=', 'createObjectMachine', '(', 'machineType', '=', '"continuous"', ',', 'numInputBits', '=', '21', ',', 'sensorInputSize', '=', '1024', ',', 'externalInputSize', '=', '1024', ',', 'numCorticalColumns', '=', '2', ',', ')', 'objects', '.', 'addObject', '(', 'Sphere', '(', 'radius', '=', '20', ')', ',', 'name', '=', '"sphere"', ')', 'objects', '.', 'addObject', '(', 'Cylinder', '(', 'height', '=', '50', ',', 'radius', '=', '20', ')', ',', 'name', '=', '"cylinder"', ')', 'objects', '.', 'addObject', '(', 'Box', '(', 'dimensions', '=', '[', '10', ',', '20', ',', '30', ',', ']', ')', ',', 'name', '=', '"box"', ')', 'objects', '.', 'addObject', '(', 'Cube', '(', 'width', '=', '20', ')', ',', 'name', '=', '"cube"', ')', 'learnConfig', '=', '{', '"sphere"', ':', '[', '(', '"surface"', ',', '10', ')', ']', ',', '# the two learning config below will be exactly the same', '"box"', ':', '[', '(', '"face"', ',', '5', ')', ',', '(', '"edge"', ',', '5', ')', ',', '(', '"vertex"', ',', '5', ')', ']', ',', '"cube"', ':', '[', '(', 'feature', ',', '5', ')', 'for', 'feature', 'in', 'objects', '[', '"cube"', ']', '.', 'getFeatures', '(', ')', ']', ',', '"cylinder"', ':', '[', '(', 'feature', ',', '5', ')', 'for', 'feature', 'in', 'objects', '[', '"cylinder"', ']', '.', 'getFeatures', '(', ')', ']', '}', 'exp', '.', 'learnObjects', '(', 'objects', '.', 'provideObjectsToLearn', '(', 'learnConfig', ',', 'plot', '=', 'True', ')', ',', 'reset', '=', 'True', ')', 'if', 'profile', ':', 'exp', '.', 'printProfile', '(', ')', 'inferConfig', '=', '{', '"numSteps"', ':', '4', ',', '"noiseLevel"', ':', 'noiseLevel', ',', '"objectName"', ':', '"cube"', ',', '"pairs"', ':', '{', '0', ':', '[', '"face"', ',', '"face"', ',', '"edge"', ',', '"edge"', ']', ',', '1', ':', '[', '"edge"', ',', '"face"', ',', '"face"', ',', '"edge"', ']', '}', '}', 'exp', '.', 'infer', '(', 'objects', '.', 'provideObjectToInfer', '(', 'inferConfig', ',', 'plot', '=', 'True', ')', ',', 'objectName', '=', '"cube"', ',', 'reset', '=', 'True', ')', 'if', 'profile', ':', 'exp', '.', 'printProfile', '(', ')', 'exp', '.', 'plotInferenceStats', '(', 'fields', '=', '[', '"L2 Representation"', ',', '"Overlap L2 with object"', ',', '"L4 Representation"', ']', ',', ')'] | Runs a basic experiment on continuous locations, learning a few locations on
four basic objects, and inferring one of them.
This experiment is mostly used for testing the pipeline, as the learned
locations are too random and sparse to actually perform inference.
Parameters:
----------------------------
@param noiseLevel (float)
Noise level to add to the locations and features during inference
@param profile (bool)
If True, the network will be profiled after learning and inference | ['Runs', 'a', 'basic', 'experiment', 'on', 'continuous', 'locations', 'learning', 'a', 'few', 'locations', 'on', 'four', 'basic', 'objects', 'and', 'inferring', 'one', 'of', 'them', '.'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/continuous_location.py#L35-L107 |
2,034 | agoragames/kairos | kairos/sql_backend.py | SqlGauge._insert_data | def _insert_data(self, name, value, timestamp, interval, config, **kwargs):
'''Helper to insert data into sql.'''
conn = self._client.connect()
if not self._update_data(name, value, timestamp, interval, config, conn):
try:
kwargs = {
'name' : name,
'interval' : interval,
'i_time' : config['i_calc'].to_bucket(timestamp),
'value' : value
}
if not config['coarse']:
kwargs['r_time'] = config['r_calc'].to_bucket(timestamp)
stmt = self._table.insert().values(**kwargs)
result = conn.execute(stmt)
except:
# TODO: only catch IntegrityError
if not self._update_data(name, value, timestamp, interval, config, conn):
raise | python | def _insert_data(self, name, value, timestamp, interval, config, **kwargs):
'''Helper to insert data into sql.'''
conn = self._client.connect()
if not self._update_data(name, value, timestamp, interval, config, conn):
try:
kwargs = {
'name' : name,
'interval' : interval,
'i_time' : config['i_calc'].to_bucket(timestamp),
'value' : value
}
if not config['coarse']:
kwargs['r_time'] = config['r_calc'].to_bucket(timestamp)
stmt = self._table.insert().values(**kwargs)
result = conn.execute(stmt)
except:
# TODO: only catch IntegrityError
if not self._update_data(name, value, timestamp, interval, config, conn):
raise | ['def', '_insert_data', '(', 'self', ',', 'name', ',', 'value', ',', 'timestamp', ',', 'interval', ',', 'config', ',', '*', '*', 'kwargs', ')', ':', 'conn', '=', 'self', '.', '_client', '.', 'connect', '(', ')', 'if', 'not', 'self', '.', '_update_data', '(', 'name', ',', 'value', ',', 'timestamp', ',', 'interval', ',', 'config', ',', 'conn', ')', ':', 'try', ':', 'kwargs', '=', '{', "'name'", ':', 'name', ',', "'interval'", ':', 'interval', ',', "'i_time'", ':', 'config', '[', "'i_calc'", ']', '.', 'to_bucket', '(', 'timestamp', ')', ',', "'value'", ':', 'value', '}', 'if', 'not', 'config', '[', "'coarse'", ']', ':', 'kwargs', '[', "'r_time'", ']', '=', 'config', '[', "'r_calc'", ']', '.', 'to_bucket', '(', 'timestamp', ')', 'stmt', '=', 'self', '.', '_table', '.', 'insert', '(', ')', '.', 'values', '(', '*', '*', 'kwargs', ')', 'result', '=', 'conn', '.', 'execute', '(', 'stmt', ')', 'except', ':', '# TODO: only catch IntegrityError', 'if', 'not', 'self', '.', '_update_data', '(', 'name', ',', 'value', ',', 'timestamp', ',', 'interval', ',', 'config', ',', 'conn', ')', ':', 'raise'] | Helper to insert data into sql. | ['Helper', 'to', 'insert', 'data', 'into', 'sql', '.'] | train | https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/sql_backend.py#L508-L526 |
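Example sketch (not part of the dataset row above): the helper implements an update-first, insert-on-miss, retry-update-on-conflict pattern. A minimal standalone version of that pattern using the stdlib sqlite3 module; the table layout and metric names are hypothetical, not kairos internals.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE gauge (name TEXT, i_time INTEGER, value REAL, "
             "UNIQUE(name, i_time))")

def upsert(conn, name, i_time, value):
    # Try an update first; fall back to insert, and retry the update if a
    # concurrent writer created the row in between (IntegrityError).
    cur = conn.execute("UPDATE gauge SET value=? WHERE name=? AND i_time=?",
                       (value, name, i_time))
    if cur.rowcount:
        return
    try:
        conn.execute("INSERT INTO gauge (name, i_time, value) VALUES (?, ?, ?)",
                     (name, i_time, value))
    except sqlite3.IntegrityError:
        conn.execute("UPDATE gauge SET value=? WHERE name=? AND i_time=?",
                     (value, name, i_time))

upsert(conn, "cpu", 1700000000, 0.42)
upsert(conn, "cpu", 1700000000, 0.57)   # second call takes the update path
print(conn.execute("SELECT * FROM gauge").fetchall())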
2,035 | senaite/senaite.core | bika/lims/browser/widgets/referenceresultswidget.py | ReferenceResultsWidget.process_form | def process_form(self, instance, field, form,
empty_marker=None, emptyReturnsMarker=False):
"""Return a list of dictionaries fit for ReferenceResultsField
consumption. Only services which have float()able entries in result,min
and max field will be included. If any of min, max, or result fields
are blank, the row value is ignored here.
"""
values = {}
# Process settings from the reference definition first
ref_def = form.get("ReferenceDefinition")
ref_def_uid = ref_def and ref_def[0]
if ref_def_uid:
ref_def_obj = api.get_object_by_uid(ref_def_uid)
ref_results = ref_def_obj.getReferenceResults()
# store reference results by UID to avoid duplicates
rr_by_uid = dict(map(lambda r: (r.get("uid"), r), ref_results))
values.update(rr_by_uid)
# selected services
service_uids = form.get("uids", [])
for uid in service_uids:
result = self._get_spec_value(form, uid, "result")
if not result:
# User has to set a value for result subfield at least
continue
# If neither min nor max have been set, assume we only accept a
# discrete result (like if % of error was 0).
s_min = self._get_spec_value(form, uid, "min", result)
s_max = self._get_spec_value(form, uid, "max", result)
service = api.get_object_by_uid(uid)
values[uid] = {
"keyword": service.getKeyword(),
"uid": uid,
"result": result,
"min": s_min,
"max": s_max
}
return values.values(), {} | python | def process_form(self, instance, field, form,
empty_marker=None, emptyReturnsMarker=False):
"""Return a list of dictionaries fit for ReferenceResultsField
consumption. Only services which have float()able entries in result,min
and max field will be included. If any of min, max, or result fields
are blank, the row value is ignored here.
"""
values = {}
# Process settings from the reference definition first
ref_def = form.get("ReferenceDefinition")
ref_def_uid = ref_def and ref_def[0]
if ref_def_uid:
ref_def_obj = api.get_object_by_uid(ref_def_uid)
ref_results = ref_def_obj.getReferenceResults()
# store reference results by UID to avoid duplicates
rr_by_uid = dict(map(lambda r: (r.get("uid"), r), ref_results))
values.update(rr_by_uid)
# selected services
service_uids = form.get("uids", [])
for uid in service_uids:
result = self._get_spec_value(form, uid, "result")
if not result:
# User has to set a value for result subfield at least
continue
# If neither min nor max have been set, assume we only accept a
# discrete result (like if % of error was 0).
s_min = self._get_spec_value(form, uid, "min", result)
s_max = self._get_spec_value(form, uid, "max", result)
service = api.get_object_by_uid(uid)
values[uid] = {
"keyword": service.getKeyword(),
"uid": uid,
"result": result,
"min": s_min,
"max": s_max
}
return values.values(), {} | ['def', 'process_form', '(', 'self', ',', 'instance', ',', 'field', ',', 'form', ',', 'empty_marker', '=', 'None', ',', 'emptyReturnsMarker', '=', 'False', ')', ':', 'values', '=', '{', '}', '# Process settings from the reference definition first', 'ref_def', '=', 'form', '.', 'get', '(', '"ReferenceDefinition"', ')', 'ref_def_uid', '=', 'ref_def', 'and', 'ref_def', '[', '0', ']', 'if', 'ref_def_uid', ':', 'ref_def_obj', '=', 'api', '.', 'get_object_by_uid', '(', 'ref_def_uid', ')', 'ref_results', '=', 'ref_def_obj', '.', 'getReferenceResults', '(', ')', '# store reference results by UID to avoid duplicates', 'rr_by_uid', '=', 'dict', '(', 'map', '(', 'lambda', 'r', ':', '(', 'r', '.', 'get', '(', '"uid"', ')', ',', 'r', ')', ',', 'ref_results', ')', ')', 'values', '.', 'update', '(', 'rr_by_uid', ')', '# selected services', 'service_uids', '=', 'form', '.', 'get', '(', '"uids"', ',', '[', ']', ')', 'for', 'uid', 'in', 'service_uids', ':', 'result', '=', 'self', '.', '_get_spec_value', '(', 'form', ',', 'uid', ',', '"result"', ')', 'if', 'not', 'result', ':', '# User has to set a value for result subfield at least', 'continue', '# If neither min nor max have been set, assume we only accept a', '# discrete result (like if % of error was 0).', 's_min', '=', 'self', '.', '_get_spec_value', '(', 'form', ',', 'uid', ',', '"min"', ',', 'result', ')', 's_max', '=', 'self', '.', '_get_spec_value', '(', 'form', ',', 'uid', ',', '"max"', ',', 'result', ')', 'service', '=', 'api', '.', 'get_object_by_uid', '(', 'uid', ')', 'values', '[', 'uid', ']', '=', '{', '"keyword"', ':', 'service', '.', 'getKeyword', '(', ')', ',', '"uid"', ':', 'uid', ',', '"result"', ':', 'result', ',', '"min"', ':', 's_min', ',', '"max"', ':', 's_max', '}', 'return', 'values', '.', 'values', '(', ')', ',', '{', '}'] | Return a list of dictionaries fit for ReferenceResultsField
consumption. Only services which have float()able entries in result,min
and max field will be included. If any of min, max, or result fields
are blank, the row value is ignored here. | ['Return', 'a', 'list', 'of', 'dictionaries', 'fit', 'for', 'ReferenceResultsField', 'consumption', '.', 'Only', 'services', 'which', 'have', 'float', '()', 'able', 'entries', 'in', 'result', 'min', 'and', 'max', 'field', 'will', 'be', 'included', '.', 'If', 'any', 'of', 'min', 'max', 'or', 'result', 'fields', 'are', 'blank', 'the', 'row', 'value', 'is', 'ignored', 'here', '.'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/widgets/referenceresultswidget.py#L197-L239 |
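Example sketch (illustrative, not senaite code): the merge order above — reference-definition defaults first, then form values keyed by UID, with a result required and min/max falling back to the result — reduced to plain dicts. All UIDs, keywords and numbers are made up.

ref_results = [
    {"uid": "uid-1", "keyword": "Ca", "result": "10", "min": "9", "max": "11"},
    {"uid": "uid-2", "keyword": "Mg", "result": "5", "min": "4", "max": "6"},
]
# Defaults keyed by UID so later form entries override them.
values = {r["uid"]: r for r in ref_results}

# Per-service values submitted in the form; a row without a result is skipped,
# and a missing min/max falls back to the result (discrete value).
form = {
    "uid-2": {"keyword": "Mg", "result": "5.5", "min": "", "max": "7"},
    "uid-3": {"keyword": "Zn", "result": ""},
}
for uid, spec in form.items():
    result = spec.get("result")
    if not result:
        continue
    values[uid] = {
        "keyword": spec["keyword"],
        "uid": uid,
        "result": result,
        "min": spec.get("min") or result,
        "max": spec.get("max") or result,
    }

print(list(values.values()))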
2,036 | Erotemic/utool | utool/util_path.py | touch | def touch(fpath, times=None, verbose=True):
r"""
Creates file if it doesn't exist
Args:
fpath (str): file path
times (None):
verbose (bool):
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> fpath = '?'
>>> times = None
>>> verbose = True
>>> result = touch(fpath, times, verbose)
>>> print(result)
References:
http://stackoverflow.com/questions/1158076/implement-touch-using-python
"""
try:
if verbose:
print('[util_path] touching %r' % fpath)
with open(fpath, 'a'):
os.utime(fpath, times)
except Exception as ex:
import utool
utool.printex(ex, 'touch %s' % fpath)
raise
return fpath | python | def touch(fpath, times=None, verbose=True):
r"""
Creates file if it doesn't exist
Args:
fpath (str): file path
times (None):
verbose (bool):
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> fpath = '?'
>>> times = None
>>> verbose = True
>>> result = touch(fpath, times, verbose)
>>> print(result)
References:
http://stackoverflow.com/questions/1158076/implement-touch-using-python
"""
try:
if verbose:
print('[util_path] touching %r' % fpath)
with open(fpath, 'a'):
os.utime(fpath, times)
except Exception as ex:
import utool
utool.printex(ex, 'touch %s' % fpath)
raise
return fpath | ['def', 'touch', '(', 'fpath', ',', 'times', '=', 'None', ',', 'verbose', '=', 'True', ')', ':', 'try', ':', 'if', 'verbose', ':', 'print', '(', "'[util_path] touching %r'", '%', 'fpath', ')', 'with', 'open', '(', 'fpath', ',', "'a'", ')', ':', 'os', '.', 'utime', '(', 'fpath', ',', 'times', ')', 'except', 'Exception', 'as', 'ex', ':', 'import', 'utool', 'utool', '.', 'printex', '(', 'ex', ',', "'touch %s'", '%', 'fpath', ')', 'raise', 'return', 'fpath'] | r"""
Creates file if it doesnt exist
Args:
fpath (str): file path
times (None):
verbose (bool):
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> fpath = '?'
>>> times = None
>>> verbose = True
>>> result = touch(fpath, times, verbose)
>>> print(result)
References:
http://stackoverflow.com/questions/1158076/implement-touch-using-python | ['r', 'Creates', 'file', 'if', 'it', 'doesnt', 'exist'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L672-L702 |
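Example sketch: the core of touch() is the open-in-append / os.utime idiom; a minimal standalone equivalent (the path below is hypothetical).

import os

def touch(fpath, times=None):
    # Create the file if it does not exist, then update its access/modification times.
    with open(fpath, 'a'):
        os.utime(fpath, times)
    return fpath

touch('/tmp/touch_example.txt')                  # "now" for both timestamps
touch('/tmp/touch_example.txt', times=(0, 0))    # atime=0, mtime=0 (epoch)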
2,037 | eyurtsev/fcsparser | fcsparser/api.py | FCSParser.get_channel_names | def get_channel_names(self):
"""Get list of channel names. Raises a warning if the names are not unique."""
names_s, names_n = self.channel_names_s, self.channel_names_n
# Figure out which channel names to use
if self._channel_naming == '$PnS':
channel_names, channel_names_alternate = names_s, names_n
else:
channel_names, channel_names_alternate = names_n, names_s
if len(channel_names) == 0:
channel_names = channel_names_alternate
if len(set(channel_names)) != len(channel_names):
msg = (u'The default channel names (defined by the {} '
u'parameter in the FCS file) were not unique. To avoid '
u'problems in downstream analysis, the channel names '
u'have been switched to the alternate channel names '
u'defined in the FCS file. To avoid '
u'seeing this warning message, explicitly instruct '
u'the FCS parser to use the alternate channel names by '
u'specifying the channel_naming parameter.')
msg = msg.format(self._channel_naming)
warnings.warn(msg)
channel_names = channel_names_alternate
return channel_names | python | def get_channel_names(self):
"""Get list of channel names. Raises a warning if the names are not unique."""
names_s, names_n = self.channel_names_s, self.channel_names_n
# Figure out which channel names to use
if self._channel_naming == '$PnS':
channel_names, channel_names_alternate = names_s, names_n
else:
channel_names, channel_names_alternate = names_n, names_s
if len(channel_names) == 0:
channel_names = channel_names_alternate
if len(set(channel_names)) != len(channel_names):
msg = (u'The default channel names (defined by the {} '
u'parameter in the FCS file) were not unique. To avoid '
u'problems in downstream analysis, the channel names '
u'have been switched to the alternate channel names '
u'defined in the FCS file. To avoid '
u'seeing this warning message, explicitly instruct '
u'the FCS parser to use the alternate channel names by '
u'specifying the channel_naming parameter.')
msg = msg.format(self._channel_naming)
warnings.warn(msg)
channel_names = channel_names_alternate
return channel_names | ['def', 'get_channel_names', '(', 'self', ')', ':', 'names_s', ',', 'names_n', '=', 'self', '.', 'channel_names_s', ',', 'self', '.', 'channel_names_n', '# Figure out which channel names to use', 'if', 'self', '.', '_channel_naming', '==', "'$PnS'", ':', 'channel_names', ',', 'channel_names_alternate', '=', 'names_s', ',', 'names_n', 'else', ':', 'channel_names', ',', 'channel_names_alternate', '=', 'names_n', ',', 'names_s', 'if', 'len', '(', 'channel_names', ')', '==', '0', ':', 'channel_names', '=', 'channel_names_alternate', 'if', 'len', '(', 'set', '(', 'channel_names', ')', ')', '!=', 'len', '(', 'channel_names', ')', ':', 'msg', '=', '(', "u'The default channel names (defined by the {} '", "u'parameter in the FCS file) were not unique. To avoid '", "u'problems in downstream analysis, the channel names '", "u'have been switched to the alternate channel names '", "u'defined in the FCS file. To avoid '", "u'seeing this warning message, explicitly instruct '", "u'the FCS parser to use the alternate channel names by '", "u'specifying the channel_naming parameter.'", ')', 'msg', '=', 'msg', '.', 'format', '(', 'self', '.', '_channel_naming', ')', 'warnings', '.', 'warn', '(', 'msg', ')', 'channel_names', '=', 'channel_names_alternate', 'return', 'channel_names'] | Get list of channel names. Raises a warning if the names are not unique. | ['Get', 'list', 'of', 'channel', 'names', '.', 'Raises', 'a', 'warning', 'if', 'the', 'names', 'are', 'not', 'unique', '.'] | train | https://github.com/eyurtsev/fcsparser/blob/710e8e31d4b09ff6e73d47d86770be6ca2f4282c/fcsparser/api.py#L327-L353 |
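Example sketch (plain Python, no fcsparser call): the selection rule above — prefer the configured naming, fall back to the alternate names when the preferred set is empty or not unique — with made-up channel lists.

import warnings

def pick_channel_names(names_s, names_n, channel_naming='$PnS'):
    # Prefer the $PnS or $PnN names according to channel_naming, mirroring the method above.
    preferred, alternate = (names_s, names_n) if channel_naming == '$PnS' else (names_n, names_s)
    if not preferred:
        return alternate
    if len(set(preferred)) != len(preferred):
        warnings.warn('Preferred channel names are not unique; using alternate names.')
        return alternate
    return preferred

print(pick_channel_names(['FSC', 'SSC', 'FSC'], ['FSC-A', 'SSC-A', 'FSC-H']))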
2,038 | thunder-project/thunder | thunder/series/series.py | Series.max | def max(self):
"""
Compute the max across records.
"""
return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True)) | python | def max(self):
"""
Compute the max across records.
"""
return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True)) | ['def', 'max', '(', 'self', ')', ':', 'return', 'self', '.', '_constructor', '(', 'self', '.', 'values', '.', 'max', '(', 'axis', '=', 'self', '.', 'baseaxes', ',', 'keepdims', '=', 'True', ')', ')'] | Compute the max across records. | ['Compute', 'the', 'max', 'across', 'records', '.'] | train | https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L239-L243 |
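Example sketch: the reduction above delegates to numpy's axis/keepdims machinery; the same call on a plain array, assuming baseaxes are the record axes (everything but the last, series, axis).

import numpy as np

values = np.arange(24).reshape(2, 3, 4)    # e.g. 2x3 records, series length 4
baseaxes = (0, 1)                          # record axes (assumption)
reduced = values.max(axis=baseaxes, keepdims=True)
print(reduced.shape)   # (1, 1, 4): one max per series position, dimensions kept
print(reduced)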
2,039 | DavidLP/pilight | pilight/pilight.py | Client.run | def run(self): # Thread for receiving data from pilight
"""Receiver thread function called on Client.start()."""
logging.debug('Pilight receiver thread started')
if not self.callback:
raise RuntimeError('No callback function set, cancel readout thread')
def handle_messages(messages):
"""Call callback on each receive message."""
for message in messages: # Loop over received messages
if message: # Can be empty due to splitlines
message_dict = json.loads(message.decode())
if self.recv_codes_only:
# Filter: Only use receiver messages
if 'receiver' in message_dict['origin']:
if self.veto_repeats:
if message_dict.get('repeats', 1) == 1:
self.callback(message_dict)
else:
self.callback(message_dict)
else:
self.callback(message_dict)
while not self._stop_thread.isSet():
try: # Read socket in a non blocking call and interpret data
# Sometimes more than one JSON object is in the stream thus
# split at \n
with self._lock:
messages = self.receive_socket.recv(1024).splitlines()
handle_messages(messages)
except (socket.timeout, ValueError): # No data
pass
logging.debug('Pilight receiver thread stopped') | python | def run(self): # Thread for receiving data from pilight
"""Receiver thread function called on Client.start()."""
logging.debug('Pilight receiver thread started')
if not self.callback:
raise RuntimeError('No callback function set, cancel readout thread')
def handle_messages(messages):
"""Call callback on each receive message."""
for message in messages: # Loop over received messages
if message: # Can be empty due to splitlines
message_dict = json.loads(message.decode())
if self.recv_codes_only:
# Filter: Only use receiver messages
if 'receiver' in message_dict['origin']:
if self.veto_repeats:
if message_dict.get('repeats', 1) == 1:
self.callback(message_dict)
else:
self.callback(message_dict)
else:
self.callback(message_dict)
while not self._stop_thread.isSet():
try: # Read socket in a non blocking call and interpret data
# Sometimes more than one JSON object is in the stream thus
# split at \n
with self._lock:
messages = self.receive_socket.recv(1024).splitlines()
handle_messages(messages)
except (socket.timeout, ValueError): # No data
pass
logging.debug('Pilight receiver thread stopped') | ['def', 'run', '(', 'self', ')', ':', '# Thread for receiving data from pilight', 'logging', '.', 'debug', '(', "'Pilight receiver thread started'", ')', 'if', 'not', 'self', '.', 'callback', ':', 'raise', 'RuntimeError', '(', "'No callback function set, cancel readout thread'", ')', 'def', 'handle_messages', '(', 'messages', ')', ':', '"""Call callback on each receive message."""', 'for', 'message', 'in', 'messages', ':', '# Loop over received messages', 'if', 'message', ':', '# Can be empty due to splitlines', 'message_dict', '=', 'json', '.', 'loads', '(', 'message', '.', 'decode', '(', ')', ')', 'if', 'self', '.', 'recv_codes_only', ':', '# Filter: Only use receiver messages', 'if', "'receiver'", 'in', 'message_dict', '[', "'origin'", ']', ':', 'if', 'self', '.', 'veto_repeats', ':', 'if', 'message_dict', '.', 'get', '(', "'repeats'", ',', '1', ')', '==', '1', ':', 'self', '.', 'callback', '(', 'message_dict', ')', 'else', ':', 'self', '.', 'callback', '(', 'message_dict', ')', 'else', ':', 'self', '.', 'callback', '(', 'message_dict', ')', 'while', 'not', 'self', '.', '_stop_thread', '.', 'isSet', '(', ')', ':', 'try', ':', '# Read socket in a non blocking call and interpret data', '# Sometimes more than one JSON object is in the stream thus', '# split at \\n', 'with', 'self', '.', '_lock', ':', 'messages', '=', 'self', '.', 'receive_socket', '.', 'recv', '(', '1024', ')', '.', 'splitlines', '(', ')', 'handle_messages', '(', 'messages', ')', 'except', '(', 'socket', '.', 'timeout', ',', 'ValueError', ')', ':', '# No data', 'pass', 'logging', '.', 'debug', '(', "'Pilight receiver thread stopped'", ')'] | Receiver thread function called on Client.start(). | ['Receiver', 'thread', 'function', 'called', 'on', 'Client', '.', 'start', '()', '.'] | train | https://github.com/DavidLP/pilight/blob/a319404034e761892a89c7205b6f1aff6ad8e205/pilight/pilight.py#L123-L154 |
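Example sketch (no socket, fabricated pilight-style messages): the filtering rules from handle_messages applied to a canned byte stream; the field values are assumptions based only on the code above.

import json

received = []

def callback(message):
    received.append(message)

recv_codes_only, veto_repeats = True, True

raw = (b'{"origin":"receiver","repeats":1,"code":1}\n'
       b'{"origin":"sender","code":2}\n'
       b'{"origin":"receiver","repeats":3,"code":3}\n')
for line in raw.splitlines():
    if not line:
        continue
    message_dict = json.loads(line.decode())
    if recv_codes_only:
        # Only receiver messages pass; repeated codes are vetoed when requested.
        if 'receiver' in message_dict['origin']:
            if veto_repeats:
                if message_dict.get('repeats', 1) == 1:
                    callback(message_dict)
            else:
                callback(message_dict)
    else:
        callback(message_dict)

print(received)   # only the first message passes both filters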
2,040 | Gandi/gandi.cli | gandi/cli/core/conf.py | GandiConfig.load_config | def load_config(cls):
""" Load global and local configuration files and update if needed."""
config_file = os.path.expanduser(cls.home_config)
global_conf = cls.load(config_file, 'global')
cls.load(cls.local_config, 'local')
# update global configuration if needed
cls.update_config(config_file, global_conf) | python | def load_config(cls):
""" Load global and local configuration files and update if needed."""
config_file = os.path.expanduser(cls.home_config)
global_conf = cls.load(config_file, 'global')
cls.load(cls.local_config, 'local')
# update global configuration if needed
cls.update_config(config_file, global_conf) | ['def', 'load_config', '(', 'cls', ')', ':', 'config_file', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'cls', '.', 'home_config', ')', 'global_conf', '=', 'cls', '.', 'load', '(', 'config_file', ',', "'global'", ')', 'cls', '.', 'load', '(', 'cls', '.', 'local_config', ',', "'local'", ')', '# update global configuration if needed', 'cls', '.', 'update_config', '(', 'config_file', ',', 'global_conf', ')'] | Load global and local configuration files and update if needed. | ['Load', 'global', 'and', 'local', 'configuration', 'files', 'and', 'update', 'if', 'needed', '.'] | train | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/conf.py#L40-L46 |
2,041 | hatemile/hatemile-for-python | setup.py | get_packages | def get_packages():
"""
Returns the packages used for HaTeMiLe for Python.
:return: The packages used for HaTeMiLe for Python.
:rtype: list(str)
"""
packages = find_packages(exclude=['tests'])
packages.append('')
packages.append('js')
packages.append(LOCALES_DIRECTORY)
for directory in os.listdir(LOCALES_DIRECTORY):
packages.append(LOCALES_DIRECTORY + '.' + directory)
return packages | python | def get_packages():
"""
Returns the packages used for HaTeMiLe for Python.
:return: The packages used for HaTeMiLe for Python.
:rtype: list(str)
"""
packages = find_packages(exclude=['tests'])
packages.append('')
packages.append('js')
packages.append(LOCALES_DIRECTORY)
for directory in os.listdir(LOCALES_DIRECTORY):
packages.append(LOCALES_DIRECTORY + '.' + directory)
return packages | ['def', 'get_packages', '(', ')', ':', 'packages', '=', 'find_packages', '(', 'exclude', '=', '[', "'tests'", ']', ')', 'packages', '.', 'append', '(', "''", ')', 'packages', '.', 'append', '(', "'js'", ')', 'packages', '.', 'append', '(', 'LOCALES_DIRECTORY', ')', 'for', 'directory', 'in', 'os', '.', 'listdir', '(', 'LOCALES_DIRECTORY', ')', ':', 'packages', '.', 'append', '(', 'LOCALES_DIRECTORY', '+', "'.'", '+', 'directory', ')', 'return', 'packages'] | Returns the packages used for HaTeMiLe for Python.
:return: The packages used for HaTeMiLe for Python.
:rtype: list(str) | ['Returns', 'the', 'packages', 'used', 'for', 'HaTeMiLe', 'for', 'Python', '.'] | train | https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/setup.py#L42-L57 |
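Example sketch: the same package-list assembly with setuptools; the locale directory name is an assumption, and an isdir guard is added so the sketch runs anywhere.

import os
from setuptools import find_packages

LOCALES_DIRECTORY = 'locale'   # assumption: a flat directory of per-language folders

packages = find_packages(exclude=['tests'])
packages += ['', 'js', LOCALES_DIRECTORY]
if os.path.isdir(LOCALES_DIRECTORY):
    # One dotted package name per language folder, as in get_packages() above.
    for directory in os.listdir(LOCALES_DIRECTORY):
        packages.append(LOCALES_DIRECTORY + '.' + directory)
print(packages)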
2,042 | apache/incubator-mxnet | python/mxnet/autograd.py | get_symbol | def get_symbol(x):
"""Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol.
"""
hdl = SymbolHandle()
check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))
return Symbol(hdl) | python | def get_symbol(x):
"""Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol.
"""
hdl = SymbolHandle()
check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))
return Symbol(hdl) | ['def', 'get_symbol', '(', 'x', ')', ':', 'hdl', '=', 'SymbolHandle', '(', ')', 'check_call', '(', '_LIB', '.', 'MXAutogradGetSymbol', '(', 'x', '.', 'handle', ',', 'ctypes', '.', 'byref', '(', 'hdl', ')', ')', ')', 'return', 'Symbol', '(', 'hdl', ')'] | Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol. | ['Retrieve', 'recorded', 'computation', 'history', 'as', 'Symbol', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L347-L362 |
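Example usage sketch (assumes a standard MXNet 1.x install; exact behavior can vary by version): record a small computation under autograd, then retrieve its history as a Symbol with the function above.

import mxnet as mx
from mxnet import autograd, nd

x = nd.ones((2, 3))
x.attach_grad()                      # mark x so the computation is recorded
with autograd.record():
    y = (x * 2).sum()

sym = autograd.get_symbol(y)         # the function documented above
print(sym.list_arguments())          # symbolic inputs of the recorded graph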
2,043 | KnuVerse/knuverse-sdk-python | knuverse/knufactor.py | Knufactor.verification_start | def verification_start(
self,
client,
mode=None,
verification_speed=None,
row_doubling="off",
phone_number=None,
):
"""
Start a verification. Uses POST to /verifications interface.
:Args:
* *client*: (str) Client's Name
* *mode*: (str) Verification Mode. Allowed values: "audiopin", "audiopass"
* *verification_speed*: (int) Allowed values: 0, 25, 50, 75, 100
* *row_doubling*: (str) Allowed values: "off", "train", "on"
* *phone_number*: (str) Phone number to call.
:Returns: (dict) Verification record with animation as discussed `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Start_verification>`_.
"""
data = {
"name": client,
"user_agent": "knuverse-sdk-python-v%s" % self.version
}
if mode:
data["mode"] = mode
if phone_number:
data["phone_number"] = phone_number
if verification_speed:
data["verification_speed"] = verification_speed
if row_doubling:
data["row_doubling"] = row_doubling
response = self._post(url.verifications, body=data)
self._check_response(response, 201)
return self._create_response(response) | python | def verification_start(
self,
client,
mode=None,
verification_speed=None,
row_doubling="off",
phone_number=None,
):
"""
Start a verification. Uses POST to /verifications interface.
:Args:
* *client*: (str) Client's Name
* *mode*: (str) Verification Mode. Allowed values: "audiopin", "audiopass"
* *verification_speed*: (int) Allowed values: 0, 25, 50, 75, 100
* *row_doubling*: (str) Allowed values: "off", "train", "on"
* *phone_number*: (str) Phone number to call.
:Returns: (dict) Verification record with animation as discussed `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Start_verification>`_.
"""
data = {
"name": client,
"user_agent": "knuverse-sdk-python-v%s" % self.version
}
if mode:
data["mode"] = mode
if phone_number:
data["phone_number"] = phone_number
if verification_speed:
data["verification_speed"] = verification_speed
if row_doubling:
data["row_doubling"] = row_doubling
response = self._post(url.verifications, body=data)
self._check_response(response, 201)
return self._create_response(response) | ['def', 'verification_start', '(', 'self', ',', 'client', ',', 'mode', '=', 'None', ',', 'verification_speed', '=', 'None', ',', 'row_doubling', '=', '"off"', ',', 'phone_number', '=', 'None', ',', ')', ':', 'data', '=', '{', '"name"', ':', 'client', ',', '"user_agent"', ':', '"knuverse-sdk-python-v%s"', '%', 'self', '.', 'version', '}', 'if', 'mode', ':', 'data', '[', '"mode"', ']', '=', 'mode', 'if', 'phone_number', ':', 'data', '[', '"phone_number"', ']', '=', 'phone_number', 'if', 'verification_speed', ':', 'data', '[', '"verification_speed"', ']', '=', 'verification_speed', 'if', 'row_doubling', ':', 'data', '[', '"row_doubling"', ']', '=', 'row_doubling', 'response', '=', 'self', '.', '_post', '(', 'url', '.', 'verifications', ',', 'body', '=', 'data', ')', 'self', '.', '_check_response', '(', 'response', ',', '201', ')', 'return', 'self', '.', '_create_response', '(', 'response', ')'] | Start a verification. Uses POST to /verifications interface.
:Args:
* *client*: (str) Client's Name
* *mode*: (str) Verification Mode. Allowed values: "audiopin", "audiopass"
* *verification_speed*: (int) Allowed values: 0, 25, 50, 75, 100
* *row_doubling*: (str) Allowed values: "off", "train", "on"
* *phone_number*: (str) Phone number to call.
:Returns: (dict) Verification record with animation as discussed `here <https://cloud.knuverse.com/docs/api/#api-Verifications-Start_verification>`_. | ['Start', 'a', 'verification', '.', 'Uses', 'POST', 'to', '/', 'verifications', 'interface', '.'] | train | https://github.com/KnuVerse/knuverse-sdk-python/blob/00f1275a452a4dcf9bc92ef345f6985504226d8e/knuverse/knufactor.py#L821-L860 |
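Example usage sketch: only the verification_start arguments come from the docstring above; the Knufactor constructor, its credentials and the "id" key are assumptions to be checked against the SDK docs.

from knuverse.knufactor import Knufactor

# Constructor arguments are assumptions; consult the SDK documentation for real credentials.
api = Knufactor(apikey="YOUR_API_KEY")

verification = api.verification_start(
    "alice",                 # client name
    mode="audiopin",         # "audiopin" or "audiopass"
    verification_speed=50,   # one of 0, 25, 50, 75, 100
    row_doubling="off",      # "off", "train" or "on"
)
print(verification.get("id"))   # "id" key assumed to be in the returned record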
2,044 | luismsgomes/openfile | src/openfile.py | openfile | def openfile(filename, mode="rt", *args, expanduser=False, expandvars=False,
makedirs=False, **kwargs):
"""Open filename and return a corresponding file object."""
if filename in ("-", None):
return sys.stdin if "r" in mode else sys.stdout
if expanduser:
filename = os.path.expanduser(filename)
if expandvars:
filename = os.path.expandvars(filename)
if makedirs and ("a" in mode or "w" in mode):
parentdir = os.path.dirname(filename)
if not os.path.isdir(parentdir):
os.makedirs(parentdir)
if filename.endswith(".gz"):
if gzip is None:
raise NotImplementedError
_open = gzip.open
elif filename.endswith(".bz2"):
if bz2 is None:
raise NotImplementedError
_open = bz2.open
elif filename.endswith(".xz") or filename.endswith(".lzma"):
if lzma is None:
raise NotImplementedError
_open = lzma.open
else:
_open = open
return _open(filename, mode, *args, **kwargs) | python | def openfile(filename, mode="rt", *args, expanduser=False, expandvars=False,
makedirs=False, **kwargs):
"""Open filename and return a corresponding file object."""
if filename in ("-", None):
return sys.stdin if "r" in mode else sys.stdout
if expanduser:
filename = os.path.expanduser(filename)
if expandvars:
filename = os.path.expandvars(filename)
if makedirs and ("a" in mode or "w" in mode):
parentdir = os.path.dirname(filename)
if not os.path.isdir(parentdir):
os.makedirs(parentdir)
if filename.endswith(".gz"):
if gzip is None:
raise NotImplementedError
_open = gzip.open
elif filename.endswith(".bz2"):
if bz2 is None:
raise NotImplementedError
_open = bz2.open
elif filename.endswith(".xz") or filename.endswith(".lzma"):
if lzma is None:
raise NotImplementedError
_open = lzma.open
else:
_open = open
return _open(filename, mode, *args, **kwargs) | ['def', 'openfile', '(', 'filename', ',', 'mode', '=', '"rt"', ',', '*', 'args', ',', 'expanduser', '=', 'False', ',', 'expandvars', '=', 'False', ',', 'makedirs', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'if', 'filename', 'in', '(', '"-"', ',', 'None', ')', ':', 'return', 'sys', '.', 'stdin', 'if', '"r"', 'in', 'mode', 'else', 'sys', '.', 'stdout', 'if', 'expanduser', ':', 'filename', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'filename', ')', 'if', 'expandvars', ':', 'filename', '=', 'os', '.', 'path', '.', 'expandvars', '(', 'filename', ')', 'if', 'makedirs', 'and', '(', '"a"', 'in', 'mode', 'or', '"w"', 'in', 'mode', ')', ':', 'parentdir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'filename', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'parentdir', ')', ':', 'os', '.', 'makedirs', '(', 'parentdir', ')', 'if', 'filename', '.', 'endswith', '(', '".gz"', ')', ':', 'if', 'gzip', 'is', 'None', ':', 'raise', 'NotImplementedError', '_open', '=', 'gzip', '.', 'open', 'elif', 'filename', '.', 'endswith', '(', '".bz2"', ')', ':', 'if', 'bz2', 'is', 'None', ':', 'raise', 'NotImplementedError', '_open', '=', 'bz2', '.', 'open', 'elif', 'filename', '.', 'endswith', '(', '".xz"', ')', 'or', 'filename', '.', 'endswith', '(', '".lzma"', ')', ':', 'if', 'lzma', 'is', 'None', ':', 'raise', 'NotImplementedError', '_open', '=', 'lzma', '.', 'open', 'else', ':', '_open', '=', 'open', 'return', '_open', '(', 'filename', ',', 'mode', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Open filename and return a corresponding file object. | ['Open', 'filename', 'and', 'return', 'a', 'corresponding', 'file', 'object', '.'] | train | https://github.com/luismsgomes/openfile/blob/08b3679967e5a7e026656db917d5ed92241d612a/src/openfile.py#L30-L57 |
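Example usage sketch (assumes the module is importable as openfile): compression is chosen from the file extension, and "-" or None maps to stdin/stdout.

from openfile import openfile

# Write a gzip-compressed text file; the .gz suffix selects gzip.open.
with openfile("/tmp/example.txt.gz", "wt", makedirs=True) as f:
    f.write("hello\n")

# Read it back through the same transparent dispatch.
with openfile("/tmp/example.txt.gz", "rt") as f:
    print(f.read())

# "-" (or None) returns sys.stdout for write modes instead of opening a file.
out = openfile("-", "wt")
out.write("to stdout\n")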
2,045 | JoseAntFer/pyny3d | pyny3d/geoms.py | root.plot | def plot(self, color='default', ret=False, ax=None):
"""
Generates a basic 3D visualization.
:param color: Polygons color.
:type color: matplotlib color, 'default' or 't' (transparent)
:param ret: If True, returns the figure. It can be used to add
more elements to the plot or to modify it.
:type ret: bool
:param ax: If a matplotlib axes given, this method will
represent the plot on top of this axes. This is used to
represent multiple plots from multiple geometries,
overlapping them recursively.
:type ax: mplot3d.Axes3D, None
:returns: None, axes
:rtype: mplot3d.Axes3D, bool
"""
import matplotlib.pylab as plt
import mpl_toolkits.mplot3d as mplot3d
# Bypass a plot
if color == False:
if ax is None: ax = mplot3d.Axes3D(fig=plt.figure())
return ax
# Clone and extract the information from the object
obj = self.__class__(**self.get_seed())
plotable3d = obj.get_plotable3d()
# Domain
domain = obj.get_domain()
bound = np.max(domain[1]-domain[0])
centroid = obj.get_centroid()
pos = np.vstack((centroid-bound/2, centroid+bound/2))
# Cascade plot?
if ax is None: # Non cascade
ax = mplot3d.Axes3D(fig=plt.figure())
else:
old_pos = np.array([ax.get_xbound(),
ax.get_ybound(),
ax.get_zbound()]).T
pos = np.dstack((pos, old_pos))
pos = np.array([np.min(pos[0, :, :], axis=1),
np.max(pos[1, :, :], axis=1)])
# Plot
if color == 'default': color = 't'
if color == 't': color = (0,0,0,0)
for polygon in plotable3d:
polygon.set_facecolor(color)
polygon.set_edgecolor('k')
ax.add_collection3d(polygon)
# Axis limits
ax.set_xlim3d(left=pos[0,0], right=pos[1,0])
ax.set_ylim3d(bottom=pos[0,1], top=pos[1,1])
ax.set_zlim3d(bottom=pos[0,2], top=pos[1,2])
if ret: return ax | python | def plot(self, color='default', ret=False, ax=None):
"""
Generates a basic 3D visualization.
:param color: Polygons color.
:type color: matplotlib color, 'default' or 't' (transparent)
:param ret: If True, returns the figure. It can be used to add
more elements to the plot or to modify it.
:type ret: bool
:param ax: If a matplotlib axes given, this method will
represent the plot on top of this axes. This is used to
represent multiple plots from multiple geometries,
overlapping them recursively.
:type ax: mplot3d.Axes3D, None
:returns: None, axes
:rtype: mplot3d.Axes3D, bool
"""
import matplotlib.pylab as plt
import mpl_toolkits.mplot3d as mplot3d
# Bypass a plot
if color == False:
if ax is None: ax = mplot3d.Axes3D(fig=plt.figure())
return ax
# Clone and extract the information from the object
obj = self.__class__(**self.get_seed())
plotable3d = obj.get_plotable3d()
# Domain
domain = obj.get_domain()
bound = np.max(domain[1]-domain[0])
centroid = obj.get_centroid()
pos = np.vstack((centroid-bound/2, centroid+bound/2))
# Cascade plot?
if ax is None: # Non cascade
ax = mplot3d.Axes3D(fig=plt.figure())
else:
old_pos = np.array([ax.get_xbound(),
ax.get_ybound(),
ax.get_zbound()]).T
pos = np.dstack((pos, old_pos))
pos = np.array([np.min(pos[0, :, :], axis=1),
np.max(pos[1, :, :], axis=1)])
# Plot
if color == 'default': color = 't'
if color == 't': color = (0,0,0,0)
for polygon in plotable3d:
polygon.set_facecolor(color)
polygon.set_edgecolor('k')
ax.add_collection3d(polygon)
# Axis limits
ax.set_xlim3d(left=pos[0,0], right=pos[1,0])
ax.set_ylim3d(bottom=pos[0,1], top=pos[1,1])
ax.set_zlim3d(bottom=pos[0,2], top=pos[1,2])
if ret: return ax | ['def', 'plot', '(', 'self', ',', 'color', '=', "'default'", ',', 'ret', '=', 'False', ',', 'ax', '=', 'None', ')', ':', 'import', 'matplotlib', '.', 'pylab', 'as', 'plt', 'import', 'mpl_toolkits', '.', 'mplot3d', 'as', 'mplot3d', '# Bypass a plot\r', 'if', 'color', '==', 'False', ':', 'if', 'ax', 'is', 'None', ':', 'ax', '=', 'mplot3d', '.', 'Axes3D', '(', 'fig', '=', 'plt', '.', 'figure', '(', ')', ')', 'return', 'ax', '# Clone and extract the information from the object\r', 'obj', '=', 'self', '.', '__class__', '(', '*', '*', 'self', '.', 'get_seed', '(', ')', ')', 'plotable3d', '=', 'obj', '.', 'get_plotable3d', '(', ')', '# Domain\r', 'domain', '=', 'obj', '.', 'get_domain', '(', ')', 'bound', '=', 'np', '.', 'max', '(', 'domain', '[', '1', ']', '-', 'domain', '[', '0', ']', ')', 'centroid', '=', 'obj', '.', 'get_centroid', '(', ')', 'pos', '=', 'np', '.', 'vstack', '(', '(', 'centroid', '-', 'bound', '/', '2', ',', 'centroid', '+', 'bound', '/', '2', ')', ')', '# Cascade plot?\r', 'if', 'ax', 'is', 'None', ':', '# Non cascade\r', 'ax', '=', 'mplot3d', '.', 'Axes3D', '(', 'fig', '=', 'plt', '.', 'figure', '(', ')', ')', 'else', ':', 'old_pos', '=', 'np', '.', 'array', '(', '[', 'ax', '.', 'get_xbound', '(', ')', ',', 'ax', '.', 'get_ybound', '(', ')', ',', 'ax', '.', 'get_zbound', '(', ')', ']', ')', '.', 'T', 'pos', '=', 'np', '.', 'dstack', '(', '(', 'pos', ',', 'old_pos', ')', ')', 'pos', '=', 'np', '.', 'array', '(', '[', 'np', '.', 'min', '(', 'pos', '[', '0', ',', ':', ',', ':', ']', ',', 'axis', '=', '1', ')', ',', 'np', '.', 'max', '(', 'pos', '[', '1', ',', ':', ',', ':', ']', ',', 'axis', '=', '1', ')', ']', ')', '# Plot\r', 'if', 'color', '==', "'default'", ':', 'color', '=', "'t'", 'if', 'color', '==', "'t'", ':', 'color', '=', '(', '0', ',', '0', ',', '0', ',', '0', ')', 'for', 'polygon', 'in', 'plotable3d', ':', 'polygon', '.', 'set_facecolor', '(', 'color', ')', 'polygon', '.', 'set_edgecolor', '(', "'k'", ')', 'ax', '.', 'add_collection3d', '(', 'polygon', ')', '# Axis limits\r', 'ax', '.', 'set_xlim3d', '(', 'left', '=', 'pos', '[', '0', ',', '0', ']', ',', 'right', '=', 'pos', '[', '1', ',', '0', ']', ')', 'ax', '.', 'set_ylim3d', '(', 'bottom', '=', 'pos', '[', '0', ',', '1', ']', ',', 'top', '=', 'pos', '[', '1', ',', '1', ']', ')', 'ax', '.', 'set_zlim3d', '(', 'bottom', '=', 'pos', '[', '0', ',', '2', ']', ',', 'top', '=', 'pos', '[', '1', ',', '2', ']', ')', 'if', 'ret', ':', 'return', 'ax'] | Generates a basic 3D visualization.
:param color: Polygons color.
:type color: matplotlib color, 'default' or 't' (transparent)
:param ret: If True, returns the figure. It can be used to add
more elements to the plot or to modify it.
:type ret: bool
:param ax: If a matplotlib axes given, this method will
represent the plot on top of this axes. This is used to
represent multiple plots from multiple geometries,
overlapping them recursively.
:type ax: mplot3d.Axes3D, None
:returns: None, axes
:rtype: mplot3d.Axes3D, bool | ['Generates', 'a', 'basic', '3D', 'visualization', '.', ':', 'param', 'color', ':', 'Polygons', 'color', '.', ':', 'type', 'color', ':', 'matplotlib', 'color', 'default', 'or', 't', '(', 'transparent', ')', ':', 'param', 'ret', ':', 'If', 'True', 'returns', 'the', 'figure', '.', 'It', 'can', 'be', 'used', 'to', 'add', 'more', 'elements', 'to', 'the', 'plot', 'or', 'to', 'modify', 'it', '.', ':', 'type', 'ret', ':', 'bool', ':', 'param', 'ax', ':', 'If', 'a', 'matplotlib', 'axes', 'given', 'this', 'method', 'will', 'represent', 'the', 'plot', 'on', 'top', 'of', 'this', 'axes', '.', 'This', 'is', 'used', 'to', 'represent', 'multiple', 'plots', 'from', 'multiple', 'geometries', 'overlapping', 'them', 'recursively', '.', ':', 'type', 'ax', ':', 'mplot3d', '.', 'Axes3D', 'None', ':', 'returns', ':', 'None', 'axes', ':', 'rtype', ':', 'mplot3d', '.', 'Axes3D', 'bool'] | train | https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L25-L85 |
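Example sketch (plain matplotlib, not a pyny3d call; assumes matplotlib >= 3.2 for the projection keyword): the same add_collection3d / facecolor / axis-limit steps on a hand-made square.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

fig = plt.figure()
ax = fig.add_subplot(projection='3d')

square = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], dtype=float)
poly = Poly3DCollection([square])
poly.set_facecolor((0, 0, 0, 0))   # 't' -> fully transparent, as in the method above
poly.set_edgecolor('k')
ax.add_collection3d(poly)

# Cubic bounds centred on the polygon's centroid, mirroring the axis-limit logic above.
centroid, half = square.mean(axis=0), 0.5 * (square.max() - square.min())
ax.set_xlim3d(centroid[0] - half, centroid[0] + half)
ax.set_ylim3d(centroid[1] - half, centroid[1] + half)
ax.set_zlim3d(centroid[2] - half, centroid[2] + half)
plt.show()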
2,046 | watson-developer-cloud/python-sdk | ibm_watson/discovery_v1.py | RetrievalDetails._to_dict | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_retrieval_strategy'
) and self.document_retrieval_strategy is not None:
_dict[
'document_retrieval_strategy'] = self.document_retrieval_strategy
return _dict | python | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_retrieval_strategy'
) and self.document_retrieval_strategy is not None:
_dict[
'document_retrieval_strategy'] = self.document_retrieval_strategy
return _dict | ['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'document_retrieval_strategy'", ')', 'and', 'self', '.', 'document_retrieval_strategy', 'is', 'not', 'None', ':', '_dict', '[', "'document_retrieval_strategy'", ']', '=', 'self', '.', 'document_retrieval_strategy', 'return', '_dict'] | Return a json dictionary representing this model. | ['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L10160-L10167 |
2,047 | mgoral/subconvert | src/subconvert/gui/tools/Synchronizer.py | Synchronizer._changeSubNos | def _changeSubNos(self, path, subNos, count, action, reverse=False):
"""Implementation of subs add/removal handling.
Args:
path: file path associated with model on which work is done
subNos: list of added/removed subtitle numbers
count: function which accepts current sync point's subtitle number
and subNos and returns anything based on these values
action: action performed for each of sync point's subtitle number.
Accepts current SyncPoint.subNo, count result, model and
row:
def action(current, count, model, row)
"""
model = self._models.get(path)
if model is None:
return
syncPoints = _syncPoints(model)
syncSubNos = [p.subNo for p in syncPoints]
syncSubNos.sort()
if len(syncSubNos) == 0:
return
for current in syncSubNos:
row = _findRow(current, model)
action(current, count(current, subNos), model, row) | python | def _changeSubNos(self, path, subNos, count, action, reverse=False):
"""Implementation of subs add/removal handling.
Args:
path: file path associated with model on which work is done
subNos: list of added/removed subtitle numbers
count: function which accepts current sync point's subtitle number
and subNos and returns anything based on these values
action: action performed for each of sync point's subtitle number.
Accepts current SyncPoint.subNo, count result, model and
row:
def action(current, count, model, row)
"""
model = self._models.get(path)
if model is None:
return
syncPoints = _syncPoints(model)
syncSubNos = [p.subNo for p in syncPoints]
syncSubNos.sort()
if len(syncSubNos) == 0:
return
for current in syncSubNos:
row = _findRow(current, model)
action(current, count(current, subNos), model, row) | ['def', '_changeSubNos', '(', 'self', ',', 'path', ',', 'subNos', ',', 'count', ',', 'action', ',', 'reverse', '=', 'False', ')', ':', 'model', '=', 'self', '.', '_models', '.', 'get', '(', 'path', ')', 'if', 'model', 'is', 'None', ':', 'return', 'syncPoints', '=', '_syncPoints', '(', 'model', ')', 'syncSubNos', '=', '[', 'p', '.', 'subNo', 'for', 'p', 'in', 'syncPoints', ']', 'syncSubNos', '.', 'sort', '(', ')', 'if', 'len', '(', 'syncSubNos', ')', '==', '0', ':', 'return', 'for', 'current', 'in', 'syncSubNos', ':', 'row', '=', '_findRow', '(', 'current', ',', 'model', ')', 'action', '(', 'current', ',', 'count', '(', 'current', ',', 'subNos', ')', ',', 'model', ',', 'row', ')'] | Implementation of subs add/removal handling.
Args:
path: file path associated with model on which work is done
subNos: list of added/removed subtitle numbers
count: function which accepts current sync point's subtitle number
and subNos and returns anything based on these values
action: action performed for each of sync point's subtitle number.
Accepts current SyncPoint.subNo, count result, model and
row:
def action(current, count, model, row) | ['Implementation', 'of', 'subs', 'add', '/', 'removal', 'handling', '.'] | train | https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/gui/tools/Synchronizer.py#L255-L281 |
2,048 | witchard/grole | grole.py | Request._readline | async def _readline(self, reader):
"""
Readline helper
"""
ret = await reader.readline()
if len(ret) == 0 and reader.at_eof():
raise EOFError()
return ret | python | async def _readline(self, reader):
"""
Readline helper
"""
ret = await reader.readline()
if len(ret) == 0 and reader.at_eof():
raise EOFError()
return ret | ['async', 'def', '_readline', '(', 'self', ',', 'reader', ')', ':', 'ret', '=', 'await', 'reader', '.', 'readline', '(', ')', 'if', 'len', '(', 'ret', ')', '==', '0', 'and', 'reader', '.', 'at_eof', '(', ')', ':', 'raise', 'EOFError', '(', ')', 'return', 'ret'] | Readline helper | ['Readline', 'helper'] | train | https://github.com/witchard/grole/blob/54c0bd13e4d4c74a2997ec4254527d937d6e0565/grole.py#L68-L75 |
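Example sketch: the EOF check matters because StreamReader.readline() returns b'' at end of stream; the same helper exercised against an in-memory asyncio.StreamReader.

import asyncio

async def readline_or_raise(reader):
    # Same contract as the helper above: empty read at EOF -> EOFError.
    ret = await reader.readline()
    if len(ret) == 0 and reader.at_eof():
        raise EOFError()
    return ret

async def main():
    reader = asyncio.StreamReader()
    reader.feed_data(b"GET / HTTP/1.1\r\n")
    reader.feed_eof()
    print(await readline_or_raise(reader))    # b'GET / HTTP/1.1\r\n'
    try:
        await readline_or_raise(reader)        # nothing left -> EOFError
    except EOFError:
        print("EOF reached")

asyncio.run(main())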
2,049 | snobear/ezmomi | ezmomi/ezmomi.py | EZMomi.status | def status(self):
"""Check power status"""
vm = self.get_vm_failfast(self.config['name'])
extra = self.config['extra']
parserFriendly = self.config['parserFriendly']
status_to_print = []
if extra:
status_to_print = \
[["vmname", "powerstate", "ipaddress", "hostname", "memory",
"cpunum", "uuid", "guestid", "uptime"]] + \
[[vm.name, vm.runtime.powerState,
vm.summary.guest.ipAddress or '',
vm.summary.guest.hostName or '',
str(vm.summary.config.memorySizeMB),
str(vm.summary.config.numCpu),
vm.summary.config.uuid, vm.summary.guest.guestId,
str(vm.summary.quickStats.uptimeSeconds) or '0']]
else:
status_to_print = [[vm.name, vm.runtime.powerState]]
if parserFriendly:
self.print_as_lines(status_to_print)
else:
self.print_as_table(status_to_print) | python | def status(self):
"""Check power status"""
vm = self.get_vm_failfast(self.config['name'])
extra = self.config['extra']
parserFriendly = self.config['parserFriendly']
status_to_print = []
if extra:
status_to_print = \
[["vmname", "powerstate", "ipaddress", "hostname", "memory",
"cpunum", "uuid", "guestid", "uptime"]] + \
[[vm.name, vm.runtime.powerState,
vm.summary.guest.ipAddress or '',
vm.summary.guest.hostName or '',
str(vm.summary.config.memorySizeMB),
str(vm.summary.config.numCpu),
vm.summary.config.uuid, vm.summary.guest.guestId,
str(vm.summary.quickStats.uptimeSeconds) or '0']]
else:
status_to_print = [[vm.name, vm.runtime.powerState]]
if parserFriendly:
self.print_as_lines(status_to_print)
else:
self.print_as_table(status_to_print) | ['def', 'status', '(', 'self', ')', ':', 'vm', '=', 'self', '.', 'get_vm_failfast', '(', 'self', '.', 'config', '[', "'name'", ']', ')', 'extra', '=', 'self', '.', 'config', '[', "'extra'", ']', 'parserFriendly', '=', 'self', '.', 'config', '[', "'parserFriendly'", ']', 'status_to_print', '=', '[', ']', 'if', 'extra', ':', 'status_to_print', '=', '[', '[', '"vmname"', ',', '"powerstate"', ',', '"ipaddress"', ',', '"hostname"', ',', '"memory"', ',', '"cpunum"', ',', '"uuid"', ',', '"guestid"', ',', '"uptime"', ']', ']', '+', '[', '[', 'vm', '.', 'name', ',', 'vm', '.', 'runtime', '.', 'powerState', ',', 'vm', '.', 'summary', '.', 'guest', '.', 'ipAddress', 'or', "''", ',', 'vm', '.', 'summary', '.', 'guest', '.', 'hostName', 'or', "''", ',', 'str', '(', 'vm', '.', 'summary', '.', 'config', '.', 'memorySizeMB', ')', ',', 'str', '(', 'vm', '.', 'summary', '.', 'config', '.', 'numCpu', ')', ',', 'vm', '.', 'summary', '.', 'config', '.', 'uuid', ',', 'vm', '.', 'summary', '.', 'guest', '.', 'guestId', ',', 'str', '(', 'vm', '.', 'summary', '.', 'quickStats', '.', 'uptimeSeconds', ')', 'or', "'0'", ']', ']', 'else', ':', 'status_to_print', '=', '[', '[', 'vm', '.', 'name', ',', 'vm', '.', 'runtime', '.', 'powerState', ']', ']', 'if', 'parserFriendly', ':', 'self', '.', 'print_as_lines', '(', 'status_to_print', ')', 'else', ':', 'self', '.', 'print_as_table', '(', 'status_to_print', ')'] | Check power status | ['Check', 'power', 'status'] | train | https://github.com/snobear/ezmomi/blob/c98e26dc2d32cd5c92134fdcbcb8353540ac0208/ezmomi/ezmomi.py#L522-L546 |
2,050 | awslabs/sockeye | sockeye/encoder.py | EncoderSequence.encode | def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
for encoder in self.encoders:
data, data_length, seq_len = encoder.encode(data, data_length, seq_len)
return data, data_length, seq_len | python | def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
for encoder in self.encoders:
data, data_length, seq_len = encoder.encode(data, data_length, seq_len)
return data, data_length, seq_len | ['def', 'encode', '(', 'self', ',', 'data', ':', 'mx', '.', 'sym', '.', 'Symbol', ',', 'data_length', ':', 'mx', '.', 'sym', '.', 'Symbol', ',', 'seq_len', ':', 'int', ')', '->', 'Tuple', '[', 'mx', '.', 'sym', '.', 'Symbol', ',', 'mx', '.', 'sym', '.', 'Symbol', ',', 'int', ']', ':', 'for', 'encoder', 'in', 'self', '.', 'encoders', ':', 'data', ',', 'data_length', ',', 'seq_len', '=', 'encoder', '.', 'encode', '(', 'data', ',', 'data_length', ',', 'seq_len', ')', 'return', 'data', ',', 'data_length', ',', 'seq_len'] | Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len). | ['Encodes', 'data', 'given', 'sequence', 'lengths', 'of', 'individual', 'examples', 'and', 'maximum', 'sequence', 'length', '.'] | train | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/encoder.py#L717-L731 |
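Example sketch (framework-free): EncoderSequence.encode simply threads (data, data_length, seq_len) through each encoder in order; the same chaining with two toy encoders.

class UppercaseEncoder:
    def encode(self, data, data_length, seq_len):
        return data.upper(), data_length, seq_len

class TruncateEncoder:
    def __init__(self, max_len):
        self.max_len = max_len
    def encode(self, data, data_length, seq_len):
        new_len = min(seq_len, self.max_len)
        return data[:new_len], min(data_length, new_len), new_len

encoders = [UppercaseEncoder(), TruncateEncoder(max_len=5)]
data, data_length, seq_len = "hello world", 11, 11
for encoder in encoders:   # same loop as EncoderSequence.encode
    data, data_length, seq_len = encoder.encode(data, data_length, seq_len)
print(data, data_length, seq_len)   # HELLO 5 5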
2,051 | basho/riak-python-client | riak/mapreduce.py | RiakMapReduce.add_key_filter | def add_key_filter(self, *args):
"""
Add a single key filter to the inputs.
:param args: a filter
:type args: list
:rtype: :class:`RiakMapReduce`
"""
if self._input_mode == 'query':
raise ValueError('Key filters are not supported in a query.')
self._key_filters.append(args)
return self | python | def add_key_filter(self, *args):
"""
Add a single key filter to the inputs.
:param args: a filter
:type args: list
:rtype: :class:`RiakMapReduce`
"""
if self._input_mode == 'query':
raise ValueError('Key filters are not supported in a query.')
self._key_filters.append(args)
return self | ['def', 'add_key_filter', '(', 'self', ',', '*', 'args', ')', ':', 'if', 'self', '.', '_input_mode', '==', "'query'", ':', 'raise', 'ValueError', '(', "'Key filters are not supported in a query.'", ')', 'self', '.', '_key_filters', '.', 'append', '(', 'args', ')', 'return', 'self'] | Add a single key filter to the inputs.
:param args: a filter
:type args: list
:rtype: :class:`RiakMapReduce` | ['Add', 'a', 'single', 'key', 'filter', 'to', 'the', 'inputs', '.'] | train | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/mapreduce.py#L160-L172 |
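Example usage sketch: only add_key_filter comes from the code above; the client/constructor calls, add_bucket, and the concrete filter names ("tokenize", "eq") are assumptions about the riak client and Riak's key-filter vocabulary.

import riak
from riak.mapreduce import RiakMapReduce

client = riak.RiakClient()          # connection details are assumptions
mr = RiakMapReduce(client)
mr.add_bucket('logs')               # key filters require bucket inputs (method name assumed)
mr.add_key_filter('tokenize', '-', 1)   # split keys on '-' and keep the first field
mr.add_key_filter('eq', '2019')         # keep keys whose first field equals '2019'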
2,052 | Neurosim-lab/netpyne | netpyne/support/recxelectrode.py | RecXElectrode.calcTransferResistance | def calcTransferResistance(self, gid, seg_coords):
"""Precompute mapping from segment to electrode locations"""
sigma = 0.3 # mS/mm
# Value used in NEURON extracellular recording example ("extracellular_stim_and_rec")
# rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm
# rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm
# equivalent sigma value (~3) is 10x larger than Allen (0.3)
# if use same sigma value, results are consistent
r05 = (seg_coords['p0'] + seg_coords['p1'])/2
dl = seg_coords['p1'] - seg_coords['p0']
nseg = r05.shape[1]
tr = np.zeros((self.nsites,nseg))
# tr_NEURON = np.zeros((self.nsites,nseg)) # used to compare with NEURON extracellular example
for j in range(self.nsites): # calculate mapping for each site on the electrode
rel = np.expand_dims(self.pos[:, j], axis=1) # coordinates of a j-th site on the electrode
rel_05 = rel - r05 # distance between electrode and segment centers
r2 = np.einsum('ij,ij->j', rel_05, rel_05) # compute dot product column-wise, the resulting array has as many columns as original
rlldl = np.einsum('ij,ij->j', rel_05, dl) # compute dot product column-wise, the resulting array has as many columns as original
dlmag = np.linalg.norm(dl, axis=0) # length of each segment
rll = abs(rlldl/dlmag) # component of r parallel to the segment axis it must be always positive
rT2 = r2 - rll**2 # square of perpendicular component
up = rll + dlmag/2
low = rll - dlmag/2
num = up + np.sqrt(up**2 + rT2)
den = low + np.sqrt(low**2 + rT2)
tr[j, :] = np.log(num/den)/dlmag # units of (1/um) use with imemb_ (total seg current)
# Consistent with NEURON extracellular recording example
# r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2)
# tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01
tr *= 1/(4*math.pi*sigma) # units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm
self.transferResistances[gid] = tr | python | def calcTransferResistance(self, gid, seg_coords):
"""Precompute mapping from segment to electrode locations"""
sigma = 0.3 # mS/mm
# Value used in NEURON extracellular recording example ("extracellular_stim_and_rec")
# rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm
# rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm
# equivalent sigma value (~3) is 10x larger than Allen (0.3)
# if use same sigma value, results are consistent
r05 = (seg_coords['p0'] + seg_coords['p1'])/2
dl = seg_coords['p1'] - seg_coords['p0']
nseg = r05.shape[1]
tr = np.zeros((self.nsites,nseg))
# tr_NEURON = np.zeros((self.nsites,nseg)) # used to compare with NEURON extracellular example
for j in range(self.nsites): # calculate mapping for each site on the electrode
rel = np.expand_dims(self.pos[:, j], axis=1) # coordinates of a j-th site on the electrode
rel_05 = rel - r05 # distance between electrode and segment centers
r2 = np.einsum('ij,ij->j', rel_05, rel_05) # compute dot product column-wise, the resulting array has as many columns as original
rlldl = np.einsum('ij,ij->j', rel_05, dl) # compute dot product column-wise, the resulting array has as many columns as original
dlmag = np.linalg.norm(dl, axis=0) # length of each segment
rll = abs(rlldl/dlmag) # component of r parallel to the segment axis it must be always positive
rT2 = r2 - rll**2 # square of perpendicular component
up = rll + dlmag/2
low = rll - dlmag/2
num = up + np.sqrt(up**2 + rT2)
den = low + np.sqrt(low**2 + rT2)
tr[j, :] = np.log(num/den)/dlmag # units of (1/um) use with imemb_ (total seg current)
# Consistent with NEURON extracellular recording example
# r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2)
# tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01
tr *= 1/(4*math.pi*sigma) # units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm
self.transferResistances[gid] = tr | ['def', 'calcTransferResistance', '(', 'self', ',', 'gid', ',', 'seg_coords', ')', ':', 'sigma', '=', '0.3', '# mS/mm ', '# Value used in NEURON extracellular recording example ("extracellular_stim_and_rec")', '# rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm ', '# rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm', '# equivalent sigma value (~3) is 10x larger than Allen (0.3) ', '# if use same sigma value, results are consistent', 'r05', '=', '(', 'seg_coords', '[', "'p0'", ']', '+', 'seg_coords', '[', "'p1'", ']', ')', '/', '2', 'dl', '=', 'seg_coords', '[', "'p1'", ']', '-', 'seg_coords', '[', "'p0'", ']', 'nseg', '=', 'r05', '.', 'shape', '[', '1', ']', 'tr', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'nsites', ',', 'nseg', ')', ')', '# tr_NEURON = np.zeros((self.nsites,nseg)) # used to compare with NEURON extracellular example', 'for', 'j', 'in', 'range', '(', 'self', '.', 'nsites', ')', ':', '# calculate mapping for each site on the electrode', 'rel', '=', 'np', '.', 'expand_dims', '(', 'self', '.', 'pos', '[', ':', ',', 'j', ']', ',', 'axis', '=', '1', ')', '# coordinates of a j-th site on the electrode', 'rel_05', '=', 'rel', '-', 'r05', '# distance between electrode and segment centers', 'r2', '=', 'np', '.', 'einsum', '(', "'ij,ij->j'", ',', 'rel_05', ',', 'rel_05', ')', '# compute dot product column-wise, the resulting array has as many columns as original', 'rlldl', '=', 'np', '.', 'einsum', '(', "'ij,ij->j'", ',', 'rel_05', ',', 'dl', ')', '# compute dot product column-wise, the resulting array has as many columns as original', 'dlmag', '=', 'np', '.', 'linalg', '.', 'norm', '(', 'dl', ',', 'axis', '=', '0', ')', '# length of each segment', 'rll', '=', 'abs', '(', 'rlldl', '/', 'dlmag', ')', '# component of r parallel to the segment axis it must be always positive', 'rT2', '=', 'r2', '-', 'rll', '**', '2', '# square of perpendicular component', 'up', '=', 'rll', '+', 'dlmag', '/', '2', 'low', '=', 'rll', '-', 'dlmag', '/', '2', 'num', '=', 'up', '+', 'np', '.', 'sqrt', '(', 'up', '**', '2', '+', 'rT2', ')', 'den', '=', 'low', '+', 'np', '.', 'sqrt', '(', 'low', '**', '2', '+', 'rT2', ')', 'tr', '[', 'j', ',', ':', ']', '=', 'np', '.', 'log', '(', 'num', '/', 'den', ')', '/', 'dlmag', '# units of (1/um) use with imemb_ (total seg current)', '# Consistent with NEURON extracellular recording example', '# r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2)', '# tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01', 'tr', '*=', '1', '/', '(', '4', '*', 'math', '.', 'pi', '*', 'sigma', ')', '# units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm', 'self', '.', 'transferResistances', '[', 'gid', ']', '=', 'tr'] | Precompute mapping from segment to electrode locations | ['Precompute', 'mapping', 'from', 'segment', 'to', 'electrode', 'locations'] | train | https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/recxelectrode.py#L67-L105 |
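Example sketch: the per-segment expression above is a line-source approximation; the same arithmetic for one electrode site and one segment, with made-up coordinates.

import math
import numpy as np

sigma = 0.3                               # mS/mm, as in the method above
p0 = np.array([0.0, 0.0, 0.0])            # segment start (um)
p1 = np.array([0.0, 10.0, 0.0])           # segment end (um)
site = np.array([20.0, 5.0, 0.0])         # electrode site (um)

r05 = (p0 + p1) / 2
dl = p1 - p0
rel = site - r05
r2 = float(rel @ rel)
dlmag = np.linalg.norm(dl)
rll = abs(float(rel @ dl) / dlmag)        # component parallel to the segment axis
rT2 = r2 - rll ** 2                       # squared perpendicular component
up, low = rll + dlmag / 2, rll - dlmag / 2
num = up + math.sqrt(up ** 2 + rT2)
den = low + math.sqrt(low ** 2 + rT2)
# Transfer resistance, following the unit bookkeeping in the code above (MOhm).
tr = math.log(num / den) / dlmag / (4 * math.pi * sigma)
print(tr)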
2,053 | GNS3/gns3-server | gns3server/compute/vmware/vmware_vm.py | VMwareVM.adapters | def adapters(self, adapters):
"""
Sets the number of Ethernet adapters for this VMware VM instance.
:param adapters: number of adapters
"""
# VMware VMs are limited to 10 adapters
if adapters > 10:
raise VMwareError("Number of adapters above the maximum supported of 10")
self._ethernet_adapters.clear()
for adapter_number in range(0, adapters):
self._ethernet_adapters[adapter_number] = EthernetAdapter()
self._adapters = len(self._ethernet_adapters)
log.info("VMware VM '{name}' [{id}] has changed the number of Ethernet adapters to {adapters}".format(name=self.name,
id=self.id,
adapters=adapters)) | python | def adapters(self, adapters):
"""
Sets the number of Ethernet adapters for this VMware VM instance.
:param adapters: number of adapters
"""
# VMware VMs are limited to 10 adapters
if adapters > 10:
raise VMwareError("Number of adapters above the maximum supported of 10")
self._ethernet_adapters.clear()
for adapter_number in range(0, adapters):
self._ethernet_adapters[adapter_number] = EthernetAdapter()
self._adapters = len(self._ethernet_adapters)
log.info("VMware VM '{name}' [{id}] has changed the number of Ethernet adapters to {adapters}".format(name=self.name,
id=self.id,
adapters=adapters)) | ['def', 'adapters', '(', 'self', ',', 'adapters', ')', ':', '# VMware VMs are limited to 10 adapters', 'if', 'adapters', '>', '10', ':', 'raise', 'VMwareError', '(', '"Number of adapters above the maximum supported of 10"', ')', 'self', '.', '_ethernet_adapters', '.', 'clear', '(', ')', 'for', 'adapter_number', 'in', 'range', '(', '0', ',', 'adapters', ')', ':', 'self', '.', '_ethernet_adapters', '[', 'adapter_number', ']', '=', 'EthernetAdapter', '(', ')', 'self', '.', '_adapters', '=', 'len', '(', 'self', '.', '_ethernet_adapters', ')', 'log', '.', 'info', '(', '"VMware VM \'{name}\' [{id}] has changed the number of Ethernet adapters to {adapters}"', '.', 'format', '(', 'name', '=', 'self', '.', 'name', ',', 'id', '=', 'self', '.', 'id', ',', 'adapters', '=', 'adapters', ')', ')'] | Sets the number of Ethernet adapters for this VMware VM instance.
:param adapters: number of adapters | ['Sets', 'the', 'number', 'of', 'Ethernet', 'adapters', 'for', 'this', 'VMware', 'VM', 'instance', '.'] | train | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vmware/vmware_vm.py#L655-L673 |
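The adapters setter above validates before rebuilding, so a rejected value leaves the existing adapters untouched. A reduced sketch of that pattern follows; the class, the error type, and the adapter placeholder are stand-ins, not the GNS3 implementations.

class AdapterLimitError(Exception):
    pass

class FakeVM:
    MAX_ADAPTERS = 10

    def __init__(self):
        self._ethernet_adapters = {}

    def set_adapters(self, adapters):
        # validate first so a failed call does not clear the current adapters
        if adapters > self.MAX_ADAPTERS:
            raise AdapterLimitError("Number of adapters above the maximum supported of 10")
        self._ethernet_adapters = {n: object() for n in range(adapters)}

vm = FakeVM()
vm.set_adapters(4)
print(len(vm._ethernet_adapters))   # 4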
2,054 | tkhyn/dirsync | dirsync/syncer.py | Syncer.do_work | def do_work(self):
""" Do work """
self._starttime = time.time()
if not os.path.isdir(self._dir2):
if self._maketarget:
if self._verbose:
self.log('Creating directory %s' % self._dir2)
try:
os.makedirs(self._dir2)
self._numnewdirs += 1
except Exception as e:
self.log(str(e))
return None
# All right!
self._mainfunc()
self._endtime = time.time() | python | def do_work(self):
""" Do work """
self._starttime = time.time()
if not os.path.isdir(self._dir2):
if self._maketarget:
if self._verbose:
self.log('Creating directory %s' % self._dir2)
try:
os.makedirs(self._dir2)
self._numnewdirs += 1
except Exception as e:
self.log(str(e))
return None
# All right!
self._mainfunc()
self._endtime = time.time() | ['def', 'do_work', '(', 'self', ')', ':', 'self', '.', '_starttime', '=', 'time', '.', 'time', '(', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'self', '.', '_dir2', ')', ':', 'if', 'self', '.', '_maketarget', ':', 'if', 'self', '.', '_verbose', ':', 'self', '.', 'log', '(', "'Creating directory %s'", '%', 'self', '.', '_dir2', ')', 'try', ':', 'os', '.', 'makedirs', '(', 'self', '.', '_dir2', ')', 'self', '.', '_numnewdirs', '+=', '1', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'log', '(', 'str', '(', 'e', ')', ')', 'return', 'None', '# All right!', 'self', '.', '_mainfunc', '(', ')', 'self', '.', '_endtime', '=', 'time', '.', 'time', '(', ')'] | Do work | ['Do', 'work'] | train | https://github.com/tkhyn/dirsync/blob/a461a6c31a4cf521c1b6a8bcfcd8602e6288e8ce/dirsync/syncer.py#L183-L201 |
2,055 | saltstack/salt | salt/states/net_napalm_yang.py | managed | def managed(name,
data,
**kwargs):
'''
Manage the device configuration given the input data structured
according to the YANG models.
data
YANG structured data.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
compliance_report: ``False``
Return the compliance report in the comment.
.. versionadded:: 2017.7.3
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should the config be replaced with the newly generated one?
State SLS example:
.. code-block:: jinja
{%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%}
interfaces_config:
napalm_yang.managed:
- data: {{ expected_config | json }}
- models:
- models.openconfig_interfaces
- debug: true
Pillar example:
.. code-block:: yaml
openconfig_interfaces_cfg:
_kwargs:
filter: true
interfaces:
interface:
Et1:
config:
mtu: 9000
Et2:
config:
description: "description example"
'''
models = kwargs.get('models', None)
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
ret = salt.utils.napalm.default_ret(name)
test = kwargs.get('test', False) or __opts__.get('test', False)
debug = kwargs.get('debug', False) or __opts__.get('debug', False)
commit = kwargs.get('commit', True) or __opts__.get('commit', True)
replace = kwargs.get('replace', False) or __opts__.get('replace', False)
return_compliance_report = kwargs.get('compliance_report', False) or __opts__.get('compliance_report', False)
profiles = kwargs.get('profiles', [])
temp_file = __salt__['temp.file']()
log.debug('Creating temp file: %s', temp_file)
if 'to_dict' not in data:
data = {'to_dict': data}
data = [data]
with salt.utils.files.fopen(temp_file, 'w') as file_handle:
salt.utils.yaml.safe_dump(
salt.utils.json.loads(salt.utils.json.dumps(data)),
file_handle,
encoding='utf-8'
)
device_config = __salt__['napalm_yang.parse'](*models,
config=True,
profiles=profiles)
log.debug('Parsed the config from the device:')
log.debug(device_config)
compliance_report = __salt__['napalm_yang.compliance_report'](device_config,
*models,
filepath=temp_file)
log.debug('Compliance report:')
log.debug(compliance_report)
complies = compliance_report.get('complies', False)
if complies:
ret.update({
'result': True,
'comment': 'Already configured as required.'
})
log.debug('All good here.')
return ret
log.debug('Does not comply, trying to generate and load config')
data = data[0]['to_dict']
if '_kwargs' in data:
data.pop('_kwargs')
loaded_changes = __salt__['napalm_yang.load_config'](data,
*models,
profiles=profiles,
test=test,
debug=debug,
commit=commit,
replace=replace)
log.debug('Loaded config result:')
log.debug(loaded_changes)
__salt__['file.remove'](temp_file)
loaded_changes['compliance_report'] = compliance_report
return salt.utils.napalm.loaded_ret(ret,
loaded_changes,
test,
debug,
opts=__opts__,
compliance_report=return_compliance_report) | python | def managed(name,
data,
**kwargs):
'''
Manage the device configuration given the input data structured
according to the YANG models.
data
YANG structured data.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
compliance_report: ``False``
Return the compliance report in the comment.
.. versionadded:: 2017.7.3
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should the config be replaced with the newly generated one?
State SLS example:
.. code-block:: jinja
{%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%}
interfaces_config:
napalm_yang.managed:
- data: {{ expected_config | json }}
- models:
- models.openconfig_interfaces
- debug: true
Pillar example:
.. code-block:: yaml
openconfig_interfaces_cfg:
_kwargs:
filter: true
interfaces:
interface:
Et1:
config:
mtu: 9000
Et2:
config:
description: "description example"
'''
models = kwargs.get('models', None)
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
ret = salt.utils.napalm.default_ret(name)
test = kwargs.get('test', False) or __opts__.get('test', False)
debug = kwargs.get('debug', False) or __opts__.get('debug', False)
commit = kwargs.get('commit', True) or __opts__.get('commit', True)
replace = kwargs.get('replace', False) or __opts__.get('replace', False)
return_compliance_report = kwargs.get('compliance_report', False) or __opts__.get('compliance_report', False)
profiles = kwargs.get('profiles', [])
temp_file = __salt__['temp.file']()
log.debug('Creating temp file: %s', temp_file)
if 'to_dict' not in data:
data = {'to_dict': data}
data = [data]
with salt.utils.files.fopen(temp_file, 'w') as file_handle:
salt.utils.yaml.safe_dump(
salt.utils.json.loads(salt.utils.json.dumps(data)),
file_handle,
encoding='utf-8'
)
device_config = __salt__['napalm_yang.parse'](*models,
config=True,
profiles=profiles)
log.debug('Parsed the config from the device:')
log.debug(device_config)
compliance_report = __salt__['napalm_yang.compliance_report'](device_config,
*models,
filepath=temp_file)
log.debug('Compliance report:')
log.debug(compliance_report)
complies = compliance_report.get('complies', False)
if complies:
ret.update({
'result': True,
'comment': 'Already configured as required.'
})
log.debug('All good here.')
return ret
log.debug('Does not comply, trying to generate and load config')
data = data[0]['to_dict']
if '_kwargs' in data:
data.pop('_kwargs')
loaded_changes = __salt__['napalm_yang.load_config'](data,
*models,
profiles=profiles,
test=test,
debug=debug,
commit=commit,
replace=replace)
log.debug('Loaded config result:')
log.debug(loaded_changes)
__salt__['file.remove'](temp_file)
loaded_changes['compliance_report'] = compliance_report
return salt.utils.napalm.loaded_ret(ret,
loaded_changes,
test,
debug,
opts=__opts__,
compliance_report=return_compliance_report) | ['def', 'managed', '(', 'name', ',', 'data', ',', '*', '*', 'kwargs', ')', ':', 'models', '=', 'kwargs', '.', 'get', '(', "'models'", ',', 'None', ')', 'if', 'isinstance', '(', 'models', ',', 'tuple', ')', 'and', 'isinstance', '(', 'models', '[', '0', ']', ',', 'list', ')', ':', 'models', '=', 'models', '[', '0', ']', 'ret', '=', 'salt', '.', 'utils', '.', 'napalm', '.', 'default_ret', '(', 'name', ')', 'test', '=', 'kwargs', '.', 'get', '(', "'test'", ',', 'False', ')', 'or', '__opts__', '.', 'get', '(', "'test'", ',', 'False', ')', 'debug', '=', 'kwargs', '.', 'get', '(', "'debug'", ',', 'False', ')', 'or', '__opts__', '.', 'get', '(', "'debug'", ',', 'False', ')', 'commit', '=', 'kwargs', '.', 'get', '(', "'commit'", ',', 'True', ')', 'or', '__opts__', '.', 'get', '(', "'commit'", ',', 'True', ')', 'replace', '=', 'kwargs', '.', 'get', '(', "'replace'", ',', 'False', ')', 'or', '__opts__', '.', 'get', '(', "'replace'", ',', 'False', ')', 'return_compliance_report', '=', 'kwargs', '.', 'get', '(', "'compliance_report'", ',', 'False', ')', 'or', '__opts__', '.', 'get', '(', "'compliance_report'", ',', 'False', ')', 'profiles', '=', 'kwargs', '.', 'get', '(', "'profiles'", ',', '[', ']', ')', 'temp_file', '=', '__salt__', '[', "'temp.file'", ']', '(', ')', 'log', '.', 'debug', '(', "'Creating temp file: %s'", ',', 'temp_file', ')', 'if', "'to_dict'", 'not', 'in', 'data', ':', 'data', '=', '{', "'to_dict'", ':', 'data', '}', 'data', '=', '[', 'data', ']', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'temp_file', ',', "'w'", ')', 'as', 'file_handle', ':', 'salt', '.', 'utils', '.', 'yaml', '.', 'safe_dump', '(', 'salt', '.', 'utils', '.', 'json', '.', 'loads', '(', 'salt', '.', 'utils', '.', 'json', '.', 'dumps', '(', 'data', ')', ')', ',', 'file_handle', ',', 'encoding', '=', "'utf-8'", ')', 'device_config', '=', '__salt__', '[', "'napalm_yang.parse'", ']', '(', '*', 'models', ',', 'config', '=', 'True', ',', 'profiles', '=', 'profiles', ')', 'log', '.', 'debug', '(', "'Parsed the config from the device:'", ')', 'log', '.', 'debug', '(', 'device_config', ')', 'compliance_report', '=', '__salt__', '[', "'napalm_yang.compliance_report'", ']', '(', 'device_config', ',', '*', 'models', ',', 'filepath', '=', 'temp_file', ')', 'log', '.', 'debug', '(', "'Compliance report:'", ')', 'log', '.', 'debug', '(', 'compliance_report', ')', 'complies', '=', 'compliance_report', '.', 'get', '(', "'complies'", ',', 'False', ')', 'if', 'complies', ':', 'ret', '.', 'update', '(', '{', "'result'", ':', 'True', ',', "'comment'", ':', "'Already configured as required.'", '}', ')', 'log', '.', 'debug', '(', "'All good here.'", ')', 'return', 'ret', 'log', '.', 'debug', '(', "'Does not comply, trying to generate and load config'", ')', 'data', '=', 'data', '[', '0', ']', '[', "'to_dict'", ']', 'if', "'_kwargs'", 'in', 'data', ':', 'data', '.', 'pop', '(', "'_kwargs'", ')', 'loaded_changes', '=', '__salt__', '[', "'napalm_yang.load_config'", ']', '(', 'data', ',', '*', 'models', ',', 'profiles', '=', 'profiles', ',', 'test', '=', 'test', ',', 'debug', '=', 'debug', ',', 'commit', '=', 'commit', ',', 'replace', '=', 'replace', ')', 'log', '.', 'debug', '(', "'Loaded config result:'", ')', 'log', '.', 'debug', '(', 'loaded_changes', ')', '__salt__', '[', "'file.remove'", ']', '(', 'temp_file', ')', 'loaded_changes', '[', "'compliance_report'", ']', '=', 'compliance_report', 'return', 'salt', '.', 'utils', '.', 'napalm', '.', 'loaded_ret', '(', 'ret', 
',', 'loaded_changes', ',', 'test', ',', 'debug', ',', 'opts', '=', '__opts__', ',', 'compliance_report', '=', 'return_compliance_report', ')'] | Manage the device configuration given the input data structured
according to the YANG models.
data
YANG structured data.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
compliance_report: ``False``
Return the compliance report in the comment.
.. versionadded:: 2017.7.3
test: ``False``
Dry run? If set as ``True``, will apply the config, discard
and return the changes. Default: ``False`` and will commit
the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should the config be replaced with the newly generated one?
State SLS example:
.. code-block:: jinja
{%- set expected_config = pillar.get('openconfig_interfaces_cfg') -%}
interfaces_config:
napalm_yang.managed:
- data: {{ expected_config | json }}
- models:
- models.openconfig_interfaces
- debug: true
Pillar example:
.. code-block:: yaml
openconfig_interfaces_cfg:
_kwargs:
filter: true
interfaces:
interface:
Et1:
config:
mtu: 9000
Et2:
config:
description: "description example" | ['Manage', 'the', 'device', 'configuration', 'given', 'the', 'input', 'data', 'structured', 'according', 'to', 'the', 'YANG', 'models', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/net_napalm_yang.py#L78-L202 |
2,056 | Robin8Put/pmes | storage/rpc_methods.py | StorageTable.mailed_confirm | async def mailed_confirm(self, **params):
"""Sends mail to user after offer receiveing
Accepts:
- cid
- buyer address
- price
- offer_type
- point
- coinid
"""
if not params:
return {"error":400, "reason":"Missed required fields"}
# Check if required fields exists
cid = params.get("cid")
buyer_address = params.get("buyer_address")
price = params.get("price")
offer_type = params.get("offer_type")
coinid = params.get("coinid").upper()
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Check if required fileds
if not all([cid, buyer_address, price]):
return {"error":400, "reason":"Missed required fields"}
# Get content owner address
#if coinid in settings.AVAILABLE_COIN_ID:
# client_bridge.endpoint = settings.bridges[coinid]
#else:
# return {"error":400, "reason":"Invalid coin ID"}
#owneraddr = await client_bridge.request(method_name="ownerbycid", cid=cid)
# Send appropriate mail to seller if exists
#seller = await getaccountbywallet(wallet=owneraddr)
#logging.debug(seller)
#if "error" in seller.keys():
# return seller
#if seller.get("email"):
# emaildata = {
# "to": seller["email"],
# "subject": "Robin8 support",
# "optional": "You`ve got a new offer from %s" % seller["public_key"]
#
# }
# await client_email.request(method_name="sendmail", **emaildata)
# Send news for seller
buyer = await getaccountbywallet(wallet=buyer_address)
if "error" in buyer.keys():
buyer["public_key"] = None
newsdata = {
"event_type":"made offer",
"cid": cid,
"access_string":buyer["public_key"],
"buyer_pubkey":buyer["public_key"],
"buyer_address":buyer_address,
#"owneraddr":owneraddr,
"price": price,
"offer_type": offer_type,
"coinid":coinid
}
news = await self.insert_news(**newsdata)
return {"result":"ok"} | python | async def mailed_confirm(self, **params):
"""Sends mail to user after offer receiveing
Accepts:
- cid
- buyer address
- price
- offer_type
- point
- coinid
"""
if not params:
return {"error":400, "reason":"Missed required fields"}
# Check if required fields exists
cid = params.get("cid")
buyer_address = params.get("buyer_address")
price = params.get("price")
offer_type = params.get("offer_type")
coinid = params.get("coinid").upper()
try:
coinid = coinid.replace("TEST", "")
except:
pass
# Check if required fileds
if not all([cid, buyer_address, price]):
return {"error":400, "reason":"Missed required fields"}
# Get content owner address
#if coinid in settings.AVAILABLE_COIN_ID:
# client_bridge.endpoint = settings.bridges[coinid]
#else:
# return {"error":400, "reason":"Invalid coin ID"}
#owneraddr = await client_bridge.request(method_name="ownerbycid", cid=cid)
# Send appropriate mail to seller if exists
#seller = await getaccountbywallet(wallet=owneraddr)
#logging.debug(seller)
#if "error" in seller.keys():
# return seller
#if seller.get("email"):
# emaildata = {
# "to": seller["email"],
# "subject": "Robin8 support",
# "optional": "You`ve got a new offer from %s" % seller["public_key"]
#
# }
# await client_email.request(method_name="sendmail", **emaildata)
# Send news for seller
buyer = await getaccountbywallet(wallet=buyer_address)
if "error" in buyer.keys():
buyer["public_key"] = None
newsdata = {
"event_type":"made offer",
"cid": cid,
"access_string":buyer["public_key"],
"buyer_pubkey":buyer["public_key"],
"buyer_address":buyer_address,
#"owneraddr":owneraddr,
"price": price,
"offer_type": offer_type,
"coinid":coinid
}
news = await self.insert_news(**newsdata)
return {"result":"ok"} | ['async', 'def', 'mailed_confirm', '(', 'self', ',', '*', '*', 'params', ')', ':', 'if', 'not', 'params', ':', 'return', '{', '"error"', ':', '400', ',', '"reason"', ':', '"Missed required fields"', '}', '# Check if required fields exists', 'cid', '=', 'params', '.', 'get', '(', '"cid"', ')', 'buyer_address', '=', 'params', '.', 'get', '(', '"buyer_address"', ')', 'price', '=', 'params', '.', 'get', '(', '"price"', ')', 'offer_type', '=', 'params', '.', 'get', '(', '"offer_type"', ')', 'coinid', '=', 'params', '.', 'get', '(', '"coinid"', ')', '.', 'upper', '(', ')', 'try', ':', 'coinid', '=', 'coinid', '.', 'replace', '(', '"TEST"', ',', '""', ')', 'except', ':', 'pass', '# Check if required fileds ', 'if', 'not', 'all', '(', '[', 'cid', ',', 'buyer_address', ',', 'price', ']', ')', ':', 'return', '{', '"error"', ':', '400', ',', '"reason"', ':', '"Missed required fields"', '}', '# Get content owner address', '#if coinid in settings.AVAILABLE_COIN_ID:', '#\tclient_bridge.endpoint = settings.bridges[coinid]', '#else:', '#\treturn {"error":400, "reason":"Invalid coin ID"}', '#owneraddr = await client_bridge.request(method_name="ownerbycid", cid=cid)', '# Send appropriate mail to seller if exists', '#seller = await getaccountbywallet(wallet=owneraddr)', '#logging.debug(seller)', '#if "error" in seller.keys():', '#\treturn seller', '#if seller.get("email"):', '#\temaildata = {', '#\t\t"to": seller["email"],', '#\t\t"subject": "Robin8 support",', '#\t\t"optional": "You`ve got a new offer from %s" % seller["public_key"]', '#', '#\t}', '#\tawait client_email.request(method_name="sendmail", **emaildata)', '# Send news for seller', 'buyer', '=', 'await', 'getaccountbywallet', '(', 'wallet', '=', 'buyer_address', ')', 'if', '"error"', 'in', 'buyer', '.', 'keys', '(', ')', ':', 'buyer', '[', '"public_key"', ']', '=', 'None', 'newsdata', '=', '{', '"event_type"', ':', '"made offer"', ',', '"cid"', ':', 'cid', ',', '"access_string"', ':', 'buyer', '[', '"public_key"', ']', ',', '"buyer_pubkey"', ':', 'buyer', '[', '"public_key"', ']', ',', '"buyer_address"', ':', 'buyer_address', ',', '#"owneraddr":owneraddr,', '"price"', ':', 'price', ',', '"offer_type"', ':', 'offer_type', ',', '"coinid"', ':', 'coinid', '}', 'news', '=', 'await', 'self', '.', 'insert_news', '(', '*', '*', 'newsdata', ')', 'return', '{', '"result"', ':', '"ok"', '}'] | Sends mail to user after offer receiveing
Accepts:
- cid
- buyer address
- price
- offer_type
- point
- coinid | ['Sends', 'mail', 'to', 'user', 'after', 'offer', 'receiveing', 'Accepts', ':', '-', 'cid', '-', 'buyer', 'address', '-', 'price', '-', 'offer_type', '-', 'point', '-', 'coinid'] | train | https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/storage/rpc_methods.py#L461-L530 |
2,057 | pytroll/satpy | satpy/multiscene.py | stack | def stack(datasets):
"""First dataset at the bottom."""
base = datasets[0].copy()
for dataset in datasets[1:]:
base = base.where(dataset.isnull(), dataset)
return base | python | def stack(datasets):
"""First dataset at the bottom."""
base = datasets[0].copy()
for dataset in datasets[1:]:
base = base.where(dataset.isnull(), dataset)
return base | ['def', 'stack', '(', 'datasets', ')', ':', 'base', '=', 'datasets', '[', '0', ']', '.', 'copy', '(', ')', 'for', 'dataset', 'in', 'datasets', '[', '1', ':', ']', ':', 'base', '=', 'base', '.', 'where', '(', 'dataset', '.', 'isnull', '(', ')', ',', 'dataset', ')', 'return', 'base'] | First dataset at the bottom. | ['First', 'dataset', 'at', 'the', 'bottom', '.'] | train | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/multiscene.py#L56-L61 |
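The stack() record works by letting each later dataset overwrite the base wherever it has data. A small check with plain xarray DataArrays (not satpy Scene datasets) makes the where(dataset.isnull(), dataset) behaviour visible:

import numpy as np
import xarray as xr

a = xr.DataArray([1.0, 2.0, np.nan, 4.0])
b = xr.DataArray([np.nan, 20.0, 30.0, np.nan])

base = a.copy()
base = base.where(b.isnull(), b)   # keep base where b is NaN, otherwise take b
print(base.values)                 # [ 1. 20. 30.  4.]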
2,058 | has2k1/plydata | plydata/dataframe/helpers.py | _make_verb_helper | def _make_verb_helper(verb_func, add_groups=False):
"""
Create function that prepares verb for the verb function
The functions created add expressions to be evaluated to
the verb, then call the core verb function
Parameters
----------
verb_func : function
Core verb function. This is the function called after
expressions created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb.
"""
@wraps(verb_func)
def _verb_func(verb):
verb.expressions, new_columns = build_expressions(verb)
if add_groups:
verb.groups = new_columns
return verb_func(verb)
return _verb_func | python | def _make_verb_helper(verb_func, add_groups=False):
"""
Create function that prepares verb for the verb function
The functions created add expressions to be evaluated to
the verb, then call the core verb function
Parameters
----------
verb_func : function
Core verb function. This is the function called after
expressions created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb.
"""
@wraps(verb_func)
def _verb_func(verb):
verb.expressions, new_columns = build_expressions(verb)
if add_groups:
verb.groups = new_columns
return verb_func(verb)
return _verb_func | ['def', '_make_verb_helper', '(', 'verb_func', ',', 'add_groups', '=', 'False', ')', ':', '@', 'wraps', '(', 'verb_func', ')', 'def', '_verb_func', '(', 'verb', ')', ':', 'verb', '.', 'expressions', ',', 'new_columns', '=', 'build_expressions', '(', 'verb', ')', 'if', 'add_groups', ':', 'verb', '.', 'groups', '=', 'new_columns', 'return', 'verb_func', '(', 'verb', ')', 'return', '_verb_func'] | Create function that prepares verb for the verb function
The functions created add expressions to be evaluated to
the verb, then call the core verb function
Parameters
----------
verb_func : function
Core verb function. This is the function called after
expressions created and added to the verb. The core
function should be one of those that implement verbs that
evaluate expressions.
add_groups : bool
If True, a groups attribute is added to the verb. The
groups are the columns created after evaluating the
expressions.
Returns
-------
out : function
A function that implements a helper verb. | ['Create', 'function', 'that', 'prepares', 'verb', 'for', 'the', 'verb', 'function'] | train | https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/helpers.py#L156-L188 |
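_make_verb_helper is a decorator-style factory: it builds the expressions, optionally records them as groups, then delegates to the core verb function. A generic sketch of that wrapping pattern follows; the Verb class, the expression values, and the core function are invented for illustration.

from functools import wraps

def make_helper(core_func, add_groups=False):
    @wraps(core_func)
    def helper(verb):
        # stand-in for build_expressions(verb)
        verb.expressions, new_columns = ["x + 1"], ["x_plus_1"]
        if add_groups:
            verb.groups = new_columns
        return core_func(verb)
    return helper

class Verb:
    pass

def core(verb):
    return verb.expressions

grouped_core = make_helper(core, add_groups=True)
v = Verb()
print(grouped_core(v), v.groups)   # ['x + 1'] ['x_plus_1']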
2,059 | allenai/allennlp | allennlp/common/file_utils.py | split_s3_path | def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path | python | def split_s3_path(url: str) -> Tuple[str, str]:
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path | ['def', 'split_s3_path', '(', 'url', ':', 'str', ')', '->', 'Tuple', '[', 'str', ',', 'str', ']', ':', 'parsed', '=', 'urlparse', '(', 'url', ')', 'if', 'not', 'parsed', '.', 'netloc', 'or', 'not', 'parsed', '.', 'path', ':', 'raise', 'ValueError', '(', '"bad s3 path {}"', '.', 'format', '(', 'url', ')', ')', 'bucket_name', '=', 'parsed', '.', 'netloc', 's3_path', '=', 'parsed', '.', 'path', "# Remove '/' at beginning of path.", 'if', 's3_path', '.', 'startswith', '(', '"/"', ')', ':', 's3_path', '=', 's3_path', '[', '1', ':', ']', 'return', 'bucket_name', ',', 's3_path'] | Split a full s3 path into the bucket name and path. | ['Split', 'a', 'full', 's3', 'path', 'into', 'the', 'bucket', 'name', 'and', 'path', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/file_utils.py#L120-L130 |
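A quick standard-library check of what split_s3_path's urlparse-based splitting produces; the URL is an arbitrary example and lstrip("/") stands in for the single leading-slash strip in the record:

from urllib.parse import urlparse

url = "s3://my-bucket/models/weights.th"
parsed = urlparse(url)
bucket, path = parsed.netloc, parsed.path.lstrip("/")
print(bucket, path)   # my-bucket models/weights.th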
2,060 | tanghaibao/jcvi | jcvi/annotation/ahrd.py | merge | def merge(args):
"""
%prog merge output/*.csv > ahrd.csv
Merge AHRD results, remove redundant headers, empty lines, etc. If there are
multiple lines containing the same ID (first column), then whichever comes
first will be retained.
"""
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
csvfiles = args
cf = csvfiles[0]
fp = open(cf)
for row in fp:
if row.startswith("Protein"):
break
header = row.rstrip()
print(header)
seen = set()
for cf in csvfiles:
fp = open(cf)
for row in fp:
if row[0] == '#':
continue
if row.strip() == "":
continue
if row.strip() == header:
continue
atoms = row.rstrip().split("\t")
id = atoms[0]
if id in seen:
logging.error("ID `{0}` ignored.".format(id))
continue
seen.add(id)
print(row.strip()) | python | def merge(args):
"""
%prog merge output/*.csv > ahrd.csv
Merge AHRD results, remove redundant headers, empty lines, etc. If there are
multiple lines containing the same ID (first column), then whichever comes
first will be retained.
"""
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
csvfiles = args
cf = csvfiles[0]
fp = open(cf)
for row in fp:
if row.startswith("Protein"):
break
header = row.rstrip()
print(header)
seen = set()
for cf in csvfiles:
fp = open(cf)
for row in fp:
if row[0] == '#':
continue
if row.strip() == "":
continue
if row.strip() == header:
continue
atoms = row.rstrip().split("\t")
id = atoms[0]
if id in seen:
logging.error("ID `{0}` ignored.".format(id))
continue
seen.add(id)
print(row.strip()) | ['def', 'merge', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'merge', '.', '__doc__', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '<', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'csvfiles', '=', 'args', 'cf', '=', 'csvfiles', '[', '0', ']', 'fp', '=', 'open', '(', 'cf', ')', 'for', 'row', 'in', 'fp', ':', 'if', 'row', '.', 'startswith', '(', '"Protein"', ')', ':', 'break', 'header', '=', 'row', '.', 'rstrip', '(', ')', 'print', '(', 'header', ')', 'seen', '=', 'set', '(', ')', 'for', 'cf', 'in', 'csvfiles', ':', 'fp', '=', 'open', '(', 'cf', ')', 'for', 'row', 'in', 'fp', ':', 'if', 'row', '[', '0', ']', '==', "'#'", ':', 'continue', 'if', 'row', '.', 'strip', '(', ')', '==', '""', ':', 'continue', 'if', 'row', '.', 'strip', '(', ')', '==', 'header', ':', 'continue', 'atoms', '=', 'row', '.', 'rstrip', '(', ')', '.', 'split', '(', '"\\t"', ')', 'id', '=', 'atoms', '[', '0', ']', 'if', 'id', 'in', 'seen', ':', 'logging', '.', 'error', '(', '"ID `{0}` ignored."', '.', 'format', '(', 'id', ')', ')', 'continue', 'seen', '.', 'add', '(', 'id', ')', 'print', '(', 'row', '.', 'strip', '(', ')', ')'] | %prog merge output/*.csv > ahrd.csv
Merge AHRD results, remove redundant headers, empty lines, etc. If there are
multiple lines containing the same ID (first column), then whichever comes
first will be retained. | ['%prog', 'merge', 'output', '/', '*', '.', 'csv', '>', 'ahrd', '.', 'csv'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/ahrd.py#L568-L609 |
2,061 | saltstack/salt | salt/modules/win_dsc.py | get_config_status | def get_config_status():
'''
Get the status of the current DSC Configuration
Returns:
dict: A dictionary representing the status of the current DSC
Configuration on the machine
CLI Example:
.. code-block:: bash
salt '*' dsc.get_config_status
'''
cmd = 'Get-DscConfigurationStatus | ' \
'Select-Object -Property HostName, Status, MetaData, ' \
'@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \
'Type, Mode, RebootRequested, NumberofResources'
try:
return _pshell(cmd, ignore_retcode=True)
except CommandExecutionError as exc:
if 'No status information available' in exc.info['stderr']:
raise CommandExecutionError('Not Configured')
raise | python | def get_config_status():
'''
Get the status of the current DSC Configuration
Returns:
dict: A dictionary representing the status of the current DSC
Configuration on the machine
CLI Example:
.. code-block:: bash
salt '*' dsc.get_config_status
'''
cmd = 'Get-DscConfigurationStatus | ' \
'Select-Object -Property HostName, Status, MetaData, ' \
'@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \
'Type, Mode, RebootRequested, NumberofResources'
try:
return _pshell(cmd, ignore_retcode=True)
except CommandExecutionError as exc:
if 'No status information available' in exc.info['stderr']:
raise CommandExecutionError('Not Configured')
raise | ['def', 'get_config_status', '(', ')', ':', 'cmd', '=', "'Get-DscConfigurationStatus | '", "'Select-Object -Property HostName, Status, MetaData, '", '\'@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, \'', "'Type, Mode, RebootRequested, NumberofResources'", 'try', ':', 'return', '_pshell', '(', 'cmd', ',', 'ignore_retcode', '=', 'True', ')', 'except', 'CommandExecutionError', 'as', 'exc', ':', 'if', "'No status information available'", 'in', 'exc', '.', 'info', '[', "'stderr'", ']', ':', 'raise', 'CommandExecutionError', '(', "'Not Configured'", ')', 'raise'] | Get the status of the current DSC Configuration
Returns:
dict: A dictionary representing the status of the current DSC
Configuration on the machine
CLI Example:
.. code-block:: bash
salt '*' dsc.get_config_status | ['Get', 'the', 'status', 'of', 'the', 'current', 'DSC', 'Configuration'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L589-L612 |
2,062 | PmagPy/PmagPy | programs/demag_gui.py | Demag_GUI.update_warning_box | def update_warning_box(self):
"""
updates the warning box with whatever the warning_text variable
contains for this specimen
"""
self.warning_box.Clear()
if self.warning_text == "":
self.warning_box.AppendText("No Problems")
else:
self.warning_box.AppendText(self.warning_text) | python | def update_warning_box(self):
"""
updates the warning box with whatever the warning_text variable
contains for this specimen
"""
self.warning_box.Clear()
if self.warning_text == "":
self.warning_box.AppendText("No Problems")
else:
self.warning_box.AppendText(self.warning_text) | ['def', 'update_warning_box', '(', 'self', ')', ':', 'self', '.', 'warning_box', '.', 'Clear', '(', ')', 'if', 'self', '.', 'warning_text', '==', '""', ':', 'self', '.', 'warning_box', '.', 'AppendText', '(', '"No Problems"', ')', 'else', ':', 'self', '.', 'warning_box', '.', 'AppendText', '(', 'self', '.', 'warning_text', ')'] | updates the warning box with whatever the warning_text variable
contains for this specimen | ['updates', 'the', 'warning', 'box', 'with', 'whatever', 'the', 'warning_text', 'variable', 'contains', 'for', 'this', 'specimen'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L6155-L6164 |
2,063 | gabstopper/smc-python | smc/vpn/route.py | RouteVPN.set_preshared_key | def set_preshared_key(self, new_key):
"""
Set the preshared key for this VPN. A pre-shared key is only
present when the tunnel type is 'VPN' or the encryption mode
is 'transport'.
:return: None
"""
if self.data.get('preshared_key'):
self.update(preshared_key=new_key) | python | def set_preshared_key(self, new_key):
"""
Set the preshared key for this VPN. A pre-shared key is only
present when the tunnel type is 'VPN' or the encryption mode
is 'transport'.
:return: None
"""
if self.data.get('preshared_key'):
self.update(preshared_key=new_key) | ['def', 'set_preshared_key', '(', 'self', ',', 'new_key', ')', ':', 'if', 'self', '.', 'data', '.', 'get', '(', "'preshared_key'", ')', ':', 'self', '.', 'update', '(', 'preshared_key', '=', 'new_key', ')'] | Set the preshared key for this VPN. A pre-shared key is only
present when the tunnel type is 'VPN' or the encryption mode
is 'transport'.
:return: None | ['Set', 'the', 'preshared', 'key', 'for', 'this', 'VPN', '.', 'A', 'pre', '-', 'shared', 'key', 'is', 'only', 'present', 'when', 'the', 'tunnel', 'type', 'is', 'VPN', 'or', 'the', 'encryption', 'mode', 'is', 'transport', '.', ':', 'return', ':', 'None'] | train | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/vpn/route.py#L332-L341 |
2,064 | robotools/fontParts | Lib/fontParts/base/groups.py | BaseGroups._get_side2KerningGroups | def _get_side2KerningGroups(self):
"""
Subclasses may override this method.
"""
found = {}
for name, contents in self.items():
if name.startswith("public.kern2."):
found[name] = contents
return found | python | def _get_side2KerningGroups(self):
"""
Subclasses may override this method.
"""
found = {}
for name, contents in self.items():
if name.startswith("public.kern2."):
found[name] = contents
return found | ['def', '_get_side2KerningGroups', '(', 'self', ')', ':', 'found', '=', '{', '}', 'for', 'name', ',', 'contents', 'in', 'self', '.', 'items', '(', ')', ':', 'if', 'name', '.', 'startswith', '(', '"public.kern2."', ')', ':', 'found', '[', 'name', ']', '=', 'contents', 'return', 'found'] | Subclasses may override this method. | ['Subclasses', 'may', 'override', 'this', 'method', '.'] | train | https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/groups.py#L149-L157 |
2,065 | BeyondTheClouds/enoslib | enoslib/task.py | _make_env | def _make_env(resultdir=None):
"""Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
this function makes a new environment and return it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from.
"""
env = {
"config": {}, # The config
"resultdir": "", # Path to the result directory
"config_file": "", # The initial config file
"nodes": {}, # Roles with nodes
"phase": "", # Last phase that have been run
"user": "", # User id for this job
"cwd": os.getcwd() # Current Working Directory
}
if resultdir:
env_path = os.path.join(resultdir, "env")
if os.path.isfile(env_path):
with open(env_path, "r") as f:
env.update(yaml.load(f))
logger.debug("Loaded environment %s", env_path)
if "config_file" in env and env["config_file"] is not None:
# Resets the configuration of the environment
if os.path.isfile(env["config_file"]):
with open(env["config_file"], "r") as f:
env["config"].update(yaml.load(f))
logger.debug("Reloaded config %s", env["config"])
return env | python | def _make_env(resultdir=None):
"""Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
this function makes a new environment and return it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from.
"""
env = {
"config": {}, # The config
"resultdir": "", # Path to the result directory
"config_file": "", # The initial config file
"nodes": {}, # Roles with nodes
"phase": "", # Last phase that have been run
"user": "", # User id for this job
"cwd": os.getcwd() # Current Working Directory
}
if resultdir:
env_path = os.path.join(resultdir, "env")
if os.path.isfile(env_path):
with open(env_path, "r") as f:
env.update(yaml.load(f))
logger.debug("Loaded environment %s", env_path)
if "config_file" in env and env["config_file"] is not None:
# Resets the configuration of the environment
if os.path.isfile(env["config_file"]):
with open(env["config_file"], "r") as f:
env["config"].update(yaml.load(f))
logger.debug("Reloaded config %s", env["config"])
return env | ['def', '_make_env', '(', 'resultdir', '=', 'None', ')', ':', 'env', '=', '{', '"config"', ':', '{', '}', ',', '# The config', '"resultdir"', ':', '""', ',', '# Path to the result directory', '"config_file"', ':', '""', ',', '# The initial config file', '"nodes"', ':', '{', '}', ',', '# Roles with nodes', '"phase"', ':', '""', ',', '# Last phase that have been run', '"user"', ':', '""', ',', '# User id for this job', '"cwd"', ':', 'os', '.', 'getcwd', '(', ')', '# Current Working Directory', '}', 'if', 'resultdir', ':', 'env_path', '=', 'os', '.', 'path', '.', 'join', '(', 'resultdir', ',', '"env"', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'env_path', ')', ':', 'with', 'open', '(', 'env_path', ',', '"r"', ')', 'as', 'f', ':', 'env', '.', 'update', '(', 'yaml', '.', 'load', '(', 'f', ')', ')', 'logger', '.', 'debug', '(', '"Loaded environment %s"', ',', 'env_path', ')', 'if', '"config_file"', 'in', 'env', 'and', 'env', '[', '"config_file"', ']', 'is', 'not', 'None', ':', '# Resets the configuration of the environment', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'env', '[', '"config_file"', ']', ')', ':', 'with', 'open', '(', 'env', '[', '"config_file"', ']', ',', '"r"', ')', 'as', 'f', ':', 'env', '[', '"config"', ']', '.', 'update', '(', 'yaml', '.', 'load', '(', 'f', ')', ')', 'logger', '.', 'debug', '(', '"Reloaded config %s"', ',', 'env', '[', '"config"', ']', ')', 'return', 'env'] | Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
this function makes a new environment and return it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from. | ['Loads', 'the', 'env', 'from', 'resultdir', 'if', 'not', 'None', 'or', 'makes', 'a', 'new', 'one', '.'] | train | https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L69-L110 |
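_make_env relies on a YAML round trip of the env dict through <resultdir>/env. A rough sketch of that save/load cycle follows; the directory name and keys are illustrative, and safe_dump/safe_load are used here instead of the bare yaml.load in the record.

import os
import yaml

def save_env(env, resultdir):
    with open(os.path.join(resultdir, "env"), "w") as f:
        yaml.safe_dump(env, f)

def load_env(resultdir):
    with open(os.path.join(resultdir, "env")) as f:
        return yaml.safe_load(f)

env = {"config": {"job_name": "demo"}, "phase": "provision", "cwd": os.getcwd()}
os.makedirs("result_demo", exist_ok=True)
save_env(env, "result_demo")
print(load_env("result_demo")["phase"])   # provision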
2,066 | vnmabus/dcor | dcor/_dcor.py | distance_stats_sqr | def distance_stats_sqr(x, y, **kwargs):
"""
distance_stats_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimators for the squared distance covariance
and squared distance correlation between two random vectors, and the
individual squared distance variances.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
Stats
Squared distance covariance, squared distance correlation,
squared distance variance of the first random vector and
squared distance variance of the second random vector.
See Also
--------
distance_covariance_sqr
distance_correlation_sqr
Notes
-----
It is less efficient to compute the statistics separately, rather than
using this function, because some computations can be shared.
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0,
variance_y=52.0)
>>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=1.0, correlation_xy=0.2773500...,
variance_x=52.0, variance_y=0.25)
>>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25,
variance_y=0.25)
>>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308...,
variance_x=2.7209220..., variance_y=0.25)
"""
if _can_use_fast_algorithm(x, y, **kwargs):
return _distance_stats_sqr_fast(x, y)
else:
return _distance_sqr_stats_naive_generic(
x, y,
matrix_centered=_distance_matrix,
product=mean_product,
**kwargs) | python | def distance_stats_sqr(x, y, **kwargs):
"""
distance_stats_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimators for the squared distance covariance
and squared distance correlation between two random vectors, and the
individual squared distance variances.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
Stats
Squared distance covariance, squared distance correlation,
squared distance variance of the first random vector and
squared distance variance of the second random vector.
See Also
--------
distance_covariance_sqr
distance_correlation_sqr
Notes
-----
It is less efficient to compute the statistics separately, rather than
using this function, because some computations can be shared.
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0,
variance_y=52.0)
>>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=1.0, correlation_xy=0.2773500...,
variance_x=52.0, variance_y=0.25)
>>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25,
variance_y=0.25)
>>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308...,
variance_x=2.7209220..., variance_y=0.25)
"""
if _can_use_fast_algorithm(x, y, **kwargs):
return _distance_stats_sqr_fast(x, y)
else:
return _distance_sqr_stats_naive_generic(
x, y,
matrix_centered=_distance_matrix,
product=mean_product,
**kwargs) | ['def', 'distance_stats_sqr', '(', 'x', ',', 'y', ',', '*', '*', 'kwargs', ')', ':', 'if', '_can_use_fast_algorithm', '(', 'x', ',', 'y', ',', '*', '*', 'kwargs', ')', ':', 'return', '_distance_stats_sqr_fast', '(', 'x', ',', 'y', ')', 'else', ':', 'return', '_distance_sqr_stats_naive_generic', '(', 'x', ',', 'y', ',', 'matrix_centered', '=', '_distance_matrix', ',', 'product', '=', 'mean_product', ',', '*', '*', 'kwargs', ')'] | distance_stats_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimators for the squared distance covariance
and squared distance correlation between two random vectors, and the
individual squared distance variances.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
Stats
Squared distance covariance, squared distance correlation,
squared distance variance of the first random vector and
squared distance variance of the second random vector.
See Also
--------
distance_covariance_sqr
distance_correlation_sqr
Notes
-----
It is less efficient to compute the statistics separately, rather than
using this function, because some computations can be shared.
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_stats_sqr(a, a) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=52.0, correlation_xy=1.0, variance_x=52.0,
variance_y=52.0)
>>> dcor.distance_stats_sqr(a, b) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=1.0, correlation_xy=0.2773500...,
variance_x=52.0, variance_y=0.25)
>>> dcor.distance_stats_sqr(b, b) # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=0.25, correlation_xy=1.0, variance_x=0.25,
variance_y=0.25)
>>> dcor.distance_stats_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
Stats(covariance_xy=0.3705904..., correlation_xy=0.4493308...,
variance_x=2.7209220..., variance_y=0.25) | ['distance_stats_sqr', '(', 'x', 'y', '*', 'exponent', '=', '1', ')'] | train | https://github.com/vnmabus/dcor/blob/b0ff1273c0a52efdabdfdadefc7ff2a49def7e8d/dcor/_dcor.py#L537-L609 |
2,067 | spyder-ide/spyder | spyder/plugins/projects/plugin.py | Projects.switch_to_plugin | def switch_to_plugin(self):
"""Switch to plugin."""
# Unmaxizime currently maximized plugin
if (self.main.last_plugin is not None and
self.main.last_plugin.ismaximized and
self.main.last_plugin is not self):
self.main.maximize_dockwidget()
# Show plugin only if it was already visible
if self.get_option('visible_if_project_open'):
if not self.toggle_view_action.isChecked():
self.toggle_view_action.setChecked(True)
self.visibility_changed(True) | python | def switch_to_plugin(self):
"""Switch to plugin."""
# Unmaxizime currently maximized plugin
if (self.main.last_plugin is not None and
self.main.last_plugin.ismaximized and
self.main.last_plugin is not self):
self.main.maximize_dockwidget()
# Show plugin only if it was already visible
if self.get_option('visible_if_project_open'):
if not self.toggle_view_action.isChecked():
self.toggle_view_action.setChecked(True)
self.visibility_changed(True) | ['def', 'switch_to_plugin', '(', 'self', ')', ':', '# Unmaxizime currently maximized plugin\r', 'if', '(', 'self', '.', 'main', '.', 'last_plugin', 'is', 'not', 'None', 'and', 'self', '.', 'main', '.', 'last_plugin', '.', 'ismaximized', 'and', 'self', '.', 'main', '.', 'last_plugin', 'is', 'not', 'self', ')', ':', 'self', '.', 'main', '.', 'maximize_dockwidget', '(', ')', '# Show plugin only if it was already visible\r', 'if', 'self', '.', 'get_option', '(', "'visible_if_project_open'", ')', ':', 'if', 'not', 'self', '.', 'toggle_view_action', '.', 'isChecked', '(', ')', ':', 'self', '.', 'toggle_view_action', '.', 'setChecked', '(', 'True', ')', 'self', '.', 'visibility_changed', '(', 'True', ')'] | Switch to plugin. | ['Switch', 'to', 'plugin', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/plugin.py#L178-L190 |
2,068 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _AddPropertiesForRepeatedField | def _AddPropertiesForRepeatedField(field, cls):
"""Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
_RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
# We define a setter just so we can throw an exception with a more
# helpful error message.
def setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc)) | python | def _AddPropertiesForRepeatedField(field, cls):
"""Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
_RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
# We define a setter just so we can throw an exception with a more
# helpful error message.
def setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc)) | ['def', '_AddPropertiesForRepeatedField', '(', 'field', ',', 'cls', ')', ':', 'proto_field_name', '=', 'field', '.', 'name', 'property_name', '=', '_PropertyName', '(', 'proto_field_name', ')', 'def', 'getter', '(', 'self', ')', ':', 'field_value', '=', 'self', '.', '_fields', '.', 'get', '(', 'field', ')', 'if', 'field_value', 'is', 'None', ':', '# Construct a new object to represent this field.', 'field_value', '=', 'field', '.', '_default_constructor', '(', 'self', ')', '# Atomically check if another thread has preempted us and, if not, swap', '# in the new object we just created. If someone has preempted us, we', '# take that object and discard ours.', '# WARNING: We are relying on setdefault() being atomic. This is true', "# in CPython but we haven't investigated others. This warning appears", '# in several other locations in this file.', 'field_value', '=', 'self', '.', '_fields', '.', 'setdefault', '(', 'field', ',', 'field_value', ')', 'return', 'field_value', 'getter', '.', '__module__', '=', 'None', 'getter', '.', '__doc__', '=', "'Getter for %s.'", '%', 'proto_field_name', '# We define a setter just so we can throw an exception with a more', '# helpful error message.', 'def', 'setter', '(', 'self', ',', 'new_value', ')', ':', 'raise', 'AttributeError', '(', "'Assignment not allowed to repeated field '", '\'"%s" in protocol message object.\'', '%', 'proto_field_name', ')', 'doc', '=', '\'Magic attribute generated for "%s" proto field.\'', '%', 'proto_field_name', 'setattr', '(', 'cls', ',', 'property_name', ',', 'property', '(', 'getter', ',', 'setter', ',', 'doc', '=', 'doc', ')', ')'] | Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
_RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing. | ['Adds', 'a', 'public', 'property', 'for', 'a', 'repeated', 'protocol', 'message', 'field', '.', 'Clients', 'can', 'use', 'this', 'property', 'to', 'get', 'the', 'value', 'of', 'the', 'field', 'which', 'will', 'be', 'either', 'a', '_RepeatedScalarFieldContainer', 'or', '_RepeatedCompositeFieldContainer', '(', 'see', 'below', ')', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L586-L627 |
2,069 | fvdsn/py-xml-escpos | xmlescpos/escpos.py | Escpos._print_image | def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0 | python | def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0 | ['def', '_print_image', '(', 'self', ',', 'line', ',', 'size', ')', ':', 'i', '=', '0', 'cont', '=', '0', 'buffer', '=', '""', 'self', '.', '_raw', '(', 'S_RASTER_N', ')', 'buffer', '=', '"%02X%02X%02X%02X"', '%', '(', '(', '(', 'size', '[', '0', ']', '/', 'size', '[', '1', ']', ')', '/', '8', ')', ',', '0', ',', 'size', '[', '1', ']', ',', '0', ')', 'self', '.', '_raw', '(', 'buffer', '.', 'decode', '(', "'hex'", ')', ')', 'buffer', '=', '""', 'while', 'i', '<', 'len', '(', 'line', ')', ':', 'hex_string', '=', 'int', '(', 'line', '[', 'i', ':', 'i', '+', '8', ']', ',', '2', ')', 'buffer', '+=', '"%02X"', '%', 'hex_string', 'i', '+=', '8', 'cont', '+=', '1', 'if', 'cont', '%', '4', '==', '0', ':', 'self', '.', '_raw', '(', 'buffer', '.', 'decode', '(', '"hex"', ')', ')', 'buffer', '=', '""', 'cont', '=', '0'] | Print formatted image | ['Print', 'formatted', 'image'] | train | https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L325-L345 |
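A small standalone sketch of the bit-packing step used by the record above: the 'line' argument is a string of '0'/'1' characters consumed 8 bits at a time and turned into raw bytes (the printer I/O itself needs the S_RASTER_N constant and Python 2's .decode('hex'), so it is omitted here; this is illustrative only, not code from the source):

def pack_bits(line):
    # Same 8-bits-at-a-time walk as _print_image, without the printer I/O.
    out = bytearray()
    for i in range(0, len(line), 8):
        out.append(int(line[i:i + 8], 2))  # e.g. '11110000' -> 0xF0
    return bytes(out)

assert pack_bits('1111000010101010') == b'\xf0\xaa'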
2,070 | openstack/networking-cisco | networking_cisco/plugins/cisco/device_manager/plugging_drivers/vif_hotplug_plugging_driver.py | VIFHotPlugPluggingDriver.delete_hosting_device_resources | def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
**kwargs):
"""Deletes resources for a hosting device in a plugin specific way."""
if mgmt_port is not None:
try:
self._cleanup_hosting_port(context, mgmt_port['id'])
except n_exc.NeutronException as e:
LOG.error("Unable to delete port:%(port)s after %(tries)d"
" attempts due to exception %(exception)s. "
"Skipping it", {'port': mgmt_port['id'],
'tries': DELETION_ATTEMPTS,
'exception': str(e)}) | python | def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
**kwargs):
"""Deletes resources for a hosting device in a plugin specific way."""
if mgmt_port is not None:
try:
self._cleanup_hosting_port(context, mgmt_port['id'])
except n_exc.NeutronException as e:
LOG.error("Unable to delete port:%(port)s after %(tries)d"
" attempts due to exception %(exception)s. "
"Skipping it", {'port': mgmt_port['id'],
'tries': DELETION_ATTEMPTS,
'exception': str(e)}) | ['def', 'delete_hosting_device_resources', '(', 'self', ',', 'context', ',', 'tenant_id', ',', 'mgmt_port', ',', '*', '*', 'kwargs', ')', ':', 'if', 'mgmt_port', 'is', 'not', 'None', ':', 'try', ':', 'self', '.', '_cleanup_hosting_port', '(', 'context', ',', 'mgmt_port', '[', "'id'", ']', ')', 'except', 'n_exc', '.', 'NeutronException', 'as', 'e', ':', 'LOG', '.', 'error', '(', '"Unable to delete port:%(port)s after %(tries)d"', '" attempts due to exception %(exception)s. "', '"Skipping it"', ',', '{', "'port'", ':', 'mgmt_port', '[', "'id'", ']', ',', "'tries'", ':', 'DELETION_ATTEMPTS', ',', "'exception'", ':', 'str', '(', 'e', ')', '}', ')'] | Deletes resources for a hosting device in a plugin specific way. | ['Deletes', 'resources', 'for', 'a', 'hosting', 'device', 'in', 'a', 'plugin', 'specific', 'way', '.'] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/plugging_drivers/vif_hotplug_plugging_driver.py#L107-L119 |
2,071 | brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py | brocade_tunnels.overlay_gateway_site_extend_vlan_add | def overlay_gateway_site_extend_vlan_add(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
site = ET.SubElement(overlay_gateway, "site")
name_key = ET.SubElement(site, "name")
name_key.text = kwargs.pop('name')
extend = ET.SubElement(site, "extend")
vlan = ET.SubElement(extend, "vlan")
add = ET.SubElement(vlan, "add")
add.text = kwargs.pop('add')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def overlay_gateway_site_extend_vlan_add(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
site = ET.SubElement(overlay_gateway, "site")
name_key = ET.SubElement(site, "name")
name_key.text = kwargs.pop('name')
extend = ET.SubElement(site, "extend")
vlan = ET.SubElement(extend, "vlan")
add = ET.SubElement(vlan, "add")
add.text = kwargs.pop('add')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'overlay_gateway_site_extend_vlan_add', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'overlay_gateway', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"overlay-gateway"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-tunnels"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'overlay_gateway', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'site', '=', 'ET', '.', 'SubElement', '(', 'overlay_gateway', ',', '"site"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'site', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'extend', '=', 'ET', '.', 'SubElement', '(', 'site', ',', '"extend"', ')', 'vlan', '=', 'ET', '.', 'SubElement', '(', 'extend', ',', '"vlan"', ')', 'add', '=', 'ET', '.', 'SubElement', '(', 'vlan', ',', '"add"', ')', 'add', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'add'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L304-L320 |
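The generated method above only assembles a NETCONF payload; a sketch of the element tree it builds, using illustrative values for the gateway name, site name and VLAN range (none of these values come from the source):

import xml.etree.ElementTree as ET

config = ET.Element("config")
gw = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
ET.SubElement(gw, "name").text = "gw1"            # overlay gateway name (illustrative)
site = ET.SubElement(gw, "site")
ET.SubElement(site, "name").text = "site1"        # site name (illustrative)
extend = ET.SubElement(site, "extend")
vlan = ET.SubElement(extend, "vlan")
ET.SubElement(vlan, "add").text = "100-110"       # VLANs to extend (illustrative)
print(ET.tostring(config, encoding="unicode"))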
2,072 | log2timeline/dftimewolf | dftimewolf/lib/collectors/filesystem.py | FilesystemCollector.process | def process(self):
"""Checks whether the paths exists and updates the state accordingly."""
for path in self._paths:
if os.path.exists(path):
self.state.output.append((os.path.basename(path), path))
else:
self.state.add_error(
'Path {0:s} does not exist'.format(str(path)), critical=False)
if not self.state.output:
self.state.add_error('No valid paths collected, bailing', critical=True) | python | def process(self):
"""Checks whether the paths exists and updates the state accordingly."""
for path in self._paths:
if os.path.exists(path):
self.state.output.append((os.path.basename(path), path))
else:
self.state.add_error(
'Path {0:s} does not exist'.format(str(path)), critical=False)
if not self.state.output:
self.state.add_error('No valid paths collected, bailing', critical=True) | ['def', 'process', '(', 'self', ')', ':', 'for', 'path', 'in', 'self', '.', '_paths', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'self', '.', 'state', '.', 'output', '.', 'append', '(', '(', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', ',', 'path', ')', ')', 'else', ':', 'self', '.', 'state', '.', 'add_error', '(', "'Path {0:s} does not exist'", '.', 'format', '(', 'str', '(', 'path', ')', ')', ',', 'critical', '=', 'False', ')', 'if', 'not', 'self', '.', 'state', '.', 'output', ':', 'self', '.', 'state', '.', 'add_error', '(', "'No valid paths collected, bailing'", ',', 'critical', '=', 'True', ')'] | Checks whether the paths exists and updates the state accordingly. | ['Checks', 'whether', 'the', 'paths', 'exists', 'and', 'updates', 'the', 'state', 'accordingly', '.'] | train | https://github.com/log2timeline/dftimewolf/blob/45f898476a288d73c4256ae8e3836a2a4848c0d7/dftimewolf/lib/collectors/filesystem.py#L37-L46 |
2,073 | proteanhq/protean | src/protean/core/entity.py | Entity.exists | def exists(cls, excludes_, **filters):
""" Return `True` if objects matching the provided filters and excludes
exist if not return false.
Calls the `filter` method by default, but can be overridden for better and
quicker implementations that may be supported by a database.
:param excludes_: entities without this combination of field name and
values will be returned
"""
results = cls.query.filter(**filters).exclude(**excludes_)
return bool(results) | python | def exists(cls, excludes_, **filters):
""" Return `True` if objects matching the provided filters and excludes
exist if not return false.
Calls the `filter` method by default, but can be overridden for better and
quicker implementations that may be supported by a database.
:param excludes_: entities without this combination of field name and
values will be returned
"""
results = cls.query.filter(**filters).exclude(**excludes_)
return bool(results) | ['def', 'exists', '(', 'cls', ',', 'excludes_', ',', '*', '*', 'filters', ')', ':', 'results', '=', 'cls', '.', 'query', '.', 'filter', '(', '*', '*', 'filters', ')', '.', 'exclude', '(', '*', '*', 'excludes_', ')', 'return', 'bool', '(', 'results', ')'] | Return `True` if objects matching the provided filters and excludes
exist if not return false.
Calls the `filter` method by default, but can be overridden for better and
quicker implementations that may be supported by a database.
:param excludes_: entities without this combination of field name and
values will be returned | ['Return', 'True', 'if', 'objects', 'matching', 'the', 'provided', 'filters', 'and', 'excludes', 'exist', 'if', 'not', 'return', 'false', '.'] | train | https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/entity.py#L429-L440 |
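A usage sketch for the record above; the entity class and its fields are made-up for illustration, not from the source. The first positional argument is the exclusion dict, the keyword arguments are the filters, and the return value is just the truthiness of the resulting query:

# Hypothetical Entity subclass 'Dog' with 'owner', 'color' and 'name' fields.
other_red_dogs_exist = Dog.exists({'name': 'Rex'}, owner='Jane', color='red')
if not other_red_dogs_exist:
    print('Jane owns no red dog other than Rex')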
2,074 | mcocdawc/chemcoord | src/chemcoord/configuration.py | write_configuration_file | def write_configuration_file(filepath=_give_default_file_path(),
overwrite=False):
"""Create a configuration file.
Writes the current state of settings into a configuration file.
.. note:: Since a file is permamently written, this function
is strictly speaking not sideeffect free.
Args:
filepath (str): Where to write the file.
The default is under both UNIX and Windows ``~/.chemcoordrc``.
overwrite (bool):
Returns:
None:
"""
config = configparser.ConfigParser()
config.read_dict(settings)
if os.path.isfile(filepath) and not overwrite:
try:
raise FileExistsError
except NameError: # because of python2
warn('File exists already and overwrite is False (default).')
else:
with open(filepath, 'w') as configfile:
config.write(configfile) | python | def write_configuration_file(filepath=_give_default_file_path(),
overwrite=False):
"""Create a configuration file.
Writes the current state of settings into a configuration file.
.. note:: Since a file is permamently written, this function
is strictly speaking not sideeffect free.
Args:
filepath (str): Where to write the file.
The default is under both UNIX and Windows ``~/.chemcoordrc``.
overwrite (bool):
Returns:
None:
"""
config = configparser.ConfigParser()
config.read_dict(settings)
if os.path.isfile(filepath) and not overwrite:
try:
raise FileExistsError
except NameError: # because of python2
warn('File exists already and overwrite is False (default).')
else:
with open(filepath, 'w') as configfile:
config.write(configfile) | ['def', 'write_configuration_file', '(', 'filepath', '=', '_give_default_file_path', '(', ')', ',', 'overwrite', '=', 'False', ')', ':', 'config', '=', 'configparser', '.', 'ConfigParser', '(', ')', 'config', '.', 'read_dict', '(', 'settings', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'filepath', ')', 'and', 'not', 'overwrite', ':', 'try', ':', 'raise', 'FileExistsError', 'except', 'NameError', ':', '# because of python2', 'warn', '(', "'File exists already and overwrite is False (default).'", ')', 'else', ':', 'with', 'open', '(', 'filepath', ',', "'w'", ')', 'as', 'configfile', ':', 'config', '.', 'write', '(', 'configfile', ')'] | Create a configuration file.
Writes the current state of settings into a configuration file.
.. note:: Since a file is permamently written, this function
is strictly speaking not sideeffect free.
Args:
filepath (str): Where to write the file.
The default is under both UNIX and Windows ``~/.chemcoordrc``.
overwrite (bool):
Returns:
None: | ['Create', 'a', 'configuration', 'file', '.'] | train | https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/configuration.py#L32-L59 |
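Usage is a single call; a short sketch, with the import path taken from the record's module location (the explicit path argument is only needed when not writing to the default ~/.chemcoordrc, and the /tmp path below is illustrative):

from chemcoord.configuration import write_configuration_file

write_configuration_file(overwrite=True)                      # write current settings to ~/.chemcoordrc
write_configuration_file('/tmp/chemcoordrc', overwrite=True)  # illustrative explicit path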
2,075 | pallets/werkzeug | src/werkzeug/http.py | is_byte_range_valid | def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length | python | def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length | ['def', 'is_byte_range_valid', '(', 'start', ',', 'stop', ',', 'length', ')', ':', 'if', '(', 'start', 'is', 'None', ')', '!=', '(', 'stop', 'is', 'None', ')', ':', 'return', 'False', 'elif', 'start', 'is', 'None', ':', 'return', 'length', 'is', 'None', 'or', 'length', '>=', '0', 'elif', 'length', 'is', 'None', ':', 'return', '0', '<=', 'start', '<', 'stop', 'elif', 'start', '>=', 'stop', ':', 'return', 'False', 'return', '0', '<=', 'start', '<', 'length'] | Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7 | ['Checks', 'if', 'a', 'given', 'byte', 'content', 'range', 'is', 'valid', 'for', 'the', 'given', 'length', '.'] | train | https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/http.py#L1222-L1235 |
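The behaviour above reduces to a small truth table; a runnable check, with the import path taken from the record:

from werkzeug.http import is_byte_range_valid

assert is_byte_range_valid(0, 500, 1000)        # 0 <= start < stop, start < length
assert is_byte_range_valid(0, 500, None)        # unknown length: only 0 <= start < stop is checked
assert is_byte_range_valid(None, None, None)    # no range at all is acceptable
assert not is_byte_range_valid(0, None, 1000)   # start and stop must both be set or both be None
assert not is_byte_range_valid(500, 400, 1000)  # start must lie below stop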
2,076 | Aloomaio/python-sdk | alooma_pysdk/alooma_pysdk.py | _Sender.__get_event | def __get_event(self, block=True, timeout=1):
"""
Retrieves an event. If self._exceeding_event is not None, it'll be
returned. Otherwise, an event is dequeued from the event buffer. If
The event which was retrieved is bigger than the permitted batch size,
it'll be omitted, and the next event in the event buffer is returned
"""
while True:
if self._exceeding_event: # An event was omitted from last batch
event = self._exceeding_event
self._exceeding_event = None
else: # No omitted event, get an event from the queue
event = self._event_queue.get(block, timeout)
event_size = len(event)
# If the event is bigger than the permitted batch size, ignore it
# The ( - 2 ) accounts for the parentheses enclosing the batch
if event_size - 2 >= self._batch_max_size:
self._notify(logging.WARNING,
consts.LOG_MSG_OMITTED_OVERSIZED_EVENT
% event_size)
else: # Event is of valid size, return it
return event | python | def __get_event(self, block=True, timeout=1):
"""
Retrieves an event. If self._exceeding_event is not None, it'll be
returned. Otherwise, an event is dequeued from the event buffer. If
The event which was retrieved is bigger than the permitted batch size,
it'll be omitted, and the next event in the event buffer is returned
"""
while True:
if self._exceeding_event: # An event was omitted from last batch
event = self._exceeding_event
self._exceeding_event = None
else: # No omitted event, get an event from the queue
event = self._event_queue.get(block, timeout)
event_size = len(event)
# If the event is bigger than the permitted batch size, ignore it
# The ( - 2 ) accounts for the parentheses enclosing the batch
if event_size - 2 >= self._batch_max_size:
self._notify(logging.WARNING,
consts.LOG_MSG_OMITTED_OVERSIZED_EVENT
% event_size)
else: # Event is of valid size, return it
return event | ['def', '__get_event', '(', 'self', ',', 'block', '=', 'True', ',', 'timeout', '=', '1', ')', ':', 'while', 'True', ':', 'if', 'self', '.', '_exceeding_event', ':', '# An event was omitted from last batch', 'event', '=', 'self', '.', '_exceeding_event', 'self', '.', '_exceeding_event', '=', 'None', 'else', ':', '# No omitted event, get an event from the queue', 'event', '=', 'self', '.', '_event_queue', '.', 'get', '(', 'block', ',', 'timeout', ')', 'event_size', '=', 'len', '(', 'event', ')', '# If the event is bigger than the permitted batch size, ignore it', '# The ( - 2 ) accounts for the parentheses enclosing the batch', 'if', 'event_size', '-', '2', '>=', 'self', '.', '_batch_max_size', ':', 'self', '.', '_notify', '(', 'logging', '.', 'WARNING', ',', 'consts', '.', 'LOG_MSG_OMITTED_OVERSIZED_EVENT', '%', 'event_size', ')', 'else', ':', '# Event is of valid size, return it', 'return', 'event'] | Retrieves an event. If self._exceeding_event is not None, it'll be
returned. Otherwise, an event is dequeued from the event buffer. If
The event which was retrieved is bigger than the permitted batch size,
it'll be omitted, and the next event in the event buffer is returned | ['Retrieves', 'an', 'event', '.', 'If', 'self', '.', '_exceeding_event', 'is', 'not', 'None', 'it', 'll', 'be', 'returned', '.', 'Otherwise', 'an', 'event', 'is', 'dequeued', 'from', 'the', 'event', 'buffer', '.', 'If', 'The', 'event', 'which', 'was', 'retrieved', 'is', 'bigger', 'than', 'the', 'permitted', 'batch', 'size', 'it', 'll', 'be', 'omitted', 'and', 'the', 'next', 'event', 'in', 'the', 'event', 'buffer', 'is', 'returned'] | train | https://github.com/Aloomaio/python-sdk/blob/e6e7322d0b23d90b1ff0320e9a9c431c82c0c277/alooma_pysdk/alooma_pysdk.py#L591-L614 |
2,077 | MozillaSecurity/laniakea | laniakea/core/providers/ec2/manager.py | EC2Manager.retry_on_ec2_error | def retry_on_ec2_error(self, func, *args, **kwargs):
"""
Call the given method with the given arguments, retrying if the call
failed due to an EC2ResponseError. This method will wait at most 30
seconds and perform up to 6 retries. If the method still fails, it will
propagate the error.
:param func: Function to call
:type func: function
"""
exception_retry_count = 6
while True:
try:
return func(*args, **kwargs)
except (boto.exception.EC2ResponseError, ssl.SSLError) as msg:
exception_retry_count -= 1
if exception_retry_count <= 0:
raise msg
time.sleep(5) | python | def retry_on_ec2_error(self, func, *args, **kwargs):
"""
Call the given method with the given arguments, retrying if the call
failed due to an EC2ResponseError. This method will wait at most 30
seconds and perform up to 6 retries. If the method still fails, it will
propagate the error.
:param func: Function to call
:type func: function
"""
exception_retry_count = 6
while True:
try:
return func(*args, **kwargs)
except (boto.exception.EC2ResponseError, ssl.SSLError) as msg:
exception_retry_count -= 1
if exception_retry_count <= 0:
raise msg
time.sleep(5) | ['def', 'retry_on_ec2_error', '(', 'self', ',', 'func', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'exception_retry_count', '=', '6', 'while', 'True', ':', 'try', ':', 'return', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', '(', 'boto', '.', 'exception', '.', 'EC2ResponseError', ',', 'ssl', '.', 'SSLError', ')', 'as', 'msg', ':', 'exception_retry_count', '-=', '1', 'if', 'exception_retry_count', '<=', '0', ':', 'raise', 'msg', 'time', '.', 'sleep', '(', '5', ')'] | Call the given method with the given arguments, retrying if the call
failed due to an EC2ResponseError. This method will wait at most 30
seconds and perform up to 6 retries. If the method still fails, it will
propagate the error.
:param func: Function to call
:type func: function | ['Call', 'the', 'given', 'method', 'with', 'the', 'given', 'arguments', 'retrying', 'if', 'the', 'call', 'failed', 'due', 'to', 'an', 'EC2ResponseError', '.', 'This', 'method', 'will', 'wait', 'at', 'most', '30', 'seconds', 'and', 'perform', 'up', 'to', '6', 'retries', '.', 'If', 'the', 'method', 'still', 'fails', 'it', 'will', 'propagate', 'the', 'error', '.'] | train | https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/ec2/manager.py#L36-L54 |
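A usage sketch; 'manager' (an EC2Manager instance) and 'conn' (a boto EC2 connection) are assumed objects, not taken from the source. Any flaky EC2 call can be routed through the helper so that EC2ResponseError/SSLError failures are retried up to six times, five seconds apart:

# Both objects below are assumptions for illustration.
instances = manager.retry_on_ec2_error(conn.get_only_instances)
manager.retry_on_ec2_error(conn.terminate_instances, instance_ids=['i-0123456789abcdef0'])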
2,078 | pip-services3-python/pip-services3-commons-python | pip_services3_commons/validate/PropertySchema.py | PropertySchema._perform_validation | def _perform_validation(self, path, value, results):
"""
Validates a given value against the schema and configured validation rules.
:param path: a dot notation path to the value.
:param value: a value to be validated.
:param results: a list with validation results to add new results.
"""
path = self.name if path == None or len(path) == 0 else path + "." + self.name
super(PropertySchema, self)._perform_validation(path, value, results)
self._perform_type_validation(path, self.value_type, value, results) | python | def _perform_validation(self, path, value, results):
"""
Validates a given value against the schema and configured validation rules.
:param path: a dot notation path to the value.
:param value: a value to be validated.
:param results: a list with validation results to add new results.
"""
path = self.name if path == None or len(path) == 0 else path + "." + self.name
super(PropertySchema, self)._perform_validation(path, value, results)
self._perform_type_validation(path, self.value_type, value, results) | ['def', '_perform_validation', '(', 'self', ',', 'path', ',', 'value', ',', 'results', ')', ':', 'path', '=', 'self', '.', 'name', 'if', 'path', '==', 'None', 'or', 'len', '(', 'path', ')', '==', '0', 'else', 'path', '+', '"."', '+', 'self', '.', 'name', 'super', '(', 'PropertySchema', ',', 'self', ')', '.', '_perform_validation', '(', 'path', ',', 'value', ',', 'results', ')', 'self', '.', '_perform_type_validation', '(', 'path', ',', 'self', '.', 'value_type', ',', 'value', ',', 'results', ')'] | Validates a given value against the schema and configured validation rules.
:param path: a dot notation path to the value.
:param value: a value to be validated.
:param results: a list with validation results to add new results. | ['Validates', 'a', 'given', 'value', 'against', 'the', 'schema', 'and', 'configured', 'validation', 'rules', '.'] | train | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/validate/PropertySchema.py#L40-L53 |
2,079 | psd-tools/psd-tools | src/psd_tools/api/pil_io.py | get_color_mode | def get_color_mode(mode):
"""Convert PIL mode to ColorMode."""
name = mode.upper()
name = name.rstrip('A') # Trim alpha.
name = {'1': 'BITMAP', 'L': 'GRAYSCALE'}.get(name, name)
return getattr(ColorMode, name) | python | def get_color_mode(mode):
"""Convert PIL mode to ColorMode."""
name = mode.upper()
name = name.rstrip('A') # Trim alpha.
name = {'1': 'BITMAP', 'L': 'GRAYSCALE'}.get(name, name)
return getattr(ColorMode, name) | ['def', 'get_color_mode', '(', 'mode', ')', ':', 'name', '=', 'mode', '.', 'upper', '(', ')', 'name', '=', 'name', '.', 'rstrip', '(', "'A'", ')', '# Trim alpha.', 'name', '=', '{', "'1'", ':', "'BITMAP'", ',', "'L'", ':', "'GRAYSCALE'", '}', '.', 'get', '(', 'name', ',', 'name', ')', 'return', 'getattr', '(', 'ColorMode', ',', 'name', ')'] | Convert PIL mode to ColorMode. | ['Convert', 'PIL', 'mode', 'to', 'ColorMode', '.'] | train | https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/pil_io.py#L14-L19 |
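The mapping spelled out for a few PIL mode strings, assuming the psd-tools version this record was taken from (the trailing 'A' for alpha is stripped, '1' and 'L' are renamed, anything else is looked up on ColorMode directly):

from psd_tools.constants import ColorMode
from psd_tools.api.pil_io import get_color_mode

assert get_color_mode('RGBA') == ColorMode.RGB       # alpha suffix trimmed
assert get_color_mode('L') == ColorMode.GRAYSCALE
assert get_color_mode('1') == ColorMode.BITMAP
assert get_color_mode('CMYK') == ColorMode.CMYK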
2,080 | ska-sa/purr | Purr/Editors.py | _sanitizeFilename | def _sanitizeFilename(filename):
"""Sanitizes filename for use on Windows and other brain-dead systems, by replacing a number of illegal characters
with underscores."""
global _sanitize_trans
out = filename.translate(_sanitize_trans)
# leading dot becomes "_"
if out and out[0] == '.':
out = out[1:]
return out | python | def _sanitizeFilename(filename):
"""Sanitizes filename for use on Windows and other brain-dead systems, by replacing a number of illegal characters
with underscores."""
global _sanitize_trans
out = filename.translate(_sanitize_trans)
# leading dot becomes "_"
if out and out[0] == '.':
out = out[1:]
return out | ['def', '_sanitizeFilename', '(', 'filename', ')', ':', 'global', '_sanitize_trans', 'out', '=', 'filename', '.', 'translate', '(', '_sanitize_trans', ')', '# leading dot becomes "_"', 'if', 'out', 'and', 'out', '[', '0', ']', '==', "'.'", ':', 'out', '=', 'out', '[', '1', ':', ']', 'return', 'out'] | Sanitizes filename for use on Windows and other brain-dead systems, by replacing a number of illegal characters
with underscores. | ['Sanitizes', 'filename', 'for', 'use', 'on', 'Windows', 'and', 'other', 'brain', '-', 'dead', 'systems', 'by', 'replacing', 'a', 'number', 'of', 'illegal', 'characters', 'with', 'underscores', '.'] | train | https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Editors.py#L49-L57 |
2,081 | iancmcc/ouimeaux | ouimeaux/device/__init__.py | Device.get_state | def get_state(self, force_update=False):
"""
Returns 0 if off and 1 if on.
"""
if force_update or self._state is None:
return int(self.basicevent.GetBinaryState()['BinaryState'])
return self._state | python | def get_state(self, force_update=False):
"""
Returns 0 if off and 1 if on.
"""
if force_update or self._state is None:
return int(self.basicevent.GetBinaryState()['BinaryState'])
return self._state | ['def', 'get_state', '(', 'self', ',', 'force_update', '=', 'False', ')', ':', 'if', 'force_update', 'or', 'self', '.', '_state', 'is', 'None', ':', 'return', 'int', '(', 'self', '.', 'basicevent', '.', 'GetBinaryState', '(', ')', '[', "'BinaryState'", ']', ')', 'return', 'self', '.', '_state'] | Returns 0 if off and 1 if on. | ['Returns', '0', 'if', 'off', 'and', '1', 'if', 'on', '.'] | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/device/__init__.py#L36-L42 |
2,082 | awslabs/serverless-application-model | samtranslator/intrinsics/resource_refs.py | SupportedResourceReferences.add | def add(self, logical_id, property, value):
"""
Add the information that resource with given `logical_id` supports the given `property`, and that a reference
to `logical_id.property` resolves to given `value.
Example:
"MyApi.Deployment" -> "MyApiDeployment1234567890"
:param logical_id: Logical ID of the resource (Ex: MyLambdaFunction)
:param property: Property on the resource that can be referenced (Ex: Alias)
:param value: Value that this reference resolves to.
:return: nothing
"""
if not logical_id or not property:
raise ValueError("LogicalId and property must be a non-empty string")
if not value or not isinstance(value, string_types):
raise ValueError("Property value must be a non-empty string")
if logical_id not in self._refs:
self._refs[logical_id] = {}
if property in self._refs[logical_id]:
raise ValueError("Cannot add second reference value to {}.{} property".format(logical_id, property))
self._refs[logical_id][property] = value | python | def add(self, logical_id, property, value):
"""
Add the information that resource with given `logical_id` supports the given `property`, and that a reference
to `logical_id.property` resolves to given `value.
Example:
"MyApi.Deployment" -> "MyApiDeployment1234567890"
:param logical_id: Logical ID of the resource (Ex: MyLambdaFunction)
:param property: Property on the resource that can be referenced (Ex: Alias)
:param value: Value that this reference resolves to.
:return: nothing
"""
if not logical_id or not property:
raise ValueError("LogicalId and property must be a non-empty string")
if not value or not isinstance(value, string_types):
raise ValueError("Property value must be a non-empty string")
if logical_id not in self._refs:
self._refs[logical_id] = {}
if property in self._refs[logical_id]:
raise ValueError("Cannot add second reference value to {}.{} property".format(logical_id, property))
self._refs[logical_id][property] = value | ['def', 'add', '(', 'self', ',', 'logical_id', ',', 'property', ',', 'value', ')', ':', 'if', 'not', 'logical_id', 'or', 'not', 'property', ':', 'raise', 'ValueError', '(', '"LogicalId and property must be a non-empty string"', ')', 'if', 'not', 'value', 'or', 'not', 'isinstance', '(', 'value', ',', 'string_types', ')', ':', 'raise', 'ValueError', '(', '"Property value must be a non-empty string"', ')', 'if', 'logical_id', 'not', 'in', 'self', '.', '_refs', ':', 'self', '.', '_refs', '[', 'logical_id', ']', '=', '{', '}', 'if', 'property', 'in', 'self', '.', '_refs', '[', 'logical_id', ']', ':', 'raise', 'ValueError', '(', '"Cannot add second reference value to {}.{} property"', '.', 'format', '(', 'logical_id', ',', 'property', ')', ')', 'self', '.', '_refs', '[', 'logical_id', ']', '[', 'property', ']', '=', 'value'] | Add the information that resource with given `logical_id` supports the given `property`, and that a reference
to `logical_id.property` resolves to given `value.
Example:
"MyApi.Deployment" -> "MyApiDeployment1234567890"
:param logical_id: Logical ID of the resource (Ex: MyLambdaFunction)
:param property: Property on the resource that can be referenced (Ex: Alias)
:param value: Value that this reference resolves to.
:return: nothing | ['Add', 'the', 'information', 'that', 'resource', 'with', 'given', 'logical_id', 'supports', 'the', 'given', 'property', 'and', 'that', 'a', 'reference', 'to', 'logical_id', '.', 'property', 'resolves', 'to', 'given', 'value', '.'] | train | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/intrinsics/resource_refs.py#L17-L44 |
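A usage sketch built around the docstring's own example; the second value for MyApi.Deployment is rejected, which is the invariant the method enforces (the Stage value is an illustrative addition, not from the source):

from samtranslator.intrinsics.resource_refs import SupportedResourceReferences

refs = SupportedResourceReferences()
refs.add('MyApi', 'Deployment', 'MyApiDeployment1234567890')
refs.add('MyApi', 'Stage', 'MyApiProdStage')           # same resource, different property: allowed
try:
    refs.add('MyApi', 'Deployment', 'SomethingElse')   # duplicate property: ValueError
except ValueError as err:
    print(err)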
2,083 | pytroll/trollimage | trollimage/image.py | Image._ycbcr2l | def _ycbcr2l(self, mode):
"""Convert from YCbCr to L.
"""
self._check_modes(("YCbCr", "YCbCrA"))
self.channels = [self.channels[0]] + self.channels[3:]
if self.fill_value is not None:
self.fill_value = [self.fill_value[0]] + self.fill_value[3:]
self.mode = mode | python | def _ycbcr2l(self, mode):
"""Convert from YCbCr to L.
"""
self._check_modes(("YCbCr", "YCbCrA"))
self.channels = [self.channels[0]] + self.channels[3:]
if self.fill_value is not None:
self.fill_value = [self.fill_value[0]] + self.fill_value[3:]
self.mode = mode | ['def', '_ycbcr2l', '(', 'self', ',', 'mode', ')', ':', 'self', '.', '_check_modes', '(', '(', '"YCbCr"', ',', '"YCbCrA"', ')', ')', 'self', '.', 'channels', '=', '[', 'self', '.', 'channels', '[', '0', ']', ']', '+', 'self', '.', 'channels', '[', '3', ':', ']', 'if', 'self', '.', 'fill_value', 'is', 'not', 'None', ':', 'self', '.', 'fill_value', '=', '[', 'self', '.', 'fill_value', '[', '0', ']', ']', '+', 'self', '.', 'fill_value', '[', '3', ':', ']', 'self', '.', 'mode', '=', 'mode'] | Convert from YCbCr to L. | ['Convert', 'from', 'YCbCr', 'to', 'L', '.'] | train | https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/image.py#L627-L635 |
2,084 | wonambi-python/wonambi | wonambi/source/linear.py | calc_xyz2surf | def calc_xyz2surf(surf, xyz, threshold=20, exponent=None, std=None):
"""Calculate transformation matrix from xyz values to vertices.
Parameters
----------
surf : instance of wonambi.attr.Surf
the surface of only one hemisphere.
xyz : numpy.ndarray
nChan x 3 matrix, with the locations in x, y, z.
std : float
distance in mm of the Gaussian kernel
exponent : int
inverse law (1-> direct inverse, 2-> inverse square, 3-> inverse cube)
threshold : float
distance in mm for a vertex to pick up electrode activity (if distance
is above the threshold, one electrode does not affect a vertex).
Returns
-------
numpy.ndarray
nVertices X xyz.shape[0] matrix
Notes
-----
This function is a helper when plotting onto brain surface, by creating a
transformation matrix from the values in space (f.e. at each electrode) to
the position of the vertices (used to show the brain surface).
There are many ways to move from values to vertices. The crucial parameter
is the function at which activity decreases in respect to the distance. You
can have an inverse relationship by specifying 'exponent'. If 'exponent' is
2, then the activity will decrease as inverse square of the distance. The
function can be a Gaussian. With std, you specify the width of the gaussian
kernel in mm.
For each vertex, it uses a threshold based on the distance ('threshold'
value, in mm). Finally, it normalizes the contribution of all the channels
to 1, so that the sum of the coefficients for each vertex is 1.
You can also create your own matrix (and skip calc_xyz2surf altogether) and
pass it as attribute to the main figure.
Because it's a loop over all the vertices, this function is pretty slow,
but if you calculate it once, you can reuse it.
We take advantage of multiprocessing, which speeds it up considerably.
"""
if exponent is None and std is None:
exponent = 1
if exponent is not None:
lg.debug('Vertex values based on inverse-law, with exponent ' +
str(exponent))
funct = partial(calc_one_vert_inverse, xyz=xyz, exponent=exponent)
elif std is not None:
lg.debug('Vertex values based on gaussian, with s.d. ' + str(std))
funct = partial(calc_one_vert_gauss, xyz=xyz, std=std)
with Pool() as p:
xyz2surf = p.map(funct, surf.vert)
xyz2surf = asarray(xyz2surf)
if exponent is not None:
threshold_value = (1 / (threshold ** exponent))
external_threshold_value = threshold_value
elif std is not None:
threshold_value = gauss(threshold, std)
external_threshold_value = gauss(std, std) # this is around 0.607
lg.debug('Values thresholded at ' + str(threshold_value))
xyz2surf[xyz2surf < threshold_value] = NaN
# here we deal with vertices that are within the threshold value but far
# from a single electrodes, so those remain empty
sumval = nansum(xyz2surf, axis=1)
sumval[sumval < external_threshold_value] = NaN
# normalize by the number of electrodes
xyz2surf /= atleast_2d(sumval).T
xyz2surf[isnan(xyz2surf)] = 0
return xyz2surf | python | def calc_xyz2surf(surf, xyz, threshold=20, exponent=None, std=None):
"""Calculate transformation matrix from xyz values to vertices.
Parameters
----------
surf : instance of wonambi.attr.Surf
the surface of only one hemisphere.
xyz : numpy.ndarray
nChan x 3 matrix, with the locations in x, y, z.
std : float
distance in mm of the Gaussian kernel
exponent : int
inverse law (1-> direct inverse, 2-> inverse square, 3-> inverse cube)
threshold : float
distance in mm for a vertex to pick up electrode activity (if distance
is above the threshold, one electrode does not affect a vertex).
Returns
-------
numpy.ndarray
nVertices X xyz.shape[0] matrix
Notes
-----
This function is a helper when plotting onto brain surface, by creating a
transformation matrix from the values in space (f.e. at each electrode) to
the position of the vertices (used to show the brain surface).
There are many ways to move from values to vertices. The crucial parameter
is the function at which activity decreases in respect to the distance. You
can have an inverse relationship by specifying 'exponent'. If 'exponent' is
2, then the activity will decrease as inverse square of the distance. The
function can be a Gaussian. With std, you specify the width of the gaussian
kernel in mm.
For each vertex, it uses a threshold based on the distance ('threshold'
value, in mm). Finally, it normalizes the contribution of all the channels
to 1, so that the sum of the coefficients for each vertex is 1.
You can also create your own matrix (and skip calc_xyz2surf altogether) and
pass it as attribute to the main figure.
Because it's a loop over all the vertices, this function is pretty slow,
but if you calculate it once, you can reuse it.
We take advantage of multiprocessing, which speeds it up considerably.
"""
if exponent is None and std is None:
exponent = 1
if exponent is not None:
lg.debug('Vertex values based on inverse-law, with exponent ' +
str(exponent))
funct = partial(calc_one_vert_inverse, xyz=xyz, exponent=exponent)
elif std is not None:
lg.debug('Vertex values based on gaussian, with s.d. ' + str(std))
funct = partial(calc_one_vert_gauss, xyz=xyz, std=std)
with Pool() as p:
xyz2surf = p.map(funct, surf.vert)
xyz2surf = asarray(xyz2surf)
if exponent is not None:
threshold_value = (1 / (threshold ** exponent))
external_threshold_value = threshold_value
elif std is not None:
threshold_value = gauss(threshold, std)
external_threshold_value = gauss(std, std) # this is around 0.607
lg.debug('Values thresholded at ' + str(threshold_value))
xyz2surf[xyz2surf < threshold_value] = NaN
# here we deal with vertices that are within the threshold value but far
# from a single electrodes, so those remain empty
sumval = nansum(xyz2surf, axis=1)
sumval[sumval < external_threshold_value] = NaN
# normalize by the number of electrodes
xyz2surf /= atleast_2d(sumval).T
xyz2surf[isnan(xyz2surf)] = 0
return xyz2surf | ['def', 'calc_xyz2surf', '(', 'surf', ',', 'xyz', ',', 'threshold', '=', '20', ',', 'exponent', '=', 'None', ',', 'std', '=', 'None', ')', ':', 'if', 'exponent', 'is', 'None', 'and', 'std', 'is', 'None', ':', 'exponent', '=', '1', 'if', 'exponent', 'is', 'not', 'None', ':', 'lg', '.', 'debug', '(', "'Vertex values based on inverse-law, with exponent '", '+', 'str', '(', 'exponent', ')', ')', 'funct', '=', 'partial', '(', 'calc_one_vert_inverse', ',', 'xyz', '=', 'xyz', ',', 'exponent', '=', 'exponent', ')', 'elif', 'std', 'is', 'not', 'None', ':', 'lg', '.', 'debug', '(', "'Vertex values based on gaussian, with s.d. '", '+', 'str', '(', 'std', ')', ')', 'funct', '=', 'partial', '(', 'calc_one_vert_gauss', ',', 'xyz', '=', 'xyz', ',', 'std', '=', 'std', ')', 'with', 'Pool', '(', ')', 'as', 'p', ':', 'xyz2surf', '=', 'p', '.', 'map', '(', 'funct', ',', 'surf', '.', 'vert', ')', 'xyz2surf', '=', 'asarray', '(', 'xyz2surf', ')', 'if', 'exponent', 'is', 'not', 'None', ':', 'threshold_value', '=', '(', '1', '/', '(', 'threshold', '**', 'exponent', ')', ')', 'external_threshold_value', '=', 'threshold_value', 'elif', 'std', 'is', 'not', 'None', ':', 'threshold_value', '=', 'gauss', '(', 'threshold', ',', 'std', ')', 'external_threshold_value', '=', 'gauss', '(', 'std', ',', 'std', ')', '# this is around 0.607', 'lg', '.', 'debug', '(', "'Values thresholded at '", '+', 'str', '(', 'threshold_value', ')', ')', 'xyz2surf', '[', 'xyz2surf', '<', 'threshold_value', ']', '=', 'NaN', '# here we deal with vertices that are within the threshold value but far', '# from a single electrodes, so those remain empty', 'sumval', '=', 'nansum', '(', 'xyz2surf', ',', 'axis', '=', '1', ')', 'sumval', '[', 'sumval', '<', 'external_threshold_value', ']', '=', 'NaN', '# normalize by the number of electrodes', 'xyz2surf', '/=', 'atleast_2d', '(', 'sumval', ')', '.', 'T', 'xyz2surf', '[', 'isnan', '(', 'xyz2surf', ')', ']', '=', '0', 'return', 'xyz2surf'] | Calculate transformation matrix from xyz values to vertices.
Parameters
----------
surf : instance of wonambi.attr.Surf
the surface of only one hemisphere.
xyz : numpy.ndarray
nChan x 3 matrix, with the locations in x, y, z.
std : float
distance in mm of the Gaussian kernel
exponent : int
inverse law (1-> direct inverse, 2-> inverse square, 3-> inverse cube)
threshold : float
distance in mm for a vertex to pick up electrode activity (if distance
is above the threshold, one electrode does not affect a vertex).
Returns
-------
numpy.ndarray
nVertices X xyz.shape[0] matrix
Notes
-----
This function is a helper when plotting onto brain surface, by creating a
transformation matrix from the values in space (f.e. at each electrode) to
the position of the vertices (used to show the brain surface).
There are many ways to move from values to vertices. The crucial parameter
is the function at which activity decreases in respect to the distance. You
can have an inverse relationship by specifying 'exponent'. If 'exponent' is
2, then the activity will decrease as inverse square of the distance. The
function can be a Gaussian. With std, you specify the width of the gaussian
kernel in mm.
For each vertex, it uses a threshold based on the distance ('threshold'
value, in mm). Finally, it normalizes the contribution of all the channels
to 1, so that the sum of the coefficients for each vertex is 1.
You can also create your own matrix (and skip calc_xyz2surf altogether) and
pass it as attribute to the main figure.
Because it's a loop over all the vertices, this function is pretty slow,
but if you calculate it once, you can reuse it.
We take advantage of multiprocessing, which speeds it up considerably. | ['Calculate', 'transformation', 'matrix', 'from', 'xyz', 'values', 'to', 'vertices', '.'] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/source/linear.py#L57-L136 |
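The per-vertex weights come from the helpers calc_one_vert_inverse and calc_one_vert_gauss, which this record references but does not show; a minimal numpy sketch of what such distance-based kernels look like, under the assumption that they match the thresholds used above (1/d**exponent, and an unnormalized Gaussian where gauss(std, std) equals exp(-0.5), about 0.607):

import numpy as np

def inverse_weight(vert, xyz, exponent=1):
    # One weight per channel: inverse power of the vertex-to-channel distance.
    d = np.linalg.norm(xyz - vert, axis=1)
    return 1 / d ** exponent

def gauss_weight(vert, xyz, std=10):
    # One weight per channel: unnormalized Gaussian of the distance (s.d. in mm).
    d = np.linalg.norm(xyz - vert, axis=1)
    return np.exp(-0.5 * (d / std) ** 2)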
2,085 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/npm/npm_client.py | NpmClient.update_package | def update_package(self, package_version_details, feed_id, package_name, package_version):
"""UpdatePackage.
[Preview API]
:param :class:`<PackageVersionDetails> <azure.devops.v5_0.npm.models.PackageVersionDetails>` package_version_details:
:param str feed_id:
:param str package_name:
:param str package_version:
:rtype: :class:`<Package> <azure.devops.v5_0.npm.models.Package>`
"""
route_values = {}
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
response = self._send(http_method='PATCH',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Package', response) | python | def update_package(self, package_version_details, feed_id, package_name, package_version):
"""UpdatePackage.
[Preview API]
:param :class:`<PackageVersionDetails> <azure.devops.v5_0.npm.models.PackageVersionDetails>` package_version_details:
:param str feed_id:
:param str package_name:
:param str package_version:
:rtype: :class:`<Package> <azure.devops.v5_0.npm.models.Package>`
"""
route_values = {}
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_name is not None:
route_values['packageName'] = self._serialize.url('package_name', package_name, 'str')
if package_version is not None:
route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str')
content = self._serialize.body(package_version_details, 'PackageVersionDetails')
response = self._send(http_method='PATCH',
location_id='ed579d62-67c9-4271-be66-9b029af5bcf9',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Package', response) | ['def', 'update_package', '(', 'self', ',', 'package_version_details', ',', 'feed_id', ',', 'package_name', ',', 'package_version', ')', ':', 'route_values', '=', '{', '}', 'if', 'feed_id', 'is', 'not', 'None', ':', 'route_values', '[', "'feedId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'feed_id'", ',', 'feed_id', ',', "'str'", ')', 'if', 'package_name', 'is', 'not', 'None', ':', 'route_values', '[', "'packageName'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'package_name'", ',', 'package_name', ',', "'str'", ')', 'if', 'package_version', 'is', 'not', 'None', ':', 'route_values', '[', "'packageVersion'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'package_version'", ',', 'package_version', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'package_version_details', ',', "'PackageVersionDetails'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'PATCH'", ',', 'location_id', '=', "'ed579d62-67c9-4271-be66-9b029af5bcf9'", ',', 'version', '=', "'5.0-preview.1'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'Package'", ',', 'response', ')'] | UpdatePackage.
[Preview API]
:param :class:`<PackageVersionDetails> <azure.devops.v5_0.npm.models.PackageVersionDetails>` package_version_details:
:param str feed_id:
:param str package_name:
:param str package_version:
:rtype: :class:`<Package> <azure.devops.v5_0.npm.models.Package>` | ['UpdatePackage', '.', '[', 'Preview', 'API', ']', ':', 'param', ':', 'class', ':', '<PackageVersionDetails', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'npm', '.', 'models', '.', 'PackageVersionDetails', '>', 'package_version_details', ':', ':', 'param', 'str', 'feed_id', ':', ':', 'param', 'str', 'package_name', ':', ':', 'param', 'str', 'package_version', ':', ':', 'rtype', ':', ':', 'class', ':', '<Package', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'npm', '.', 'models', '.', 'Package', '>'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/npm/npm_client.py#L404-L426 |
2,086 | dnephin/PyStaticConfiguration | staticconf/config.py | get_namespaces_from_names | def get_namespaces_from_names(name, all_names):
"""Return a generator which yields namespace objects."""
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name) | python | def get_namespaces_from_names(name, all_names):
"""Return a generator which yields namespace objects."""
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name) | ['def', 'get_namespaces_from_names', '(', 'name', ',', 'all_names', ')', ':', 'names', '=', 'configuration_namespaces', '.', 'keys', '(', ')', 'if', 'all_names', 'else', '[', 'name', ']', 'for', 'name', 'in', 'names', ':', 'yield', 'get_namespace', '(', 'name', ')'] | Return a generator which yields namespace objects. | ['Return', 'a', 'generator', 'which', 'yields', 'namespace', 'objects', '.'] | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L181-L185 |
2,087 | amaas-fintech/amaas-core-sdk-python | amaascore/market_data/interface.py | MarketDataInterface.get_brokendate_fx_forward_rate | def get_brokendate_fx_forward_rate(self, asset_manager_id, asset_id, price_date, value_date):
"""
This method takes calculates broken date forward FX rate based on the passed in parameters
"""
self.logger.info('Calculate broken date FX Forward - Asset Manager: %s - Asset (currency): %s - Price Date: %s - Value Date: %s', asset_manager_id, asset_id, price_date, value_date)
url = '%s/brokendateforward/%s' % (self.endpoint, asset_manager_id)
params = {'value_date': value_date, 'asset_id':asset_id, 'price_date': price_date}
response = self.session.get(url=url, params = params)
if response.ok:
forward_rate = response.json()
self.logger.info('Retrieved broken date FX forward rate %s - %s: %s', asset_id, price_date, value_date)
return forward_rate
else:
self.logger.error(response.text)
response.raise_for_status() | python | def get_brokendate_fx_forward_rate(self, asset_manager_id, asset_id, price_date, value_date):
"""
This method takes calculates broken date forward FX rate based on the passed in parameters
"""
self.logger.info('Calculate broken date FX Forward - Asset Manager: %s - Asset (currency): %s - Price Date: %s - Value Date: %s', asset_manager_id, asset_id, price_date, value_date)
url = '%s/brokendateforward/%s' % (self.endpoint, asset_manager_id)
params = {'value_date': value_date, 'asset_id':asset_id, 'price_date': price_date}
response = self.session.get(url=url, params = params)
if response.ok:
forward_rate = response.json()
self.logger.info('Retrieved broken date FX forward rate %s - %s: %s', asset_id, price_date, value_date)
return forward_rate
else:
self.logger.error(response.text)
response.raise_for_status() | ['def', 'get_brokendate_fx_forward_rate', '(', 'self', ',', 'asset_manager_id', ',', 'asset_id', ',', 'price_date', ',', 'value_date', ')', ':', 'self', '.', 'logger', '.', 'info', '(', "'Calculate broken date FX Forward - Asset Manager: %s - Asset (currency): %s - Price Date: %s - Value Date: %s'", ',', 'asset_manager_id', ',', 'asset_id', ',', 'price_date', ',', 'value_date', ')', 'url', '=', "'%s/brokendateforward/%s'", '%', '(', 'self', '.', 'endpoint', ',', 'asset_manager_id', ')', 'params', '=', '{', "'value_date'", ':', 'value_date', ',', "'asset_id'", ':', 'asset_id', ',', "'price_date'", ':', 'price_date', '}', 'response', '=', 'self', '.', 'session', '.', 'get', '(', 'url', '=', 'url', ',', 'params', '=', 'params', ')', 'if', 'response', '.', 'ok', ':', 'forward_rate', '=', 'response', '.', 'json', '(', ')', 'self', '.', 'logger', '.', 'info', '(', "'Retrieved broken date FX forward rate %s - %s: %s'", ',', 'asset_id', ',', 'price_date', ',', 'value_date', ')', 'return', 'forward_rate', 'else', ':', 'self', '.', 'logger', '.', 'error', '(', 'response', '.', 'text', ')', 'response', '.', 'raise_for_status', '(', ')'] | This method takes calculates broken date forward FX rate based on the passed in parameters | ['This', 'method', 'takes', 'calculates', 'broken', 'date', 'forward', 'FX', 'rate', 'based', 'on', 'the', 'passed', 'in', 'parameters'] | train | https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/market_data/interface.py#L225-L239 |
2,088 | wright-group/WrightTools | WrightTools/artists/_helpers.py | add_sideplot | def add_sideplot(
ax,
along,
pad=0.,
*,
grid=True,
zero_line=True,
arrs_to_bin=None,
normalize_bin=True,
ymin=0,
ymax=1.1,
height=0.75,
c="C0"
):
"""Add a sideplot to an axis. Sideplots share their corresponding axis.
Parameters
----------
ax : matplotlib AxesSubplot object
The axis to add a sideplot along.
along : {'x', 'y'}
The dimension to add a sideplot along.
pad : number (optional)
Distance between axis and sideplot. Default is 0.
grid : bool (optional)
Toggle for plotting grid on sideplot. Default is True.
zero_line : bool (optional)
Toggle for plotting black line at zero signal. Default is True.
arrs_to_bin : list [xi, yi, zi] (optional)
Bins are plotted if arrays are supplied. Default is None.
normalize_bin : bool (optional)
Normalize bin by max value. Default is True.
ymin : number (optional)
Bin minimum extent. Default is 0.
ymax : number (optional)
Bin maximum extent. Default is 1.1
c : string (optional)
Line color. Default is C0.
Returns
-------
axCorr
AxesSubplot object
"""
# divider should only be created once
if hasattr(ax, "WrightTools_sideplot_divider"):
divider = ax.WrightTools_sideplot_divider
else:
divider = make_axes_locatable(ax)
setattr(ax, "WrightTools_sideplot_divider", divider)
# create sideplot axis
if along == "x":
axCorr = divider.append_axes("top", height, pad=pad, sharex=ax)
elif along == "y":
axCorr = divider.append_axes("right", height, pad=pad, sharey=ax)
axCorr.autoscale(False)
axCorr.set_adjustable("box")
# bin
if arrs_to_bin is not None:
xi, yi, zi = arrs_to_bin
if along == "x":
b = np.nansum(zi, axis=0) * len(yi)
if normalize_bin:
b /= np.nanmax(b)
axCorr.plot(xi, b, c=c, lw=2)
elif along == "y":
b = np.nansum(zi, axis=1) * len(xi)
if normalize_bin:
b /= np.nanmax(b)
axCorr.plot(b, yi, c=c, lw=2)
# beautify
if along == "x":
axCorr.set_ylim(ymin, ymax)
axCorr.tick_params(axis="x", which="both", length=0)
elif along == "y":
axCorr.set_xlim(ymin, ymax)
axCorr.tick_params(axis="y", which="both", length=0)
plt.grid(grid)
if zero_line:
if along == "x":
plt.axhline(0, c="k", lw=1)
elif along == "y":
plt.axvline(0, c="k", lw=1)
plt.setp(axCorr.get_xticklabels(), visible=False)
plt.setp(axCorr.get_yticklabels(), visible=False)
return axCorr | python | def add_sideplot(
ax,
along,
pad=0.,
*,
grid=True,
zero_line=True,
arrs_to_bin=None,
normalize_bin=True,
ymin=0,
ymax=1.1,
height=0.75,
c="C0"
):
"""Add a sideplot to an axis. Sideplots share their corresponding axis.
Parameters
----------
ax : matplotlib AxesSubplot object
The axis to add a sideplot along.
along : {'x', 'y'}
The dimension to add a sideplot along.
pad : number (optional)
Distance between axis and sideplot. Default is 0.
grid : bool (optional)
Toggle for plotting grid on sideplot. Default is True.
zero_line : bool (optional)
Toggle for plotting black line at zero signal. Default is True.
arrs_to_bin : list [xi, yi, zi] (optional)
Bins are plotted if arrays are supplied. Default is None.
normalize_bin : bool (optional)
Normalize bin by max value. Default is True.
ymin : number (optional)
Bin minimum extent. Default is 0.
ymax : number (optional)
Bin maximum extent. Default is 1.1
c : string (optional)
Line color. Default is C0.
Returns
-------
axCorr
AxesSubplot object
"""
# divider should only be created once
if hasattr(ax, "WrightTools_sideplot_divider"):
divider = ax.WrightTools_sideplot_divider
else:
divider = make_axes_locatable(ax)
setattr(ax, "WrightTools_sideplot_divider", divider)
# create sideplot axis
if along == "x":
axCorr = divider.append_axes("top", height, pad=pad, sharex=ax)
elif along == "y":
axCorr = divider.append_axes("right", height, pad=pad, sharey=ax)
axCorr.autoscale(False)
axCorr.set_adjustable("box")
# bin
if arrs_to_bin is not None:
xi, yi, zi = arrs_to_bin
if along == "x":
b = np.nansum(zi, axis=0) * len(yi)
if normalize_bin:
b /= np.nanmax(b)
axCorr.plot(xi, b, c=c, lw=2)
elif along == "y":
b = np.nansum(zi, axis=1) * len(xi)
if normalize_bin:
b /= np.nanmax(b)
axCorr.plot(b, yi, c=c, lw=2)
# beautify
if along == "x":
axCorr.set_ylim(ymin, ymax)
axCorr.tick_params(axis="x", which="both", length=0)
elif along == "y":
axCorr.set_xlim(ymin, ymax)
axCorr.tick_params(axis="y", which="both", length=0)
plt.grid(grid)
if zero_line:
if along == "x":
plt.axhline(0, c="k", lw=1)
elif along == "y":
plt.axvline(0, c="k", lw=1)
plt.setp(axCorr.get_xticklabels(), visible=False)
plt.setp(axCorr.get_yticklabels(), visible=False)
return axCorr | ['def', 'add_sideplot', '(', 'ax', ',', 'along', ',', 'pad', '=', '0.', ',', '*', ',', 'grid', '=', 'True', ',', 'zero_line', '=', 'True', ',', 'arrs_to_bin', '=', 'None', ',', 'normalize_bin', '=', 'True', ',', 'ymin', '=', '0', ',', 'ymax', '=', '1.1', ',', 'height', '=', '0.75', ',', 'c', '=', '"C0"', ')', ':', '# divider should only be created once', 'if', 'hasattr', '(', 'ax', ',', '"WrightTools_sideplot_divider"', ')', ':', 'divider', '=', 'ax', '.', 'WrightTools_sideplot_divider', 'else', ':', 'divider', '=', 'make_axes_locatable', '(', 'ax', ')', 'setattr', '(', 'ax', ',', '"WrightTools_sideplot_divider"', ',', 'divider', ')', '# create sideplot axis', 'if', 'along', '==', '"x"', ':', 'axCorr', '=', 'divider', '.', 'append_axes', '(', '"top"', ',', 'height', ',', 'pad', '=', 'pad', ',', 'sharex', '=', 'ax', ')', 'elif', 'along', '==', '"y"', ':', 'axCorr', '=', 'divider', '.', 'append_axes', '(', '"right"', ',', 'height', ',', 'pad', '=', 'pad', ',', 'sharey', '=', 'ax', ')', 'axCorr', '.', 'autoscale', '(', 'False', ')', 'axCorr', '.', 'set_adjustable', '(', '"box"', ')', '# bin', 'if', 'arrs_to_bin', 'is', 'not', 'None', ':', 'xi', ',', 'yi', ',', 'zi', '=', 'arrs_to_bin', 'if', 'along', '==', '"x"', ':', 'b', '=', 'np', '.', 'nansum', '(', 'zi', ',', 'axis', '=', '0', ')', '*', 'len', '(', 'yi', ')', 'if', 'normalize_bin', ':', 'b', '/=', 'np', '.', 'nanmax', '(', 'b', ')', 'axCorr', '.', 'plot', '(', 'xi', ',', 'b', ',', 'c', '=', 'c', ',', 'lw', '=', '2', ')', 'elif', 'along', '==', '"y"', ':', 'b', '=', 'np', '.', 'nansum', '(', 'zi', ',', 'axis', '=', '1', ')', '*', 'len', '(', 'xi', ')', 'if', 'normalize_bin', ':', 'b', '/=', 'np', '.', 'nanmax', '(', 'b', ')', 'axCorr', '.', 'plot', '(', 'b', ',', 'yi', ',', 'c', '=', 'c', ',', 'lw', '=', '2', ')', '# beautify', 'if', 'along', '==', '"x"', ':', 'axCorr', '.', 'set_ylim', '(', 'ymin', ',', 'ymax', ')', 'axCorr', '.', 'tick_params', '(', 'axis', '=', '"x"', ',', 'which', '=', '"both"', ',', 'length', '=', '0', ')', 'elif', 'along', '==', '"y"', ':', 'axCorr', '.', 'set_xlim', '(', 'ymin', ',', 'ymax', ')', 'axCorr', '.', 'tick_params', '(', 'axis', '=', '"y"', ',', 'which', '=', '"both"', ',', 'length', '=', '0', ')', 'plt', '.', 'grid', '(', 'grid', ')', 'if', 'zero_line', ':', 'if', 'along', '==', '"x"', ':', 'plt', '.', 'axhline', '(', '0', ',', 'c', '=', '"k"', ',', 'lw', '=', '1', ')', 'elif', 'along', '==', '"y"', ':', 'plt', '.', 'axvline', '(', '0', ',', 'c', '=', '"k"', ',', 'lw', '=', '1', ')', 'plt', '.', 'setp', '(', 'axCorr', '.', 'get_xticklabels', '(', ')', ',', 'visible', '=', 'False', ')', 'plt', '.', 'setp', '(', 'axCorr', '.', 'get_yticklabels', '(', ')', ',', 'visible', '=', 'False', ')', 'return', 'axCorr'] | Add a sideplot to an axis. Sideplots share their corresponding axis.
Parameters
----------
ax : matplotlib AxesSubplot object
The axis to add a sideplot along.
along : {'x', 'y'}
The dimension to add a sideplot along.
pad : number (optional)
Distance between axis and sideplot. Default is 0.
grid : bool (optional)
Toggle for plotting grid on sideplot. Default is True.
zero_line : bool (optional)
Toggle for plotting black line at zero signal. Default is True.
arrs_to_bin : list [xi, yi, zi] (optional)
Bins are plotted if arrays are supplied. Default is None.
normalize_bin : bool (optional)
Normalize bin by max value. Default is True.
ymin : number (optional)
Bin minimum extent. Default is 0.
ymax : number (optional)
Bin maximum extent. Default is 1.1
c : string (optional)
Line color. Default is C0.
Returns
-------
axCorr
AxesSubplot object | ['Add', 'a', 'sideplot', 'to', 'an', 'axis', '.', 'Sideplots', 'share', 'their', 'corresponding', 'axis', '.'] | train | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/artists/_helpers.py#L78-L163 |
2,089 | amaas-fintech/amaas-core-sdk-python | amaascore/market_data/fx_rate.py | FXRate.rate_timestamp | def rate_timestamp(self, rate_timestamp):
"""
Force the rate_timestamp to be a datetime
:param rate_timestamp:
:return:
"""
if rate_timestamp is not None:
if isinstance(rate_timestamp, (str, type_check)):
rate_timestamp = parse(rate_timestamp).replace(tzinfo=pytz.utc)
if type(rate_timestamp) == date:
rate_timestamp = datetime.combine(rate_timestamp, datetime.min.time()).replace(tzinfo=pytz.utc)
if not rate_timestamp.tzinfo:
raise ValueError('Cannot set an FX rate timestamp without a timezone')
self._rate_timestamp = rate_timestamp | python | def rate_timestamp(self, rate_timestamp):
"""
Force the rate_timestamp to be a datetime
:param rate_timestamp:
:return:
"""
if rate_timestamp is not None:
if isinstance(rate_timestamp, (str, type_check)):
rate_timestamp = parse(rate_timestamp).replace(tzinfo=pytz.utc)
if type(rate_timestamp) == date:
rate_timestamp = datetime.combine(rate_timestamp, datetime.min.time()).replace(tzinfo=pytz.utc)
if not rate_timestamp.tzinfo:
raise ValueError('Cannot set an FX rate timestamp without a timezone')
self._rate_timestamp = rate_timestamp | ['def', 'rate_timestamp', '(', 'self', ',', 'rate_timestamp', ')', ':', 'if', 'rate_timestamp', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'rate_timestamp', ',', '(', 'str', ',', 'type_check', ')', ')', ':', 'rate_timestamp', '=', 'parse', '(', 'rate_timestamp', ')', '.', 'replace', '(', 'tzinfo', '=', 'pytz', '.', 'utc', ')', 'if', 'type', '(', 'rate_timestamp', ')', '==', 'date', ':', 'rate_timestamp', '=', 'datetime', '.', 'combine', '(', 'rate_timestamp', ',', 'datetime', '.', 'min', '.', 'time', '(', ')', ')', '.', 'replace', '(', 'tzinfo', '=', 'pytz', '.', 'utc', ')', 'if', 'not', 'rate_timestamp', '.', 'tzinfo', ':', 'raise', 'ValueError', '(', "'Cannot set an FX rate timestamp without a timezone'", ')', 'self', '.', '_rate_timestamp', '=', 'rate_timestamp'] | Force the rate_timestamp to be a datetime
:param rate_timestamp:
:return: | ['Force', 'the', 'rate_timestamp', 'to', 'be', 'a', 'datetime', ':', 'param', 'rate_timestamp', ':', ':', 'return', ':'] | train | https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/market_data/fx_rate.py#L74-L87 |
2,090 | kumar303/mohawk | mohawk/util.py | normalize_string | def normalize_string(mac_type, resource, content_hash):
"""Serializes mac_type and resource into a HAWK string."""
normalized = [
'hawk.' + str(HAWK_VER) + '.' + mac_type,
normalize_header_attr(resource.timestamp),
normalize_header_attr(resource.nonce),
normalize_header_attr(resource.method or ''),
normalize_header_attr(resource.name or ''),
normalize_header_attr(resource.host),
normalize_header_attr(resource.port),
normalize_header_attr(content_hash or '')
]
# The blank lines are important. They follow what the Node Hawk lib does.
normalized.append(normalize_header_attr(resource.ext or ''))
if resource.app:
normalized.append(normalize_header_attr(resource.app))
normalized.append(normalize_header_attr(resource.dlg or ''))
# Add trailing new line.
normalized.append('')
normalized = '\n'.join(normalized)
return normalized | python | def normalize_string(mac_type, resource, content_hash):
"""Serializes mac_type and resource into a HAWK string."""
normalized = [
'hawk.' + str(HAWK_VER) + '.' + mac_type,
normalize_header_attr(resource.timestamp),
normalize_header_attr(resource.nonce),
normalize_header_attr(resource.method or ''),
normalize_header_attr(resource.name or ''),
normalize_header_attr(resource.host),
normalize_header_attr(resource.port),
normalize_header_attr(content_hash or '')
]
# The blank lines are important. They follow what the Node Hawk lib does.
normalized.append(normalize_header_attr(resource.ext or ''))
if resource.app:
normalized.append(normalize_header_attr(resource.app))
normalized.append(normalize_header_attr(resource.dlg or ''))
# Add trailing new line.
normalized.append('')
normalized = '\n'.join(normalized)
return normalized | ['def', 'normalize_string', '(', 'mac_type', ',', 'resource', ',', 'content_hash', ')', ':', 'normalized', '=', '[', "'hawk.'", '+', 'str', '(', 'HAWK_VER', ')', '+', "'.'", '+', 'mac_type', ',', 'normalize_header_attr', '(', 'resource', '.', 'timestamp', ')', ',', 'normalize_header_attr', '(', 'resource', '.', 'nonce', ')', ',', 'normalize_header_attr', '(', 'resource', '.', 'method', 'or', "''", ')', ',', 'normalize_header_attr', '(', 'resource', '.', 'name', 'or', "''", ')', ',', 'normalize_header_attr', '(', 'resource', '.', 'host', ')', ',', 'normalize_header_attr', '(', 'resource', '.', 'port', ')', ',', 'normalize_header_attr', '(', 'content_hash', 'or', "''", ')', ']', '# The blank lines are important. They follow what the Node Hawk lib does.', 'normalized', '.', 'append', '(', 'normalize_header_attr', '(', 'resource', '.', 'ext', 'or', "''", ')', ')', 'if', 'resource', '.', 'app', ':', 'normalized', '.', 'append', '(', 'normalize_header_attr', '(', 'resource', '.', 'app', ')', ')', 'normalized', '.', 'append', '(', 'normalize_header_attr', '(', 'resource', '.', 'dlg', 'or', "''", ')', ')', '# Add trailing new line.', 'normalized', '.', 'append', '(', "''", ')', 'normalized', '=', "'\\n'", '.', 'join', '(', 'normalized', ')', 'return', 'normalized'] | Serializes mac_type and resource into a HAWK string. | ['Serializes', 'mac_type', 'and', 'resource', 'into', 'a', 'HAWK', 'string', '.'] | train | https://github.com/kumar303/mohawk/blob/037be67ccf50ae704705e67add44e02737a65d21/mohawk/util.py#L109-L136 |
2,091 | albu/albumentations | albumentations/augmentations/functional.py | preserve_channel_dim | def preserve_channel_dim(func):
"""Preserve dummy channel dim."""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
shape = img.shape
result = func(img, *args, **kwargs)
if len(shape) == 3 and shape[-1] == 1 and len(result.shape) == 2:
result = np.expand_dims(result, axis=-1)
return result
return wrapped_function | python | def preserve_channel_dim(func):
"""Preserve dummy channel dim."""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
shape = img.shape
result = func(img, *args, **kwargs)
if len(shape) == 3 and shape[-1] == 1 and len(result.shape) == 2:
result = np.expand_dims(result, axis=-1)
return result
return wrapped_function | ['def', 'preserve_channel_dim', '(', 'func', ')', ':', '@', 'wraps', '(', 'func', ')', 'def', 'wrapped_function', '(', 'img', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'shape', '=', 'img', '.', 'shape', 'result', '=', 'func', '(', 'img', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'if', 'len', '(', 'shape', ')', '==', '3', 'and', 'shape', '[', '-', '1', ']', '==', '1', 'and', 'len', '(', 'result', '.', 'shape', ')', '==', '2', ':', 'result', '=', 'np', '.', 'expand_dims', '(', 'result', ',', 'axis', '=', '-', '1', ')', 'return', 'result', 'return', 'wrapped_function'] | Preserve dummy channel dim. | ['Preserve', 'dummy', 'channel', 'dim', '.'] | train | https://github.com/albu/albumentations/blob/b31393cd6126516d37a84e44c879bd92c68ffc93/albumentations/augmentations/functional.py#L47-L57 |
2,092 | hyperledger/indy-plenum | plenum/common/txn_util.py | reqToTxn | def reqToTxn(req):
"""
Transform a client request such that it can be stored in the ledger.
Also this is what will be returned to the client in the reply
:param req:
:return:
"""
if isinstance(req, str):
req = json.loads(req)
if isinstance(req, dict):
kwargs = dict(
identifier=req.get(f.IDENTIFIER.nm, None),
reqId=req.get(f.REQ_ID.nm, None),
operation=req.get(OPERATION, None),
signature=req.get(f.SIG.nm, None),
signatures=req.get(f.SIGS.nm, None),
protocolVersion=req.get(f.PROTOCOL_VERSION.nm, None)
)
req = TxnUtilConfig.client_request_class(**kwargs)
if isinstance(req, Request):
req_data = req.as_dict
req_data[f.DIGEST.nm] = req.digest
req_data[f.PAYLOAD_DIGEST.nm] = req.payload_digest
else:
raise TypeError(
"Expected dict or str as input, but got: {}".format(type(req)))
req_data = deepcopy(req_data)
return do_req_to_txn(req_data=req_data,
req_op=req_data[OPERATION]) | python | def reqToTxn(req):
"""
Transform a client request such that it can be stored in the ledger.
Also this is what will be returned to the client in the reply
:param req:
:return:
"""
if isinstance(req, str):
req = json.loads(req)
if isinstance(req, dict):
kwargs = dict(
identifier=req.get(f.IDENTIFIER.nm, None),
reqId=req.get(f.REQ_ID.nm, None),
operation=req.get(OPERATION, None),
signature=req.get(f.SIG.nm, None),
signatures=req.get(f.SIGS.nm, None),
protocolVersion=req.get(f.PROTOCOL_VERSION.nm, None)
)
req = TxnUtilConfig.client_request_class(**kwargs)
if isinstance(req, Request):
req_data = req.as_dict
req_data[f.DIGEST.nm] = req.digest
req_data[f.PAYLOAD_DIGEST.nm] = req.payload_digest
else:
raise TypeError(
"Expected dict or str as input, but got: {}".format(type(req)))
req_data = deepcopy(req_data)
return do_req_to_txn(req_data=req_data,
req_op=req_data[OPERATION]) | ['def', 'reqToTxn', '(', 'req', ')', ':', 'if', 'isinstance', '(', 'req', ',', 'str', ')', ':', 'req', '=', 'json', '.', 'loads', '(', 'req', ')', 'if', 'isinstance', '(', 'req', ',', 'dict', ')', ':', 'kwargs', '=', 'dict', '(', 'identifier', '=', 'req', '.', 'get', '(', 'f', '.', 'IDENTIFIER', '.', 'nm', ',', 'None', ')', ',', 'reqId', '=', 'req', '.', 'get', '(', 'f', '.', 'REQ_ID', '.', 'nm', ',', 'None', ')', ',', 'operation', '=', 'req', '.', 'get', '(', 'OPERATION', ',', 'None', ')', ',', 'signature', '=', 'req', '.', 'get', '(', 'f', '.', 'SIG', '.', 'nm', ',', 'None', ')', ',', 'signatures', '=', 'req', '.', 'get', '(', 'f', '.', 'SIGS', '.', 'nm', ',', 'None', ')', ',', 'protocolVersion', '=', 'req', '.', 'get', '(', 'f', '.', 'PROTOCOL_VERSION', '.', 'nm', ',', 'None', ')', ')', 'req', '=', 'TxnUtilConfig', '.', 'client_request_class', '(', '*', '*', 'kwargs', ')', 'if', 'isinstance', '(', 'req', ',', 'Request', ')', ':', 'req_data', '=', 'req', '.', 'as_dict', 'req_data', '[', 'f', '.', 'DIGEST', '.', 'nm', ']', '=', 'req', '.', 'digest', 'req_data', '[', 'f', '.', 'PAYLOAD_DIGEST', '.', 'nm', ']', '=', 'req', '.', 'payload_digest', 'else', ':', 'raise', 'TypeError', '(', '"Expected dict or str as input, but got: {}"', '.', 'format', '(', 'type', '(', 'req', ')', ')', ')', 'req_data', '=', 'deepcopy', '(', 'req_data', ')', 'return', 'do_req_to_txn', '(', 'req_data', '=', 'req_data', ',', 'req_op', '=', 'req_data', '[', 'OPERATION', ']', ')'] | Transform a client request such that it can be stored in the ledger.
Also this is what will be returned to the client in the reply
:param req:
:return: | ['Transform', 'a', 'client', 'request', 'such', 'that', 'it', 'can', 'be', 'stored', 'in', 'the', 'ledger', '.', 'Also', 'this', 'is', 'what', 'will', 'be', 'returned', 'to', 'the', 'client', 'in', 'the', 'reply', ':', 'param', 'req', ':', ':', 'return', ':'] | train | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/txn_util.py#L224-L253 |
2,093 | spotify/gordon | gordon/plugins_loader.py | load_plugins | def load_plugins(config, plugin_kwargs):
"""
Discover and instantiate plugins.
Args:
config (dict): loaded configuration for the Gordon service.
plugin_kwargs (dict): keyword arguments to give to plugins
during instantiation.
Returns:
Tuple of 3 lists: list of names of plugins, list of
instantiated plugin objects, and any errors encountered while
loading/instantiating plugins. A tuple of three empty lists is
returned if there are no plugins found or activated in gordon
config.
"""
installed_plugins = _gather_installed_plugins()
metrics_plugin = _get_metrics_plugin(config, installed_plugins)
if metrics_plugin:
plugin_kwargs['metrics'] = metrics_plugin
active_plugins = _get_activated_plugins(config, installed_plugins)
if not active_plugins:
return [], [], [], None
plugin_namespaces = _get_plugin_config_keys(active_plugins)
plugin_configs = _load_plugin_configs(plugin_namespaces, config)
plugin_names, plugins, errors = _init_plugins(
active_plugins, installed_plugins, plugin_configs, plugin_kwargs)
return plugin_names, plugins, errors, plugin_kwargs | python | def load_plugins(config, plugin_kwargs):
"""
Discover and instantiate plugins.
Args:
config (dict): loaded configuration for the Gordon service.
plugin_kwargs (dict): keyword arguments to give to plugins
during instantiation.
Returns:
Tuple of 3 lists: list of names of plugins, list of
instantiated plugin objects, and any errors encountered while
loading/instantiating plugins. A tuple of three empty lists is
returned if there are no plugins found or activated in gordon
config.
"""
installed_plugins = _gather_installed_plugins()
metrics_plugin = _get_metrics_plugin(config, installed_plugins)
if metrics_plugin:
plugin_kwargs['metrics'] = metrics_plugin
active_plugins = _get_activated_plugins(config, installed_plugins)
if not active_plugins:
return [], [], [], None
plugin_namespaces = _get_plugin_config_keys(active_plugins)
plugin_configs = _load_plugin_configs(plugin_namespaces, config)
plugin_names, plugins, errors = _init_plugins(
active_plugins, installed_plugins, plugin_configs, plugin_kwargs)
return plugin_names, plugins, errors, plugin_kwargs | ['def', 'load_plugins', '(', 'config', ',', 'plugin_kwargs', ')', ':', 'installed_plugins', '=', '_gather_installed_plugins', '(', ')', 'metrics_plugin', '=', '_get_metrics_plugin', '(', 'config', ',', 'installed_plugins', ')', 'if', 'metrics_plugin', ':', 'plugin_kwargs', '[', "'metrics'", ']', '=', 'metrics_plugin', 'active_plugins', '=', '_get_activated_plugins', '(', 'config', ',', 'installed_plugins', ')', 'if', 'not', 'active_plugins', ':', 'return', '[', ']', ',', '[', ']', ',', '[', ']', ',', 'None', 'plugin_namespaces', '=', '_get_plugin_config_keys', '(', 'active_plugins', ')', 'plugin_configs', '=', '_load_plugin_configs', '(', 'plugin_namespaces', ',', 'config', ')', 'plugin_names', ',', 'plugins', ',', 'errors', '=', '_init_plugins', '(', 'active_plugins', ',', 'installed_plugins', ',', 'plugin_configs', ',', 'plugin_kwargs', ')', 'return', 'plugin_names', ',', 'plugins', ',', 'errors', ',', 'plugin_kwargs'] | Discover and instantiate plugins.
Args:
config (dict): loaded configuration for the Gordon service.
plugin_kwargs (dict): keyword arguments to give to plugins
during instantiation.
Returns:
Tuple of 3 lists: list of names of plugins, list of
instantiated plugin objects, and any errors encountered while
loading/instantiating plugins. A tuple of three empty lists is
returned if there are no plugins found or activated in gordon
config. | ['Discover', 'and', 'instantiate', 'plugins', '.'] | train | https://github.com/spotify/gordon/blob/8dbf54a032cfaa8f003264682456236b6a69c039/gordon/plugins_loader.py#L210-L237 |
2,094 | totalgood/nlpia | src/nlpia/futil.py | find_filepath | def find_filepath(
filename,
basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')):
""" Given a filename or path see if it exists in any of the common places datafiles might be
>>> p = find_filepath('iq_test.csv')
>>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv'))
True
>>> p[-len('iq_test.csv'):]
'iq_test.csv'
>>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent')
False
"""
if os.path.isfile(filename):
return filename
for basedir in basepaths:
fullpath = expand_filepath(os.path.join(basedir, filename))
if os.path.isfile(fullpath):
return fullpath
return False | python | def find_filepath(
filename,
basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')):
""" Given a filename or path see if it exists in any of the common places datafiles might be
>>> p = find_filepath('iq_test.csv')
>>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv'))
True
>>> p[-len('iq_test.csv'):]
'iq_test.csv'
>>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent')
False
"""
if os.path.isfile(filename):
return filename
for basedir in basepaths:
fullpath = expand_filepath(os.path.join(basedir, filename))
if os.path.isfile(fullpath):
return fullpath
return False | ['def', 'find_filepath', '(', 'filename', ',', 'basepaths', '=', '(', 'os', '.', 'path', '.', 'curdir', ',', 'DATA_PATH', ',', 'BIGDATA_PATH', ',', 'BASE_DIR', ',', "'~'", ',', "'~/Downloads'", ',', 'os', '.', 'path', '.', 'join', '(', "'/'", ',', "'tmp'", ')', ',', "'..'", ')', ')', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'filename', ')', ':', 'return', 'filename', 'for', 'basedir', 'in', 'basepaths', ':', 'fullpath', '=', 'expand_filepath', '(', 'os', '.', 'path', '.', 'join', '(', 'basedir', ',', 'filename', ')', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'fullpath', ')', ':', 'return', 'fullpath', 'return', 'False'] | Given a filename or path see if it exists in any of the common places datafiles might be
>>> p = find_filepath('iq_test.csv')
>>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv'))
True
>>> p[-len('iq_test.csv'):]
'iq_test.csv'
>>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent')
False | ['Given', 'a', 'filename', 'or', 'path', 'see', 'if', 'it', 'exists', 'in', 'any', 'of', 'the', 'common', 'places', 'datafiles', 'might', 'be'] | train | https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L302-L321 |
2,095 | hanguokai/youku | youku/youku_comments.py | YoukuComments.create_comment | def create_comment(self, access_token, video_id, content,
reply_id=None, captcha_key=None, captcha_text=None):
"""doc: http://open.youku.com/docs/doc?id=41
"""
url = 'https://openapi.youku.com/v2/comments/create.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id,
'content': content,
'reply_id': reply_id,
'captcha_key': captcha_key,
'captcha_text': captcha_text
}
data = remove_none_value(data)
r = requests.post(url, data=data)
check_error(r)
return r.json()['id'] | python | def create_comment(self, access_token, video_id, content,
reply_id=None, captcha_key=None, captcha_text=None):
"""doc: http://open.youku.com/docs/doc?id=41
"""
url = 'https://openapi.youku.com/v2/comments/create.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id,
'content': content,
'reply_id': reply_id,
'captcha_key': captcha_key,
'captcha_text': captcha_text
}
data = remove_none_value(data)
r = requests.post(url, data=data)
check_error(r)
return r.json()['id'] | ['def', 'create_comment', '(', 'self', ',', 'access_token', ',', 'video_id', ',', 'content', ',', 'reply_id', '=', 'None', ',', 'captcha_key', '=', 'None', ',', 'captcha_text', '=', 'None', ')', ':', 'url', '=', "'https://openapi.youku.com/v2/comments/create.json'", 'data', '=', '{', "'client_id'", ':', 'self', '.', 'client_id', ',', "'access_token'", ':', 'access_token', ',', "'video_id'", ':', 'video_id', ',', "'content'", ':', 'content', ',', "'reply_id'", ':', 'reply_id', ',', "'captcha_key'", ':', 'captcha_key', ',', "'captcha_text'", ':', 'captcha_text', '}', 'data', '=', 'remove_none_value', '(', 'data', ')', 'r', '=', 'requests', '.', 'post', '(', 'url', ',', 'data', '=', 'data', ')', 'check_error', '(', 'r', ')', 'return', 'r', '.', 'json', '(', ')', '[', "'id'", ']'] | doc: http://open.youku.com/docs/doc?id=41 | ['doc', ':', 'http', ':', '//', 'open', '.', 'youku', '.', 'com', '/', 'docs', '/', 'doc?id', '=', '41'] | train | https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_comments.py#L129-L146 |
2,096 | brocade/pynos | pynos/versions/ver_7/ver_7_0_0/interface.py | Interface.evpn_instance_mac_timer_max_count | def evpn_instance_mac_timer_max_count(self, **kwargs):
"""
Add "Duplicate MAC max count" under evpn instance.
Args:
evpn_instance_name: Instance name for evpn
max_count: Duplicate MAC max count.
enable (bool): If target community needs to be enabled
or disabled.Default:``True``.
get (bool) : Get config instead of editing config. (True, False)
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if 'evpn_instance_name' is not passed.
ValueError: if 'evpn_instance_name' is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output=dev.interface.evpn_instance_mac_timer_max_count(
... evpn_instance_name='100',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... get=True,
... evpn_instance_name='100',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... enable=False,
... evpn_instance_name='101',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... get=True,
... evpn_instance_name='101',
... rbridge_id='1')
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
evpn_instance_name = kwargs.pop('evpn_instance_name', '')
max_count = kwargs.pop('max_count', '5')
enable = kwargs.pop('enable', True)
get = kwargs.pop('get', False)
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
evpn_args = dict(instance_name=evpn_instance_name,
max_count=max_count)
if get:
enable = None
method_name = 'rbridge_id_evpn_instance_duplicate_'\
'mac_timer_max_count'
method_class = self._rbridge
evpn_args['rbridge_id'] = rbridge_id
evpn_instance_mac_timer_max_count = getattr(method_class, method_name)
config = evpn_instance_mac_timer_max_count(**evpn_args)
if get:
return callback(config, handler='get_config')
if not enable:
config.find('.//*duplicate-mac-timer').set('operation', 'delete')
return callback(config) | python | def evpn_instance_mac_timer_max_count(self, **kwargs):
"""
Add "Duplicate MAC max count" under evpn instance.
Args:
evpn_instance_name: Instance name for evpn
max_count: Duplicate MAC max count.
enable (bool): If target community needs to be enabled
or disabled.Default:``True``.
get (bool) : Get config instead of editing config. (True, False)
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if 'evpn_instance_name' is not passed.
ValueError: if 'evpn_instance_name' is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output=dev.interface.evpn_instance_mac_timer_max_count(
... evpn_instance_name='100',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... get=True,
... evpn_instance_name='100',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... enable=False,
... evpn_instance_name='101',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... get=True,
... evpn_instance_name='101',
... rbridge_id='1')
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
evpn_instance_name = kwargs.pop('evpn_instance_name', '')
max_count = kwargs.pop('max_count', '5')
enable = kwargs.pop('enable', True)
get = kwargs.pop('get', False)
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
evpn_args = dict(instance_name=evpn_instance_name,
max_count=max_count)
if get:
enable = None
method_name = 'rbridge_id_evpn_instance_duplicate_'\
'mac_timer_max_count'
method_class = self._rbridge
evpn_args['rbridge_id'] = rbridge_id
evpn_instance_mac_timer_max_count = getattr(method_class, method_name)
config = evpn_instance_mac_timer_max_count(**evpn_args)
if get:
return callback(config, handler='get_config')
if not enable:
config.find('.//*duplicate-mac-timer').set('operation', 'delete')
return callback(config) | ['def', 'evpn_instance_mac_timer_max_count', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'evpn_instance_name', '=', 'kwargs', '.', 'pop', '(', "'evpn_instance_name'", ',', "''", ')', 'max_count', '=', 'kwargs', '.', 'pop', '(', "'max_count'", ',', "'5'", ')', 'enable', '=', 'kwargs', '.', 'pop', '(', "'enable'", ',', 'True', ')', 'get', '=', 'kwargs', '.', 'pop', '(', "'get'", ',', 'False', ')', 'rbridge_id', '=', 'kwargs', '.', 'pop', '(', "'rbridge_id'", ',', "'1'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'evpn_args', '=', 'dict', '(', 'instance_name', '=', 'evpn_instance_name', ',', 'max_count', '=', 'max_count', ')', 'if', 'get', ':', 'enable', '=', 'None', 'method_name', '=', "'rbridge_id_evpn_instance_duplicate_'", "'mac_timer_max_count'", 'method_class', '=', 'self', '.', '_rbridge', 'evpn_args', '[', "'rbridge_id'", ']', '=', 'rbridge_id', 'evpn_instance_mac_timer_max_count', '=', 'getattr', '(', 'method_class', ',', 'method_name', ')', 'config', '=', 'evpn_instance_mac_timer_max_count', '(', '*', '*', 'evpn_args', ')', 'if', 'get', ':', 'return', 'callback', '(', 'config', ',', 'handler', '=', "'get_config'", ')', 'if', 'not', 'enable', ':', 'config', '.', 'find', '(', "'.//*duplicate-mac-timer'", ')', '.', 'set', '(', "'operation'", ',', "'delete'", ')', 'return', 'callback', '(', 'config', ')'] | Add "Duplicate MAC max count" under evpn instance.
Args:
evpn_instance_name: Instance name for evpn
max_count: Duplicate MAC max count.
enable (bool): If target community needs to be enabled
or disabled.Default:``True``.
get (bool) : Get config instead of editing config. (True, False)
rbridge_id (str): rbridge-id for device. Only required when type is
`ve`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if 'evpn_instance_name' is not passed.
ValueError: if 'evpn_instance_name' is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output=dev.interface.evpn_instance_mac_timer_max_count(
... evpn_instance_name='100',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... get=True,
... evpn_instance_name='100',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... enable=False,
... evpn_instance_name='101',
... max_count='10'
... rbridge_id='1')
... output=dev.interface.evpn_instance_mac_timer_max_count(
... get=True,
... evpn_instance_name='101',
... rbridge_id='1')
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError | ['Add', 'Duplicate', 'MAC', 'max', 'count', 'under', 'evpn', 'instance', '.'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_0_0/interface.py#L1174-L1244 |
2,097 | gruns/icecream | icecream/icecream.py | extractArgumentsFromCallStr | def extractArgumentsFromCallStr(callStr):
"""
Parse the argument string via an AST instead of the overly simple
callStr.split(','). The latter incorrectly splits any string parameters
that contain commas therein, like ic(1, 'a,b', 2).
"""
def isTuple(ele):
return classname(ele) == 'Tuple'
paramsStr = callStr.split('(', 1)[-1].rsplit(')', 1)[0].strip()
root = ast.parse(paramsStr).body[0].value
eles = root.elts if isTuple(root) else [root]
# The ast module parses 'a, b' and '(a, b)' identically. Thus, ast.parse()
# alone can't tell the difference between
#
# ic(a, b)
#
# and
#
# ic((a, b))
#
# Detect this situation and preserve whole tuples, e.g. '(a, b)', passed to
# ic() by creating a new, temporary tuple around the original tuple and
# parsing that.
if paramsStr[0] == '(' and paramsStr[-1] == ')' and len(eles) > 1:
newTupleStr = '(' + paramsStr + ", 'ignored')"
argStrs = extractArgumentsFromCallStr(newTupleStr)[:-1]
return argStrs
indices = [
max(0, e.col_offset - 1) if isTuple(e) else e.col_offset for e in eles]
argStrs = [s.strip(' ,') for s in splitStringAtIndices(paramsStr, indices)]
return argStrs | python | def extractArgumentsFromCallStr(callStr):
"""
Parse the argument string via an AST instead of the overly simple
callStr.split(','). The latter incorrectly splits any string parameters
that contain commas therein, like ic(1, 'a,b', 2).
"""
def isTuple(ele):
return classname(ele) == 'Tuple'
paramsStr = callStr.split('(', 1)[-1].rsplit(')', 1)[0].strip()
root = ast.parse(paramsStr).body[0].value
eles = root.elts if isTuple(root) else [root]
# The ast module parses 'a, b' and '(a, b)' identically. Thus, ast.parse()
# alone can't tell the difference between
#
# ic(a, b)
#
# and
#
# ic((a, b))
#
# Detect this situation and preserve whole tuples, e.g. '(a, b)', passed to
# ic() by creating a new, temporary tuple around the original tuple and
# parsing that.
if paramsStr[0] == '(' and paramsStr[-1] == ')' and len(eles) > 1:
newTupleStr = '(' + paramsStr + ", 'ignored')"
argStrs = extractArgumentsFromCallStr(newTupleStr)[:-1]
return argStrs
indices = [
max(0, e.col_offset - 1) if isTuple(e) else e.col_offset for e in eles]
argStrs = [s.strip(' ,') for s in splitStringAtIndices(paramsStr, indices)]
return argStrs | ['def', 'extractArgumentsFromCallStr', '(', 'callStr', ')', ':', 'def', 'isTuple', '(', 'ele', ')', ':', 'return', 'classname', '(', 'ele', ')', '==', "'Tuple'", 'paramsStr', '=', 'callStr', '.', 'split', '(', "'('", ',', '1', ')', '[', '-', '1', ']', '.', 'rsplit', '(', "')'", ',', '1', ')', '[', '0', ']', '.', 'strip', '(', ')', 'root', '=', 'ast', '.', 'parse', '(', 'paramsStr', ')', '.', 'body', '[', '0', ']', '.', 'value', 'eles', '=', 'root', '.', 'elts', 'if', 'isTuple', '(', 'root', ')', 'else', '[', 'root', ']', "# The ast module parses 'a, b' and '(a, b)' identically. Thus, ast.parse()", "# alone can't tell the difference between", '#', '# ic(a, b)', '#', '# and', '#', '# ic((a, b))', '#', "# Detect this situation and preserve whole tuples, e.g. '(a, b)', passed to", '# ic() by creating a new, temporary tuple around the original tuple and', '# parsing that.', 'if', 'paramsStr', '[', '0', ']', '==', "'('", 'and', 'paramsStr', '[', '-', '1', ']', '==', "')'", 'and', 'len', '(', 'eles', ')', '>', '1', ':', 'newTupleStr', '=', "'('", '+', 'paramsStr', '+', '", \'ignored\')"', 'argStrs', '=', 'extractArgumentsFromCallStr', '(', 'newTupleStr', ')', '[', ':', '-', '1', ']', 'return', 'argStrs', 'indices', '=', '[', 'max', '(', '0', ',', 'e', '.', 'col_offset', '-', '1', ')', 'if', 'isTuple', '(', 'e', ')', 'else', 'e', '.', 'col_offset', 'for', 'e', 'in', 'eles', ']', 'argStrs', '=', '[', 's', '.', 'strip', '(', "' ,'", ')', 'for', 's', 'in', 'splitStringAtIndices', '(', 'paramsStr', ',', 'indices', ')', ']', 'return', 'argStrs'] | Parse the argument string via an AST instead of the overly simple
callStr.split(','). The latter incorrectly splits any string parameters
that contain commas therein, like ic(1, 'a,b', 2). | ['Parse', 'the', 'argument', 'string', 'via', 'an', 'AST', 'instead', 'of', 'the', 'overly', 'simple', 'callStr', '.', 'split', '(', ')', '.', 'The', 'latter', 'incorrectly', 'splits', 'any', 'string', 'parameters', 'that', 'contain', 'commas', 'therein', 'like', 'ic', '(', '1', 'a', 'b', '2', ')', '.'] | train | https://github.com/gruns/icecream/blob/cb4f3d50ec747637721fe58b80f2cc2a2baedabf/icecream/icecream.py#L459-L494 |
2,098 | minio/minio-py | minio/api.py | Minio.fget_object | def fget_object(self, bucket_name, object_name, file_path, request_headers=None, sse=None):
"""
Retrieves an object from a bucket and writes at file_path.
Examples:
minio.fget_object('foo', 'bar', 'localfile')
:param bucket_name: Bucket to read object from.
:param object_name: Name of the object to read.
:param file_path: Local file path to save the object.
:param request_headers: Any additional headers to be added with GET request.
"""
is_valid_bucket_name(bucket_name)
is_non_empty_string(object_name)
stat = self.stat_object(bucket_name, object_name, sse)
if os.path.isdir(file_path):
raise OSError("file is a directory.")
# Create top level directory if needed.
top_level_dir = os.path.dirname(file_path)
if top_level_dir:
mkdir_p(top_level_dir)
# Write to a temporary file "file_path.part.minio" before saving.
file_part_path = file_path + stat.etag + '.part.minio'
# Open file in 'write+append' mode.
with open(file_part_path, 'ab') as file_part_data:
# Save current file_part statinfo.
file_statinfo = os.stat(file_part_path)
# Get partial object.
response = self._get_partial_object(bucket_name, object_name,
offset=file_statinfo.st_size,
length=0,
request_headers=request_headers,
sse=sse)
# Save content_size to verify if we wrote more data.
content_size = int(response.headers['content-length'])
# Save total_written.
total_written = 0
for data in response.stream(amt=1024 * 1024):
file_part_data.write(data)
total_written += len(data)
# Release the connection from the response at this point.
response.release_conn()
# Verify if we wrote data properly.
if total_written < content_size:
msg = 'Data written {0} bytes is smaller than the' \
'specified size {1} bytes'.format(total_written,
content_size)
raise InvalidSizeError(msg)
if total_written > content_size:
msg = 'Data written {0} bytes is in excess than the' \
'specified size {1} bytes'.format(total_written,
content_size)
raise InvalidSizeError(msg)
#Delete existing file to be compatible with Windows
if os.path.exists(file_path):
os.remove(file_path)
#Rename with destination file path
os.rename(file_part_path, file_path)
# Return the stat
return stat | python | def fget_object(self, bucket_name, object_name, file_path, request_headers=None, sse=None):
"""
Retrieves an object from a bucket and writes at file_path.
Examples:
minio.fget_object('foo', 'bar', 'localfile')
:param bucket_name: Bucket to read object from.
:param object_name: Name of the object to read.
:param file_path: Local file path to save the object.
:param request_headers: Any additional headers to be added with GET request.
"""
is_valid_bucket_name(bucket_name)
is_non_empty_string(object_name)
stat = self.stat_object(bucket_name, object_name, sse)
if os.path.isdir(file_path):
raise OSError("file is a directory.")
# Create top level directory if needed.
top_level_dir = os.path.dirname(file_path)
if top_level_dir:
mkdir_p(top_level_dir)
# Write to a temporary file "file_path.part.minio" before saving.
file_part_path = file_path + stat.etag + '.part.minio'
# Open file in 'write+append' mode.
with open(file_part_path, 'ab') as file_part_data:
# Save current file_part statinfo.
file_statinfo = os.stat(file_part_path)
# Get partial object.
response = self._get_partial_object(bucket_name, object_name,
offset=file_statinfo.st_size,
length=0,
request_headers=request_headers,
sse=sse)
# Save content_size to verify if we wrote more data.
content_size = int(response.headers['content-length'])
# Save total_written.
total_written = 0
for data in response.stream(amt=1024 * 1024):
file_part_data.write(data)
total_written += len(data)
# Release the connection from the response at this point.
response.release_conn()
# Verify if we wrote data properly.
if total_written < content_size:
msg = 'Data written {0} bytes is smaller than the' \
'specified size {1} bytes'.format(total_written,
content_size)
raise InvalidSizeError(msg)
if total_written > content_size:
msg = 'Data written {0} bytes is in excess than the' \
'specified size {1} bytes'.format(total_written,
content_size)
raise InvalidSizeError(msg)
#Delete existing file to be compatible with Windows
if os.path.exists(file_path):
os.remove(file_path)
#Rename with destination file path
os.rename(file_part_path, file_path)
# Return the stat
return stat | ['def', 'fget_object', '(', 'self', ',', 'bucket_name', ',', 'object_name', ',', 'file_path', ',', 'request_headers', '=', 'None', ',', 'sse', '=', 'None', ')', ':', 'is_valid_bucket_name', '(', 'bucket_name', ')', 'is_non_empty_string', '(', 'object_name', ')', 'stat', '=', 'self', '.', 'stat_object', '(', 'bucket_name', ',', 'object_name', ',', 'sse', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'file_path', ')', ':', 'raise', 'OSError', '(', '"file is a directory."', ')', '# Create top level directory if needed.', 'top_level_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'file_path', ')', 'if', 'top_level_dir', ':', 'mkdir_p', '(', 'top_level_dir', ')', '# Write to a temporary file "file_path.part.minio" before saving.', 'file_part_path', '=', 'file_path', '+', 'stat', '.', 'etag', '+', "'.part.minio'", "# Open file in 'write+append' mode.", 'with', 'open', '(', 'file_part_path', ',', "'ab'", ')', 'as', 'file_part_data', ':', '# Save current file_part statinfo.', 'file_statinfo', '=', 'os', '.', 'stat', '(', 'file_part_path', ')', '# Get partial object.', 'response', '=', 'self', '.', '_get_partial_object', '(', 'bucket_name', ',', 'object_name', ',', 'offset', '=', 'file_statinfo', '.', 'st_size', ',', 'length', '=', '0', ',', 'request_headers', '=', 'request_headers', ',', 'sse', '=', 'sse', ')', '# Save content_size to verify if we wrote more data.', 'content_size', '=', 'int', '(', 'response', '.', 'headers', '[', "'content-length'", ']', ')', '# Save total_written.', 'total_written', '=', '0', 'for', 'data', 'in', 'response', '.', 'stream', '(', 'amt', '=', '1024', '*', '1024', ')', ':', 'file_part_data', '.', 'write', '(', 'data', ')', 'total_written', '+=', 'len', '(', 'data', ')', '# Release the connection from the response at this point.', 'response', '.', 'release_conn', '(', ')', '# Verify if we wrote data properly.', 'if', 'total_written', '<', 'content_size', ':', 'msg', '=', "'Data written {0} bytes is smaller than the'", "'specified size {1} bytes'", '.', 'format', '(', 'total_written', ',', 'content_size', ')', 'raise', 'InvalidSizeError', '(', 'msg', ')', 'if', 'total_written', '>', 'content_size', ':', 'msg', '=', "'Data written {0} bytes is in excess than the'", "'specified size {1} bytes'", '.', 'format', '(', 'total_written', ',', 'content_size', ')', 'raise', 'InvalidSizeError', '(', 'msg', ')', '#Delete existing file to be compatible with Windows', 'if', 'os', '.', 'path', '.', 'exists', '(', 'file_path', ')', ':', 'os', '.', 'remove', '(', 'file_path', ')', '#Rename with destination file path', 'os', '.', 'rename', '(', 'file_part_path', ',', 'file_path', ')', '# Return the stat', 'return', 'stat'] | Retrieves an object from a bucket and writes at file_path.
Examples:
minio.fget_object('foo', 'bar', 'localfile')
:param bucket_name: Bucket to read object from.
:param object_name: Name of the object to read.
:param file_path: Local file path to save the object.
:param request_headers: Any additional headers to be added with GET request. | ['Retrieves', 'an', 'object', 'from', 'a', 'bucket', 'and', 'writes', 'at', 'file_path', '.'] | train | https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L573-L645 |
2,099 | saltstack/salt | salt/modules/rh_ip.py | build_bond | def build_bond(iface, **settings):
'''
Create a bond script in /etc/modprobe.d with the passed settings
and load the bonding kernel module.
CLI Example:
.. code-block:: bash
salt '*' ip.build_bond bond0 mode=balance-alb
'''
rh_major = __grains__['osrelease'][:1]
opts = _parse_settings_bond(settings, iface)
try:
template = JINJA.get_template('conf.jinja')
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template conf.jinja')
return ''
data = template.render({'name': iface, 'bonding': opts})
_write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
if rh_major == '5':
__salt__['cmd.run'](
'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
python_shell=False
)
__salt__['cmd.run'](
'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
python_shell=False
)
__salt__['file.append']('/etc/modprobe.conf', path)
__salt__['kmod.load']('bonding')
if settings['test']:
return _read_temp(data)
return _read_file(path) | python | def build_bond(iface, **settings):
'''
Create a bond script in /etc/modprobe.d with the passed settings
and load the bonding kernel module.
CLI Example:
.. code-block:: bash
salt '*' ip.build_bond bond0 mode=balance-alb
'''
rh_major = __grains__['osrelease'][:1]
opts = _parse_settings_bond(settings, iface)
try:
template = JINJA.get_template('conf.jinja')
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template conf.jinja')
return ''
data = template.render({'name': iface, 'bonding': opts})
_write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
if rh_major == '5':
__salt__['cmd.run'](
'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
python_shell=False
)
__salt__['cmd.run'](
'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
python_shell=False
)
__salt__['file.append']('/etc/modprobe.conf', path)
__salt__['kmod.load']('bonding')
if settings['test']:
return _read_temp(data)
return _read_file(path) | ['def', 'build_bond', '(', 'iface', ',', '*', '*', 'settings', ')', ':', 'rh_major', '=', '__grains__', '[', "'osrelease'", ']', '[', ':', '1', ']', 'opts', '=', '_parse_settings_bond', '(', 'settings', ',', 'iface', ')', 'try', ':', 'template', '=', 'JINJA', '.', 'get_template', '(', "'conf.jinja'", ')', 'except', 'jinja2', '.', 'exceptions', '.', 'TemplateNotFound', ':', 'log', '.', 'error', '(', "'Could not load template conf.jinja'", ')', 'return', "''", 'data', '=', 'template', '.', 'render', '(', '{', "'name'", ':', 'iface', ',', "'bonding'", ':', 'opts', '}', ')', '_write_file_iface', '(', 'iface', ',', 'data', ',', '_RH_NETWORK_CONF_FILES', ',', "'{0}.conf'", '.', 'format', '(', 'iface', ')', ')', 'path', '=', 'os', '.', 'path', '.', 'join', '(', '_RH_NETWORK_CONF_FILES', ',', "'{0}.conf'", '.', 'format', '(', 'iface', ')', ')', 'if', 'rh_major', '==', "'5'", ':', '__salt__', '[', "'cmd.run'", ']', '(', '\'sed -i -e "/^alias\\\\s{0}.*/d" /etc/modprobe.conf\'', '.', 'format', '(', 'iface', ')', ',', 'python_shell', '=', 'False', ')', '__salt__', '[', "'cmd.run'", ']', '(', '\'sed -i -e "/^options\\\\s{0}.*/d" /etc/modprobe.conf\'', '.', 'format', '(', 'iface', ')', ',', 'python_shell', '=', 'False', ')', '__salt__', '[', "'file.append'", ']', '(', "'/etc/modprobe.conf'", ',', 'path', ')', '__salt__', '[', "'kmod.load'", ']', '(', "'bonding'", ')', 'if', 'settings', '[', "'test'", ']', ':', 'return', '_read_temp', '(', 'data', ')', 'return', '_read_file', '(', 'path', ')'] | Create a bond script in /etc/modprobe.d with the passed settings
and load the bonding kernel module.
CLI Example:
.. code-block:: bash
salt '*' ip.build_bond bond0 mode=balance-alb | ['Create', 'a', 'bond', 'script', 'in', '/', 'etc', '/', 'modprobe', '.', 'd', 'with', 'the', 'passed', 'settings', 'and', 'load', 'the', 'bonding', 'kernel', 'module', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L980-L1017 |