Dataset schema (one record per function; the ranges are string lengths):

    repo              string, 7-54 chars
    path              string, 4-192 chars
    url               string, 87-284 chars
    code              string, 78-104k chars
    code_tokens       list
    docstring         string, 1-46.9k chars
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
NYUCCL/psiTurk
psiturk/psiturk_shell.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L1163-L1170
def tunnel_open(self):
    ''' Open tunnel '''
    if (self.server.is_server_running() == 'no' or
            self.server.is_server_running() == 'maybe'):
        print("Error: Sorry, you need to have the server running to open a "
              "tunnel. Try 'server on' first.")
    else:
        self.tunnel.open()
[ "def", "tunnel_open", "(", "self", ")", ":", "if", "(", "self", ".", "server", ".", "is_server_running", "(", ")", "==", "'no'", "or", "self", ".", "server", ".", "is_server_running", "(", ")", "==", "'maybe'", ")", ":", "print", "(", "\"Error: Sorry, you need to have the server running to open a \"", "\"tunnel. Try 'server on' first.\"", ")", "else", ":", "self", ".", "tunnel", ".", "open", "(", ")" ]
Open tunnel
[ "Open", "tunnel" ]
python
train
ejeschke/ginga
ginga/ImageView.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L1899-L1919
def get_data_pct(self, xpct, ypct):
    """Calculate new data size for the given axis ratios.
    See :meth:`get_limits`.

    Parameters
    ----------
    xpct, ypct : float
        Ratio for X and Y, respectively, where 1 is 100%.

    Returns
    -------
    x, y : int
        Scaled dimensions.

    """
    xy_mn, xy_mx = self.get_limits()
    width = abs(xy_mx[0] - xy_mn[0])
    height = abs(xy_mx[1] - xy_mn[1])
    x, y = int(float(xpct) * width), int(float(ypct) * height)
    return (x, y)
[ "def", "get_data_pct", "(", "self", ",", "xpct", ",", "ypct", ")", ":", "xy_mn", ",", "xy_mx", "=", "self", ".", "get_limits", "(", ")", "width", "=", "abs", "(", "xy_mx", "[", "0", "]", "-", "xy_mn", "[", "0", "]", ")", "height", "=", "abs", "(", "xy_mx", "[", "1", "]", "-", "xy_mn", "[", "1", "]", ")", "x", ",", "y", "=", "int", "(", "float", "(", "xpct", ")", "*", "width", ")", ",", "int", "(", "float", "(", "ypct", ")", "*", "height", ")", "return", "(", "x", ",", "y", ")" ]
Calculate new data size for the given axis ratios.
See :meth:`get_limits`.

Parameters
----------
xpct, ypct : float
    Ratio for X and Y, respectively, where 1 is 100%.

Returns
-------
x, y : int
    Scaled dimensions.
[ "Calculate", "new", "data", "size", "for", "the", "given", "axis", "ratios", ".", "See", ":", "meth", ":", "get_limits", "." ]
python
train
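The ratio-to-pixel arithmetic in get_data_pct is easy to verify by hand. A minimal standalone sketch, assuming made-up limits of (0, 0) to (2048, 1024):

xy_mn, xy_mx = (0, 0), (2048, 1024)   # pretend get_limits() output
width = abs(xy_mx[0] - xy_mn[0])      # 2048
height = abs(xy_mx[1] - xy_mn[1])     # 1024
x, y = int(0.5 * width), int(0.25 * height)
print(x, y)                           # 1024 256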
pmacosta/pexdoc
pexdoc/pcontracts.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/pcontracts.py#L164-L171
def _get_custom_contract(param_contract):
    """Return the name of the matching custom contract, or None if the parameter contract is not a custom contract."""
    if not isinstance(param_contract, str):
        return None
    for custom_contract in _CUSTOM_CONTRACTS:
        if re.search(r"\b{0}\b".format(custom_contract), param_contract):
            return custom_contract
    return None
[ "def", "_get_custom_contract", "(", "param_contract", ")", ":", "if", "not", "isinstance", "(", "param_contract", ",", "str", ")", ":", "return", "None", "for", "custom_contract", "in", "_CUSTOM_CONTRACTS", ":", "if", "re", ".", "search", "(", "r\"\\b{0}\\b\"", ".", "format", "(", "custom_contract", ")", ",", "param_contract", ")", ":", "return", "custom_contract", "return", "None" ]
Return the name of the matching custom contract, or None if the parameter contract is not a custom contract.
[ "Return", "True", "if", "parameter", "contract", "is", "a", "custom", "contract", "False", "otherwise", "." ]
python
train
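The \b word boundaries in the search matter: they keep one contract name from matching inside another. A small self-contained sketch with a hypothetical registry (the names below are not pexdoc's):

import re

custom_contracts = ['file_name', 'name']   # hypothetical registry
param_contract = 'file_name|None'
hits = [c for c in custom_contracts
        if re.search(r"\b{0}\b".format(c), param_contract)]
print(hits)  # ['file_name'] -- bare 'name' is rejected by the \b anchors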
senaite/senaite.core
bika/lims/utils/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/utils/__init__.py#L179-L194
def formatDateParms(context, date_id):
    """ Obtain and reformat the from and to dates
        into a printable date parameter construct
    """
    from_date = context.REQUEST.get('%s_fromdate' % date_id, None)
    to_date = context.REQUEST.get('%s_todate' % date_id, None)
    # Note: stays an empty dict (not an empty string) when neither
    # date is present in the request.
    date_parms = {}
    if from_date and to_date:
        date_parms = 'from %s to %s' % (from_date, to_date)
    elif from_date:
        date_parms = 'from %s' % (from_date)
    elif to_date:
        date_parms = 'to %s' % (to_date)
    return date_parms
[ "def", "formatDateParms", "(", "context", ",", "date_id", ")", ":", "from_date", "=", "context", ".", "REQUEST", ".", "get", "(", "'%s_fromdate'", "%", "date_id", ",", "None", ")", "to_date", "=", "context", ".", "REQUEST", ".", "get", "(", "'%s_todate'", "%", "date_id", ",", "None", ")", "date_parms", "=", "{", "}", "if", "from_date", "and", "to_date", ":", "date_parms", "=", "'from %s to %s'", "%", "(", "from_date", ",", "to_date", ")", "elif", "from_date", ":", "date_parms", "=", "'from %s'", "%", "(", "from_date", ")", "elif", "to_date", ":", "date_parms", "=", "'to %s'", "%", "(", "to_date", ")", "return", "date_parms" ]
Obtain and reformat the from and to dates into a printable date parameter construct
[ "Obtain", "and", "reformat", "the", "from", "and", "to", "dates", "into", "a", "printable", "date", "parameter", "construct" ]
python
train
ray-project/ray
python/ray/node.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/node.py#L450-L463
def start_raylet_monitor(self):
    """Start the raylet monitor."""
    stdout_file, stderr_file = self.new_log_files("raylet_monitor")
    process_info = ray.services.start_raylet_monitor(
        self._redis_address,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        redis_password=self._ray_params.redis_password,
        config=self._config)
    assert (ray_constants.PROCESS_TYPE_RAYLET_MONITOR not in
            self.all_processes)
    self.all_processes[ray_constants.PROCESS_TYPE_RAYLET_MONITOR] = [
        process_info
    ]
[ "def", "start_raylet_monitor", "(", "self", ")", ":", "stdout_file", ",", "stderr_file", "=", "self", ".", "new_log_files", "(", "\"raylet_monitor\"", ")", "process_info", "=", "ray", ".", "services", ".", "start_raylet_monitor", "(", "self", ".", "_redis_address", ",", "stdout_file", "=", "stdout_file", ",", "stderr_file", "=", "stderr_file", ",", "redis_password", "=", "self", ".", "_ray_params", ".", "redis_password", ",", "config", "=", "self", ".", "_config", ")", "assert", "(", "ray_constants", ".", "PROCESS_TYPE_RAYLET_MONITOR", "not", "in", "self", ".", "all_processes", ")", "self", ".", "all_processes", "[", "ray_constants", ".", "PROCESS_TYPE_RAYLET_MONITOR", "]", "=", "[", "process_info", "]" ]
Start the raylet monitor.
[ "Start", "the", "raylet", "monitor", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L1160-L1211
def lockToColumn(self, index):
    """
    Sets the column that the tree view will lock to.  If None is supplied,
    then locking will be removed.

    :param      index | <int> || None
    """
    self._lockColumn = index
    if index is None:
        self.__destroyLockedView()
        return
    else:
        if not self._lockedView:
            view = QtGui.QTreeView(self.parent())
            view.setModel(self.model())
            view.setSelectionModel(self.selectionModel())
            view.setItemDelegate(self.itemDelegate())
            view.setFrameShape(view.NoFrame)
            view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            view.setRootIsDecorated(self.rootIsDecorated())
            view.setUniformRowHeights(True)
            view.setFocusProxy(self)
            view.header().setFocusProxy(self.header())
            view.setStyleSheet(self.styleSheet())
            view.setAutoScroll(False)
            view.setSortingEnabled(self.isSortingEnabled())
            view.setPalette(self.palette())
            view.move(self.x(), self.y())

            self.setAutoScroll(False)
            self.setUniformRowHeights(True)

            view.collapsed.connect(self.collapse)
            view.expanded.connect(self.expand)
            view.expanded.connect(self.__updateLockedView)
            view.collapsed.connect(self.__updateLockedView)

            view_head = view.header()
            for i in range(self.columnCount()):
                view_head.setResizeMode(i, self.header().resizeMode(i))

            view.header().sectionResized.connect(self.__updateStandardSection)
            self.header().sectionResized.connect(self.__updateLockedSection)

            vbar = view.verticalScrollBar()
            self.verticalScrollBar().valueChanged.connect(vbar.setValue)

            self._lockedView = view

        self.__updateLockedView()
[ "def", "lockToColumn", "(", "self", ",", "index", ")", ":", "self", ".", "_lockColumn", "=", "index", "if", "index", "is", "None", ":", "self", ".", "__destroyLockedView", "(", ")", "return", "else", ":", "if", "not", "self", ".", "_lockedView", ":", "view", "=", "QtGui", ".", "QTreeView", "(", "self", ".", "parent", "(", ")", ")", "view", ".", "setModel", "(", "self", ".", "model", "(", ")", ")", "view", ".", "setSelectionModel", "(", "self", ".", "selectionModel", "(", ")", ")", "view", ".", "setItemDelegate", "(", "self", ".", "itemDelegate", "(", ")", ")", "view", ".", "setFrameShape", "(", "view", ".", "NoFrame", ")", "view", ".", "setVerticalScrollBarPolicy", "(", "QtCore", ".", "Qt", ".", "ScrollBarAlwaysOff", ")", "view", ".", "setHorizontalScrollBarPolicy", "(", "QtCore", ".", "Qt", ".", "ScrollBarAlwaysOff", ")", "view", ".", "setRootIsDecorated", "(", "self", ".", "rootIsDecorated", "(", ")", ")", "view", ".", "setUniformRowHeights", "(", "True", ")", "view", ".", "setFocusProxy", "(", "self", ")", "view", ".", "header", "(", ")", ".", "setFocusProxy", "(", "self", ".", "header", "(", ")", ")", "view", ".", "setStyleSheet", "(", "self", ".", "styleSheet", "(", ")", ")", "view", ".", "setAutoScroll", "(", "False", ")", "view", ".", "setSortingEnabled", "(", "self", ".", "isSortingEnabled", "(", ")", ")", "view", ".", "setPalette", "(", "self", ".", "palette", "(", ")", ")", "view", ".", "move", "(", "self", ".", "x", "(", ")", ",", "self", ".", "y", "(", ")", ")", "self", ".", "setAutoScroll", "(", "False", ")", "self", ".", "setUniformRowHeights", "(", "True", ")", "view", ".", "collapsed", ".", "connect", "(", "self", ".", "collapse", ")", "view", ".", "expanded", ".", "connect", "(", "self", ".", "expand", ")", "view", ".", "expanded", ".", "connect", "(", "self", ".", "__updateLockedView", ")", "view", ".", "collapsed", ".", "connect", "(", "self", ".", "__updateLockedView", ")", "view_head", "=", "view", ".", "header", "(", ")", "for", "i", "in", "range", "(", "self", ".", "columnCount", "(", ")", ")", ":", "view_head", ".", "setResizeMode", "(", "i", ",", "self", ".", "header", "(", ")", ".", "resizeMode", "(", "i", ")", ")", "view", ".", "header", "(", ")", ".", "sectionResized", ".", "connect", "(", "self", ".", "__updateStandardSection", ")", "self", ".", "header", "(", ")", ".", "sectionResized", ".", "connect", "(", "self", ".", "__updateLockedSection", ")", "vbar", "=", "view", ".", "verticalScrollBar", "(", ")", "self", ".", "verticalScrollBar", "(", ")", ".", "valueChanged", ".", "connect", "(", "vbar", ".", "setValue", ")", "self", ".", "_lockedView", "=", "view", "self", ".", "__updateLockedView", "(", ")" ]
Sets the column that the tree view will lock to.  If None is supplied,
then locking will be removed.

:param      index | <int> || None
[ "Sets", "the", "column", "that", "the", "tree", "view", "will", "lock", "to", ".", "If", "None", "is", "supplied", "then", "locking", "will", "be", "removed", ".", ":", "param", "index", "|", "<int", ">", "||", "None" ]
python
train
robinandeer/puzzle
puzzle/cli/cases.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/cli/cases.py#L16-L39
def cases(ctx, root):
    """
    Show all cases in the database. If no database was found run puzzle init
    first.
    """
    root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
    if os.path.isfile(root):
        logger.error("'root' can't be a file")
        ctx.abort()

    logger.info("Root directory is: {}".format(root))

    db_path = os.path.join(root, 'puzzle_db.sqlite3')
    logger.info("db path is: {}".format(db_path))
    if not os.path.exists(db_path):
        logger.warn("database not initialized, run 'puzzle init'")
        ctx.abort()

    store = SqlStore(db_path)
    for case in store.cases():
        click.echo(case)
[ "def", "cases", "(", "ctx", ",", "root", ")", ":", "root", "=", "root", "or", "ctx", ".", "obj", ".", "get", "(", "'root'", ")", "or", "os", ".", "path", ".", "expanduser", "(", "\"~/.puzzle\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "root", ")", ":", "logger", ".", "error", "(", "\"'root' can't be a file\"", ")", "ctx", ".", "abort", "(", ")", "logger", ".", "info", "(", "\"Root directory is: {}\"", ".", "format", "(", "root", ")", ")", "db_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "'puzzle_db.sqlite3'", ")", "logger", ".", "info", "(", "\"db path is: {}\"", ".", "format", "(", "db_path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "db_path", ")", ":", "logger", ".", "warn", "(", "\"database not initialized, run 'puzzle init'\"", ")", "ctx", ".", "abort", "(", ")", "store", "=", "SqlStore", "(", "db_path", ")", "for", "case", "in", "store", ".", "cases", "(", ")", ":", "click", ".", "echo", "(", "case", ")" ]
Show all cases in the database. If no database was found run puzzle init first.
[ "Show", "all", "cases", "in", "the", "database", "." ]
python
train
nicolargo/glances
glances/exports/glances_restful.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/exports/glances_restful.py#L57-L68
def init(self):
    """Init the connection to the RESTful server."""
    if not self.export_enable:
        return None

    # Build the RESTful URL where the stats will be posted
    url = '{}://{}:{}{}'.format(self.protocol,
                                self.host,
                                self.port,
                                self.path)
    logger.info(
        "Stats will be exported to the RESTful endpoint {}".format(url))

    return url
[ "def", "init", "(", "self", ")", ":", "if", "not", "self", ".", "export_enable", ":", "return", "None", "# Build the RESTful URL where the stats will be posted", "url", "=", "'{}://{}:{}{}'", ".", "format", "(", "self", ".", "protocol", ",", "self", ".", "host", ",", "self", ".", "port", ",", "self", ".", "path", ")", "logger", ".", "info", "(", "\"Stats will be exported to the RESTful endpoint {}\"", ".", "format", "(", "url", ")", ")", "return", "url" ]
Init the connection to the RESTful server.
[ "Init", "the", "connection", "to", "the", "RESTful", "server", "." ]
python
train
orb-framework/orb
orb/core/schema.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/schema.py#L442-L451
def setIndexes(self, indexes):
    """
    Sets the list of indexed lookups for this schema to the inputted list.

    :param      indexes | [<orb.Index>, ..]
    """
    self.__indexes = {}
    for name, index in indexes.items():
        self.__indexes[name] = index
        index.setSchema(self)
[ "def", "setIndexes", "(", "self", ",", "indexes", ")", ":", "self", ".", "__indexes", "=", "{", "}", "for", "name", ",", "index", "in", "indexes", ".", "items", "(", ")", ":", "self", ".", "__indexes", "[", "name", "]", "=", "index", "index", ".", "setSchema", "(", "self", ")" ]
Sets the list of indexed lookups for this schema to the inputted list.

:param      indexes | [<orb.Index>, ..]
[ "Sets", "the", "list", "of", "indexed", "lookups", "for", "this", "schema", "to", "the", "inputted", "list", ".", ":", "param", "indexes", "|", "[", "<orb", ".", "Index", ">", "..", "]" ]
python
train
uber/rides-python-sdk
uber_rides/auth.py
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/auth.py#L423-L444
def get_session(self):
    """Create Session to store credentials.

    Returns
        (Session)
            A Session object with OAuth 2.0 credentials.
    """
    response = _request_access_token(
        grant_type=auth.CLIENT_CREDENTIALS_GRANT,
        client_id=self.client_id,
        client_secret=self.client_secret,
        scopes=self.scopes,
    )
    oauth2credential = OAuth2Credential.make_from_response(
        response=response,
        grant_type=auth.CLIENT_CREDENTIALS_GRANT,
        client_id=self.client_id,
        client_secret=self.client_secret,
    )
    return Session(oauth2credential=oauth2credential)
[ "def", "get_session", "(", "self", ")", ":", "response", "=", "_request_access_token", "(", "grant_type", "=", "auth", ".", "CLIENT_CREDENTIALS_GRANT", ",", "client_id", "=", "self", ".", "client_id", ",", "client_secret", "=", "self", ".", "client_secret", ",", "scopes", "=", "self", ".", "scopes", ",", ")", "oauth2credential", "=", "OAuth2Credential", ".", "make_from_response", "(", "response", "=", "response", ",", "grant_type", "=", "auth", ".", "CLIENT_CREDENTIALS_GRANT", ",", "client_id", "=", "self", ".", "client_id", ",", "client_secret", "=", "self", ".", "client_secret", ",", ")", "return", "Session", "(", "oauth2credential", "=", "oauth2credential", ")" ]
Create Session to store credentials.

Returns
    (Session)
        A Session object with OAuth 2.0 credentials.
[ "Create", "Session", "to", "store", "credentials", "." ]
python
train
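Under the hood get_session performs the standard OAuth 2.0 client-credentials grant (RFC 6749). A generic sketch of that token request with the requests library; the endpoint URL and credential values are illustrative, not Uber's actual API:

import requests

resp = requests.post(
    'https://auth.example.com/oauth/v2/token',  # hypothetical endpoint
    data={
        'grant_type': 'client_credentials',
        'client_id': 'my-client-id',            # placeholder credentials
        'client_secret': 'my-client-secret',
        'scope': 'profile',
    },
)
resp.raise_for_status()
access_token = resp.json()['access_token']      # short-lived app token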
wbond/oscrypto
oscrypto/_win/symmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/symmetric.py#L556-L664
def _advapi32_create_handles(cipher, key, iv):
    """
    Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption.
    The HCRYPTPROV must be released by close_context_handle() and the
    HCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        A byte string of the symmetric key

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :return:
        A tuple of (HCRYPTPROV, HCRYPTKEY)
    """
    context_handle = None

    if cipher == 'aes':
        algorithm_id = {
            16: Advapi32Const.CALG_AES_128,
            24: Advapi32Const.CALG_AES_192,
            32: Advapi32Const.CALG_AES_256,
        }[len(key)]
    else:
        algorithm_id = {
            'des': Advapi32Const.CALG_DES,
            'tripledes_2key': Advapi32Const.CALG_3DES_112,
            'tripledes_3key': Advapi32Const.CALG_3DES,
            'rc2': Advapi32Const.CALG_RC2,
            'rc4': Advapi32Const.CALG_RC4,
        }[cipher]

    provider = Advapi32Const.MS_ENH_RSA_AES_PROV
    context_handle = open_context_handle(provider, verify_only=False)

    blob_header_pointer = struct(advapi32, 'BLOBHEADER')
    blob_header = unwrap(blob_header_pointer)
    blob_header.bType = Advapi32Const.PLAINTEXTKEYBLOB
    blob_header.bVersion = Advapi32Const.CUR_BLOB_VERSION
    blob_header.reserved = 0
    blob_header.aiKeyAlg = algorithm_id

    blob_struct_pointer = struct(advapi32, 'PLAINTEXTKEYBLOB')
    blob_struct = unwrap(blob_struct_pointer)
    blob_struct.hdr = blob_header
    blob_struct.dwKeySize = len(key)

    blob = struct_bytes(blob_struct_pointer) + key

    flags = 0
    if cipher in set(['rc2', 'rc4']) and len(key) == 5:
        flags = Advapi32Const.CRYPT_NO_SALT

    key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
    res = advapi32.CryptImportKey(
        context_handle,
        blob,
        len(blob),
        null(),
        flags,
        key_handle_pointer
    )
    handle_error(res)

    key_handle = unwrap(key_handle_pointer)

    if cipher == 'rc2':
        buf = new(advapi32, 'DWORD *', len(key) * 8)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_EFFECTIVE_KEYLEN,
            buf,
            0
        )
        handle_error(res)

    if cipher != 'rc4':
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_IV,
            iv,
            0
        )
        handle_error(res)

        buf = new(advapi32, 'DWORD *', Advapi32Const.CRYPT_MODE_CBC)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_MODE,
            buf,
            0
        )
        handle_error(res)

        buf = new(advapi32, 'DWORD *', Advapi32Const.PKCS5_PADDING)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_PADDING,
            buf,
            0
        )
        handle_error(res)

    return (context_handle, key_handle)
[ "def", "_advapi32_create_handles", "(", "cipher", ",", "key", ",", "iv", ")", ":", "context_handle", "=", "None", "if", "cipher", "==", "'aes'", ":", "algorithm_id", "=", "{", "16", ":", "Advapi32Const", ".", "CALG_AES_128", ",", "24", ":", "Advapi32Const", ".", "CALG_AES_192", ",", "32", ":", "Advapi32Const", ".", "CALG_AES_256", ",", "}", "[", "len", "(", "key", ")", "]", "else", ":", "algorithm_id", "=", "{", "'des'", ":", "Advapi32Const", ".", "CALG_DES", ",", "'tripledes_2key'", ":", "Advapi32Const", ".", "CALG_3DES_112", ",", "'tripledes_3key'", ":", "Advapi32Const", ".", "CALG_3DES", ",", "'rc2'", ":", "Advapi32Const", ".", "CALG_RC2", ",", "'rc4'", ":", "Advapi32Const", ".", "CALG_RC4", ",", "}", "[", "cipher", "]", "provider", "=", "Advapi32Const", ".", "MS_ENH_RSA_AES_PROV", "context_handle", "=", "open_context_handle", "(", "provider", ",", "verify_only", "=", "False", ")", "blob_header_pointer", "=", "struct", "(", "advapi32", ",", "'BLOBHEADER'", ")", "blob_header", "=", "unwrap", "(", "blob_header_pointer", ")", "blob_header", ".", "bType", "=", "Advapi32Const", ".", "PLAINTEXTKEYBLOB", "blob_header", ".", "bVersion", "=", "Advapi32Const", ".", "CUR_BLOB_VERSION", "blob_header", ".", "reserved", "=", "0", "blob_header", ".", "aiKeyAlg", "=", "algorithm_id", "blob_struct_pointer", "=", "struct", "(", "advapi32", ",", "'PLAINTEXTKEYBLOB'", ")", "blob_struct", "=", "unwrap", "(", "blob_struct_pointer", ")", "blob_struct", ".", "hdr", "=", "blob_header", "blob_struct", ".", "dwKeySize", "=", "len", "(", "key", ")", "blob", "=", "struct_bytes", "(", "blob_struct_pointer", ")", "+", "key", "flags", "=", "0", "if", "cipher", "in", "set", "(", "[", "'rc2'", ",", "'rc4'", "]", ")", "and", "len", "(", "key", ")", "==", "5", ":", "flags", "=", "Advapi32Const", ".", "CRYPT_NO_SALT", "key_handle_pointer", "=", "new", "(", "advapi32", ",", "'HCRYPTKEY *'", ")", "res", "=", "advapi32", ".", "CryptImportKey", "(", "context_handle", ",", "blob", ",", "len", "(", "blob", ")", ",", "null", "(", ")", ",", "flags", ",", "key_handle_pointer", ")", "handle_error", "(", "res", ")", "key_handle", "=", "unwrap", "(", "key_handle_pointer", ")", "if", "cipher", "==", "'rc2'", ":", "buf", "=", "new", "(", "advapi32", ",", "'DWORD *'", ",", "len", "(", "key", ")", "*", "8", ")", "res", "=", "advapi32", ".", "CryptSetKeyParam", "(", "key_handle", ",", "Advapi32Const", ".", "KP_EFFECTIVE_KEYLEN", ",", "buf", ",", "0", ")", "handle_error", "(", "res", ")", "if", "cipher", "!=", "'rc4'", ":", "res", "=", "advapi32", ".", "CryptSetKeyParam", "(", "key_handle", ",", "Advapi32Const", ".", "KP_IV", ",", "iv", ",", "0", ")", "handle_error", "(", "res", ")", "buf", "=", "new", "(", "advapi32", ",", "'DWORD *'", ",", "Advapi32Const", ".", "CRYPT_MODE_CBC", ")", "res", "=", "advapi32", ".", "CryptSetKeyParam", "(", "key_handle", ",", "Advapi32Const", ".", "KP_MODE", ",", "buf", ",", "0", ")", "handle_error", "(", "res", ")", "buf", "=", "new", "(", "advapi32", ",", "'DWORD *'", ",", "Advapi32Const", ".", "PKCS5_PADDING", ")", "res", "=", "advapi32", ".", "CryptSetKeyParam", "(", "key_handle", ",", "Advapi32Const", ".", "KP_PADDING", ",", "buf", ",", "0", ")", "handle_error", "(", "res", ")", "return", "(", "context_handle", ",", "key_handle", ")" ]
Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption. The
HCRYPTPROV must be released by close_context_handle() and the HCRYPTKEY must
be released by advapi32.CryptDestroyKey() when done.

:param cipher:
    A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
    "rc2", "rc4"

:param key:
    A byte string of the symmetric key

:param iv:
    The initialization vector - a byte string - unused for RC4

:return:
    A tuple of (HCRYPTPROV, HCRYPTKEY)
[ "Creates", "an", "HCRYPTPROV", "and", "HCRYPTKEY", "for", "symmetric", "encryption", "/", "decryption", ".", "The", "HCRYPTPROV", "must", "be", "released", "by", "close_context_handle", "()", "and", "the", "HCRYPTKEY", "must", "be", "released", "by", "advapi32", ".", "CryptDestroyKey", "()", "when", "done", "." ]
python
valid
dannyzed/julian
julian/julian.py
https://github.com/dannyzed/julian/blob/7ca7e25afe2704f072b38f8e464bc4e7d4d7d0d7/julian/julian.py#L84-L134
def from_jd(jd: float, fmt: str = 'jd') -> datetime:
    """
    Converts a Julian Date to a datetime object.
    Algorithm is from Fliegel and van Flandern (1968)

    Parameters
    ----------
    jd: float
        Julian Date as type specified in the string fmt
    fmt: str

    Returns
    -------
    dt: datetime
    """
    jd, jdf = __from_format(jd, fmt)

    l = jd + 68569
    n = 4 * l // 146097
    l = l - (146097 * n + 3) // 4
    i = 4000 * (l + 1) // 1461001
    l = l - 1461 * i // 4 + 31
    j = 80 * l // 2447
    k = l - 2447 * j // 80
    l = j // 11
    j = j + 2 - 12 * l
    i = 100 * (n - 49) + i + l

    year = int(i)
    month = int(j)
    day = int(k)

    # in microseconds
    frac_component = int(jdf * (1e6 * 24 * 3600))

    hours = int(frac_component // (1e6 * 3600))
    frac_component -= hours * 1e6 * 3600

    minutes = int(frac_component // (1e6 * 60))
    frac_component -= minutes * 1e6 * 60

    seconds = int(frac_component // 1e6)
    frac_component -= seconds * 1e6

    frac_component = int(frac_component)

    dt = datetime(year=year, month=month, day=day,
                  hour=hours, minute=minutes,
                  second=seconds, microsecond=frac_component)
    return dt
[ "def", "from_jd", "(", "jd", ":", "float", ",", "fmt", ":", "str", "=", "'jd'", ")", "->", "datetime", ":", "jd", ",", "jdf", "=", "__from_format", "(", "jd", ",", "fmt", ")", "l", "=", "jd", "+", "68569", "n", "=", "4", "*", "l", "//", "146097", "l", "=", "l", "-", "(", "146097", "*", "n", "+", "3", ")", "//", "4", "i", "=", "4000", "*", "(", "l", "+", "1", ")", "//", "1461001", "l", "=", "l", "-", "1461", "*", "i", "//", "4", "+", "31", "j", "=", "80", "*", "l", "//", "2447", "k", "=", "l", "-", "2447", "*", "j", "//", "80", "l", "=", "j", "//", "11", "j", "=", "j", "+", "2", "-", "12", "*", "l", "i", "=", "100", "*", "(", "n", "-", "49", ")", "+", "i", "+", "l", "year", "=", "int", "(", "i", ")", "month", "=", "int", "(", "j", ")", "day", "=", "int", "(", "k", ")", "# in microseconds", "frac_component", "=", "int", "(", "jdf", "*", "(", "1e6", "*", "24", "*", "3600", ")", ")", "hours", "=", "int", "(", "frac_component", "//", "(", "1e6", "*", "3600", ")", ")", "frac_component", "-=", "hours", "*", "1e6", "*", "3600", "minutes", "=", "int", "(", "frac_component", "//", "(", "1e6", "*", "60", ")", ")", "frac_component", "-=", "minutes", "*", "1e6", "*", "60", "seconds", "=", "int", "(", "frac_component", "//", "1e6", ")", "frac_component", "-=", "seconds", "*", "1e6", "frac_component", "=", "int", "(", "frac_component", ")", "dt", "=", "datetime", "(", "year", "=", "year", ",", "month", "=", "month", ",", "day", "=", "day", ",", "hour", "=", "hours", ",", "minute", "=", "minutes", ",", "second", "=", "seconds", ",", "microsecond", "=", "frac_component", ")", "return", "dt" ]
Converts a Julian Date to a datetime object.
Algorithm is from Fliegel and van Flandern (1968)

Parameters
----------
jd: float
    Julian Date as type specified in the string fmt
fmt: str

Returns
-------
dt: datetime
[ "Converts", "a", "Julian", "Date", "to", "a", "datetime", "object", ".", "Algorithm", "is", "from", "Fliegel", "and", "van", "Flandern", "(", "1968", ")" ]
python
train
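A quick sanity check against a known fixed point: JD 2451545.0 is the J2000.0 epoch, 2000-01-01 12:00. A sketch assuming from_jd is exposed at the package root, as the package README suggests:

import julian  # assumes the installed package exposes from_jd at top level

dt = julian.from_jd(2451545.0, fmt='jd')
print(dt)  # 2000-01-01 12:00:00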
KnightHawk3/Hummingbird
hummingbird/__init__.py
https://github.com/KnightHawk3/Hummingbird/blob/10b918534b112c95a93f04dd76bfb7479c4f3f21/hummingbird/__init__.py#L124-L134
def get_feed(self, username):
    """Gets a user's feed.

    :param str username: User to fetch feed from.
    """
    r = self._query_('/users/%s/feed' % username, 'GET')
    results = [Story(item) for item in r.json()]
    return results
[ "def", "get_feed", "(", "self", ",", "username", ")", ":", "r", "=", "self", ".", "_query_", "(", "'/users/%s/feed'", "%", "username", ",", "'GET'", ")", "results", "=", "[", "Story", "(", "item", ")", "for", "item", "in", "r", ".", "json", "(", ")", "]", "return", "results" ]
Gets a user's feed.

:param str username: User to fetch feed from.
[ "Gets", "a", "user", "s", "feed", "." ]
python
train
hover2pi/svo_filters
svo_filters/svo.py
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L733-L760
def wave_units(self, units):
    """
    A setter for the wavelength units

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units
    """
    # Make sure it's length units
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")

    # Update the units
    self._wave_units = units

    # Update all the wavelength values
    self._wave = self.wave.to(self.wave_units).round(5)
    self.wave_min = self.wave_min.to(self.wave_units).round(5)
    self.wave_max = self.wave_max.to(self.wave_units).round(5)
    self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
    self.wave_center = self.wave_center.to(self.wave_units).round(5)
    self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
    self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
    self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
    self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
    self.width_eff = self.width_eff.to(self.wave_units).round(5)
    self.fwhm = self.fwhm.to(self.wave_units).round(5)
[ "def", "wave_units", "(", "self", ",", "units", ")", ":", "# Make sure it's length units", "if", "not", "units", ".", "is_equivalent", "(", "q", ".", "m", ")", ":", "raise", "ValueError", "(", "units", ",", "\": New wavelength units must be a length.\"", ")", "# Update the units", "self", ".", "_wave_units", "=", "units", "# Update all the wavelength values", "self", ".", "_wave", "=", "self", ".", "wave", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_min", "=", "self", ".", "wave_min", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_max", "=", "self", ".", "wave_max", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_eff", "=", "self", ".", "wave_eff", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_center", "=", "self", ".", "wave_center", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_mean", "=", "self", ".", "wave_mean", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_peak", "=", "self", ".", "wave_peak", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_phot", "=", "self", ".", "wave_phot", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "wave_pivot", "=", "self", ".", "wave_pivot", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "width_eff", "=", "self", ".", "width_eff", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")", "self", ".", "fwhm", "=", "self", ".", "fwhm", ".", "to", "(", "self", ".", "wave_units", ")", ".", "round", "(", "5", ")" ]
A setter for the wavelength units

Parameters
----------
units: str, astropy.units.core.PrefixUnit
    The wavelength units
[ "A", "setter", "for", "the", "wavelength", "units" ]
python
train
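The equivalence check and the .to() conversions are plain astropy.units operations. A minimal standalone sketch of the same pattern, with made-up wavelengths:

import astropy.units as u

wave = [0.5, 1.0, 2.0] * u.um     # a Quantity array of wavelengths
assert u.AA.is_equivalent(u.m)    # Angstroms pass the length check
print(wave.to(u.AA).round(5))     # [ 5000. 10000. 20000.] Angstrom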
VingtCinq/python-resize-image
resizeimage/resizeimage.py
https://github.com/VingtCinq/python-resize-image/blob/a4e645792ef30c5fcc558df6da6de18b1ecb95ea/resizeimage/resizeimage.py#L11-L35
def validate(validator):
    """
    Return a decorator that validates arguments with provided `validator`
    function.

    This will also store the validator function as `func.validate`.
    The decorator returned by this function can bypass the validator
    if `validate=False` is passed as an argument; otherwise the function
    is called directly. The validator must raise an exception if the
    function can not be called.
    """
    def decorator(func):
        """Bound decorator to a particular validator function"""

        @wraps(func)
        def wrapper(image, size, validate=True):
            if validate:
                validator(image, size)
            return func(image, size)

        return wrapper

    return decorator
[ "def", "validate", "(", "validator", ")", ":", "def", "decorator", "(", "func", ")", ":", "\"\"\"Bound decorator to a particular validator function\"\"\"", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "image", ",", "size", ",", "validate", "=", "True", ")", ":", "if", "validate", ":", "validator", "(", "image", ",", "size", ")", "return", "func", "(", "image", ",", "size", ")", "return", "wrapper", "return", "decorator" ]
Return a decorator that validates arguments with provided `validator` function. This will also store the validator function as `func.validate`. The decorator returned by this function can bypass the validator if `validate=False` is passed as an argument; otherwise the function is called directly. The validator must raise an exception if the function can not be called.
[ "Return", "a", "decorator", "that", "validates", "arguments", "with", "provided", "validator", "function", "." ]
python
test
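A toy pair showing the bypass switch in action. check_positive and double are made-up names, and the validate decorator is copied from the record above so the sketch is self-contained:

from functools import wraps

def validate(validator):                    # copied from the record above
    def decorator(func):
        @wraps(func)
        def wrapper(image, size, validate=True):
            if validate:
                validator(image, size)
            return func(image, size)
        return wrapper
    return decorator

def check_positive(image, size):            # hypothetical validator
    if size <= 0:
        raise ValueError('size must be positive')

@validate(check_positive)
def double(image, size):                    # hypothetical consumer
    return size * 2

print(double(None, 4))                      # validator runs -> 8
print(double(None, -4, validate=False))     # validator bypassed -> -8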
mdickinson/bigfloat
bigfloat/context.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/context.py#L296-L327
def _apply_function_in_context(cls, f, args, context):
    """ Apply an MPFR function 'f' to the given arguments 'args',
    rounding to the given context.  Returns a new Mpfr object with
    precision taken from the current context.

    """
    rounding = context.rounding
    bf = mpfr.Mpfr_t.__new__(cls)
    mpfr.mpfr_init2(bf, context.precision)
    args = (bf,) + args + (rounding,)
    ternary = f(*args)
    with _temporary_exponent_bounds(context.emin, context.emax):
        ternary = mpfr.mpfr_check_range(bf, ternary, rounding)
        if context.subnormalize:
            # mpfr_subnormalize doesn't set underflow and
            # subnormal flags, so we do that ourselves.  We choose
            # to set the underflow flag for *all* cases where the
            # 'after rounding' result is smaller than the smallest
            # normal number, even if that result is exact.

            # if bf is zero but ternary is nonzero, the underflow
            # flag will already have been set by mpfr_check_range;
            underflow = (
                mpfr.mpfr_number_p(bf) and
                not mpfr.mpfr_zero_p(bf) and
                mpfr.mpfr_get_exp(bf) < context.precision - 1 + context.emin)
            if underflow:
                mpfr.mpfr_set_underflow()
            ternary = mpfr.mpfr_subnormalize(bf, ternary, rounding)
            if ternary:
                mpfr.mpfr_set_inexflag()
    return bf
[ "def", "_apply_function_in_context", "(", "cls", ",", "f", ",", "args", ",", "context", ")", ":", "rounding", "=", "context", ".", "rounding", "bf", "=", "mpfr", ".", "Mpfr_t", ".", "__new__", "(", "cls", ")", "mpfr", ".", "mpfr_init2", "(", "bf", ",", "context", ".", "precision", ")", "args", "=", "(", "bf", ",", ")", "+", "args", "+", "(", "rounding", ",", ")", "ternary", "=", "f", "(", "*", "args", ")", "with", "_temporary_exponent_bounds", "(", "context", ".", "emin", ",", "context", ".", "emax", ")", ":", "ternary", "=", "mpfr", ".", "mpfr_check_range", "(", "bf", ",", "ternary", ",", "rounding", ")", "if", "context", ".", "subnormalize", ":", "# mpfr_subnormalize doesn't set underflow and", "# subnormal flags, so we do that ourselves. We choose", "# to set the underflow flag for *all* cases where the", "# 'after rounding' result is smaller than the smallest", "# normal number, even if that result is exact.", "# if bf is zero but ternary is nonzero, the underflow", "# flag will already have been set by mpfr_check_range;", "underflow", "=", "(", "mpfr", ".", "mpfr_number_p", "(", "bf", ")", "and", "not", "mpfr", ".", "mpfr_zero_p", "(", "bf", ")", "and", "mpfr", ".", "mpfr_get_exp", "(", "bf", ")", "<", "context", ".", "precision", "-", "1", "+", "context", ".", "emin", ")", "if", "underflow", ":", "mpfr", ".", "mpfr_set_underflow", "(", ")", "ternary", "=", "mpfr", ".", "mpfr_subnormalize", "(", "bf", ",", "ternary", ",", "rounding", ")", "if", "ternary", ":", "mpfr", ".", "mpfr_set_inexflag", "(", ")", "return", "bf" ]
Apply an MPFR function 'f' to the given arguments 'args', rounding to the given context. Returns a new Mpfr object with precision taken from the current context.
[ "Apply", "an", "MPFR", "function", "f", "to", "the", "given", "arguments", "args", "rounding", "to", "the", "given", "context", ".", "Returns", "a", "new", "Mpfr", "object", "with", "precision", "taken", "from", "the", "current", "context", "." ]
python
train
markovmodel/PyEMMA
pyemma/plots/plots2d.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/plots/plots2d.py#L157-L192
def get_grid_data(xall, yall, zall, nbins=100, method='nearest'):
    """Interpolate unstructured two-dimensional data.

    Parameters
    ----------
    xall : ndarray(T)
        Sample x-coordinates.
    yall : ndarray(T)
        Sample y-coordinates.
    zall : ndarray(T)
        Sample z-coordinates.
    nbins : int, optional, default=100
        Number of histogram bins used in x/y-dimensions.
    method : str, optional, default='nearest'
        Assignment method; scipy.interpolate.griddata supports the
        methods 'nearest', 'linear', and 'cubic'.

    Returns
    -------
    x : ndarray(nbins, nbins)
        The bins' x-coordinates in meshgrid format.
    y : ndarray(nbins, nbins)
        The bins' y-coordinates in meshgrid format.
    z : ndarray(nbins, nbins)
        Interpolated z-data in meshgrid format.

    """
    from scipy.interpolate import griddata
    x, y = _np.meshgrid(
        _np.linspace(xall.min(), xall.max(), nbins),
        _np.linspace(yall.min(), yall.max(), nbins),
        indexing='ij')
    z = griddata(
        _np.hstack([xall[:, None], yall[:, None]]),
        zall, (x, y), method=method)
    return x, y, z
[ "def", "get_grid_data", "(", "xall", ",", "yall", ",", "zall", ",", "nbins", "=", "100", ",", "method", "=", "'nearest'", ")", ":", "from", "scipy", ".", "interpolate", "import", "griddata", "x", ",", "y", "=", "_np", ".", "meshgrid", "(", "_np", ".", "linspace", "(", "xall", ".", "min", "(", ")", ",", "xall", ".", "max", "(", ")", ",", "nbins", ")", ",", "_np", ".", "linspace", "(", "yall", ".", "min", "(", ")", ",", "yall", ".", "max", "(", ")", ",", "nbins", ")", ",", "indexing", "=", "'ij'", ")", "z", "=", "griddata", "(", "_np", ".", "hstack", "(", "[", "xall", "[", ":", ",", "None", "]", ",", "yall", "[", ":", ",", "None", "]", "]", ")", ",", "zall", ",", "(", "x", ",", "y", ")", ",", "method", "=", "method", ")", "return", "x", ",", "y", ",", "z" ]
Interpolate unstructured two-dimensional data.

Parameters
----------
xall : ndarray(T)
    Sample x-coordinates.
yall : ndarray(T)
    Sample y-coordinates.
zall : ndarray(T)
    Sample z-coordinates.
nbins : int, optional, default=100
    Number of histogram bins used in x/y-dimensions.
method : str, optional, default='nearest'
    Assignment method; scipy.interpolate.griddata supports the
    methods 'nearest', 'linear', and 'cubic'.

Returns
-------
x : ndarray(nbins, nbins)
    The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
    The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
    Interpolated z-data in meshgrid format.
[ "Interpolate", "unstructured", "two", "-", "dimensional", "data", "." ]
python
train
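A self-contained run of the same recipe on synthetic scattered samples of z = x*y (all values made up):

import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(0)
xall, yall = rng.random(500), rng.random(500)  # scattered sample positions
zall = xall * yall                             # values to interpolate

x, y = np.meshgrid(np.linspace(xall.min(), xall.max(), 50),
                   np.linspace(yall.min(), yall.max(), 50),
                   indexing='ij')
z = griddata(np.column_stack([xall, yall]), zall, (x, y), method='nearest')
print(z.shape)  # (50, 50)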
mushkevych/scheduler
synergy/supervisor/synergy_supervisor.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/supervisor/synergy_supervisor.py#L77-L98
def _poll_process(self, box_config):
    """ Between killing a process and its actual termination lies a poorly
    documented requirement: purging the process's IO pipes and reading its
    exit status. This can be done either by os.wait() or process.wait() """
    try:
        p = psutil.Process(box_config.pid)

        return_code = p.wait(timeout=0.01)
        if return_code is None:
            # process is already terminated
            self.logger.info('Process {0} is terminated'.format(box_config.process_name))
            return
        else:
            # process is terminated; possibly by OS
            box_config.pid = None
            self.bc_dao.update(box_config)
            self.logger.info('Process {0} got terminated. Cleaning up'.format(box_config.process_name))
    except TimeoutExpired:
        # process is alive and OK
        pass
    except Exception:
        self.logger.error('Exception on polling: {0}'.format(box_config.process_name), exc_info=True)
[ "def", "_poll_process", "(", "self", ",", "box_config", ")", ":", "try", ":", "p", "=", "psutil", ".", "Process", "(", "box_config", ".", "pid", ")", "return_code", "=", "p", ".", "wait", "(", "timeout", "=", "0.01", ")", "if", "return_code", "is", "None", ":", "# process is already terminated", "self", ".", "logger", ".", "info", "(", "'Process {0} is terminated'", ".", "format", "(", "box_config", ".", "process_name", ")", ")", "return", "else", ":", "# process is terminated; possibly by OS", "box_config", ".", "pid", "=", "None", "self", ".", "bc_dao", ".", "update", "(", "box_config", ")", "self", ".", "logger", ".", "info", "(", "'Process {0} got terminated. Cleaning up'", ".", "format", "(", "box_config", ".", "process_name", ")", ")", "except", "TimeoutExpired", ":", "# process is alive and OK", "pass", "except", "Exception", ":", "self", ".", "logger", ".", "error", "(", "'Exception on polling: {0}'", ".", "format", "(", "box_config", ".", "process_name", ")", ",", "exc_info", "=", "True", ")" ]
Between killing a process and its actual termination lies a poorly documented requirement: purging the process's IO pipes and reading its exit status. This can be done either by os.wait() or process.wait().
[ "between", "killing", "a", "process", "and", "its", "actual", "termination", "lies", "poorly", "documented", "requirement", "-", "<purging", "process", "io", "pipes", "and", "reading", "exit", "status", ">", ".", "this", "can", "be", "done", "either", "by", "os", ".", "wait", "()", "or", "process", ".", "wait", "()" ]
python
train
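The branching above hinges on psutil's wait() semantics: it raises psutil.TimeoutExpired while the process is still alive, and returns the exit status (or None for non-children) once it has gone. A minimal standalone demonstration against a short-lived child process:

import subprocess
import sys

import psutil

child = subprocess.Popen([sys.executable, '-c', 'pass'])
p = psutil.Process(child.pid)
try:
    rc = p.wait(timeout=5)    # reaps the child and returns its exit code
    print('exited with', rc)  # exited with 0
except psutil.TimeoutExpired:
    print('still running')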
spyder-ide/spyder
spyder/plugins/editor/panels/codefolding.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/codefolding.py#L630-L639
def expand_all(self):
    """Expands all fold triggers."""
    block = self.editor.document().firstBlock()
    while block.isValid():
        TextBlockHelper.set_collapsed(block, False)
        block.setVisible(True)
        block = block.next()
    self._clear_block_deco()
    self._refresh_editor_and_scrollbars()
    self.expand_all_triggered.emit()
[ "def", "expand_all", "(", "self", ")", ":", "block", "=", "self", ".", "editor", ".", "document", "(", ")", ".", "firstBlock", "(", ")", "while", "block", ".", "isValid", "(", ")", ":", "TextBlockHelper", ".", "set_collapsed", "(", "block", ",", "False", ")", "block", ".", "setVisible", "(", "True", ")", "block", "=", "block", ".", "next", "(", ")", "self", ".", "_clear_block_deco", "(", ")", "self", ".", "_refresh_editor_and_scrollbars", "(", ")", "self", ".", "expand_all_triggered", ".", "emit", "(", ")" ]
Expands all fold triggers.
[ "Expands", "all", "fold", "triggers", "." ]
python
train
simion/pip-upgrader
pip_upgrader/packages_upgrader.py
https://github.com/simion/pip-upgrader/blob/716adca65d9ed56d4d416f94ede8a8e4fa8d640a/pip_upgrader/packages_upgrader.py#L30-L41
def _update_package(self, package):
    """ Update (install) the package in the current environment and,
    if successful, also replace the version in the requirements file """
    try:
        if not self.dry_run and not self.skip_package_installation:  # pragma: nocover
            subprocess.check_call(['pip', 'install',
                                   '{}=={}'.format(package['name'], package['latest_version'])])
        else:
            print('[Dry Run]: skipping package installation:', package['name'])
        # update only if installation success
        self._update_requirements_package(package)
    except CalledProcessError:  # pragma: nocover
        print(Color('{{autored}}Failed to install package "{}"{{/autored}}'.format(package['name'])))
[ "def", "_update_package", "(", "self", ",", "package", ")", ":", "try", ":", "if", "not", "self", ".", "dry_run", "and", "not", "self", ".", "skip_package_installation", ":", "# pragma: nocover", "subprocess", ".", "check_call", "(", "[", "'pip'", ",", "'install'", ",", "'{}=={}'", ".", "format", "(", "package", "[", "'name'", "]", ",", "package", "[", "'latest_version'", "]", ")", "]", ")", "else", ":", "print", "(", "'[Dry Run]: skipping package installation:'", ",", "package", "[", "'name'", "]", ")", "# update only if installation success", "self", ".", "_update_requirements_package", "(", "package", ")", "except", "CalledProcessError", ":", "# pragma: nocover", "print", "(", "Color", "(", "'{{autored}}Failed to install package \"{}\"{{/autored}}'", ".", "format", "(", "package", "[", "'name'", "]", ")", ")", ")" ]
Update (install) the package in the current environment and, if successful, also replace the version in the requirements file
[ "Update", "(", "install", ")", "the", "package", "in", "current", "environment", "and", "if", "success", "also", "replace", "version", "in", "file" ]
python
test
widdowquinn/pyani
pyani/pyani_tools.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L131-L156
def get_labels(filename, logger=None):
    """Returns a dictionary of alternative sequence labels, or None

    - filename - path to file containing tab-separated table of labels

    Input files should be formatted as <key>\t<label>, one pair per line.
    """
    labeldict = {}
    if filename is not None:
        if logger:
            logger.info("Reading labels from %s", filename)
        with open(filename, "r") as ifh:
            count = 0
            for line in ifh.readlines():
                count += 1
                try:
                    key, label = line.strip().split("\t")
                except ValueError:
                    if logger:
                        logger.warning("Problem with class file: %s", filename)
                        logger.warning("%d: %s", count, line.strip())
                        logger.warning("(skipping line)")
                    continue
                else:
                    labeldict[key] = label
    return labeldict
[ "def", "get_labels", "(", "filename", ",", "logger", "=", "None", ")", ":", "labeldict", "=", "{", "}", "if", "filename", "is", "not", "None", ":", "if", "logger", ":", "logger", ".", "info", "(", "\"Reading labels from %s\"", ",", "filename", ")", "with", "open", "(", "filename", ",", "\"r\"", ")", "as", "ifh", ":", "count", "=", "0", "for", "line", "in", "ifh", ".", "readlines", "(", ")", ":", "count", "+=", "1", "try", ":", "key", ",", "label", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "except", "ValueError", ":", "if", "logger", ":", "logger", ".", "warning", "(", "\"Problem with class file: %s\"", ",", "filename", ")", "logger", ".", "warning", "(", "\"%d: %s\"", ",", "(", "count", ",", "line", ".", "strip", "(", ")", ")", ")", "logger", ".", "warning", "(", "\"(skipping line)\"", ")", "continue", "else", ":", "labeldict", "[", "key", "]", "=", "label", "return", "labeldict" ]
Returns a dictionary of alternative sequence labels, or None

- filename - path to file containing tab-separated table of labels

Input files should be formatted as <key>\t<label>, one pair per line.
[ "Returns", "a", "dictionary", "of", "alternative", "sequence", "labels", "or", "None" ]
python
train
kislyuk/aegea
aegea/packages/github3/repos/repo.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L1401-L1411
def iter_languages(self, number=-1, etag=None):
    """Iterate over the programming languages used in the repository.

    :param int number: (optional), number of languages to return. Default:
        -1 returns all used languages
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of tuples
    """
    url = self._build_url('languages', base_url=self._api)
    return self._iter(int(number), url, tuple, etag=etag)
[ "def", "iter_languages", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'languages'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "tuple", ",", "etag", "=", "etag", ")" ]
Iterate over the programming languages used in the repository.

:param int number: (optional), number of languages to return. Default: -1
    returns all used languages
:param str etag: (optional), ETag from a previous request to the same
    endpoint
:returns: generator of tuples
[ "Iterate", "over", "the", "programming", "languages", "used", "in", "the", "repository", "." ]
python
train
KartikTalwar/Duolingo
duolingo.py
https://github.com/KartikTalwar/Duolingo/blob/0f7e9a0d4bfa864ade82890fca3789679ef38bee/duolingo.py#L50-L63
def _login(self):
    """
    Authenticate through ``https://www.duolingo.com/login``.
    """
    login_url = "https://www.duolingo.com/login"
    data = {"login": self.username, "password": self.password}
    request = self._make_req(login_url, data)
    attempt = request.json()

    if attempt.get('response') == 'OK':
        self.jwt = request.headers['jwt']
        return True

    raise Exception("Login failed")
[ "def", "_login", "(", "self", ")", ":", "login_url", "=", "\"https://www.duolingo.com/login\"", "data", "=", "{", "\"login\"", ":", "self", ".", "username", ",", "\"password\"", ":", "self", ".", "password", "}", "request", "=", "self", ".", "_make_req", "(", "login_url", ",", "data", ")", "attempt", "=", "request", ".", "json", "(", ")", "if", "attempt", ".", "get", "(", "'response'", ")", "==", "'OK'", ":", "self", ".", "jwt", "=", "request", ".", "headers", "[", "'jwt'", "]", "return", "True", "raise", "Exception", "(", "\"Login failed\"", ")" ]
Authenticate through ``https://www.duolingo.com/login``.
[ "Authenticate", "through", "https", ":", "//", "www", ".", "duolingo", ".", "com", "/", "login", "." ]
python
train
asascience-open/paegan-transport
paegan/transport/particles/particle.py
https://github.com/asascience-open/paegan-transport/blob/99a7f4ea24f0f42d9b34d1fb0e87ab2c49315bd3/paegan/transport/particles/particle.py#L176-L201
def get_age(self, **kwargs):
    """
    Returns the particle's age (how long it has been forced) in a variety
    of units. Rounded to 8 decimal places.

    Parameters:
        units (optional) = 'days' (default), 'hours', 'minutes', or 'seconds'
    """
    try:
        units = kwargs.get('units', None)
        if units is None:
            return self._age
        units = units.lower()
        if units == "days":
            z = self._age
        elif units == "hours":
            z = self._age * 24.
        elif units == "minutes":
            z = self._age * 24. * 60.
        elif units == "seconds":
            z = self._age * 24. * 60. * 60.
        else:
            raise
        return round(z, 8)
    except StandardError:
        raise KeyError("Could not return age of particle")
[ "def", "get_age", "(", "self", ",", "*", "*", "kwargs", ")", ":", "try", ":", "units", "=", "kwargs", ".", "get", "(", "'units'", ",", "None", ")", "if", "units", "is", "None", ":", "return", "self", ".", "_age", "units", "=", "units", ".", "lower", "(", ")", "if", "units", "==", "\"days\"", ":", "z", "=", "self", ".", "_age", "elif", "units", "==", "\"hours\"", ":", "z", "=", "self", ".", "_age", "*", "24.", "elif", "units", "==", "\"minutes\"", ":", "z", "=", "self", ".", "_age", "*", "24.", "*", "60.", "elif", "units", "==", "\"seconds\"", ":", "z", "=", "self", ".", "_age", "*", "24.", "*", "60.", "*", "60.", "else", ":", "raise", "return", "round", "(", "z", ",", "8", ")", "except", "StandardError", ":", "raise", "KeyError", "(", "\"Could not return age of particle\"", ")" ]
Returns the particle's age (how long it has been forced) in a variety of units. Rounded to 8 decimal places.

Parameters:
    units (optional) = 'days' (default), 'hours', 'minutes', or 'seconds'
[ "Returns", "the", "particlees", "age", "(", "how", "long", "it", "has", "been", "forced", ")", "in", "a", "variety", "of", "units", ".", "Rounded", "to", "8", "decimal", "places", "." ]
python
train
matsui528/nanopq
nanopq/pq.py
https://github.com/matsui528/nanopq/blob/1ce68cad2e3cab62b409e6dd63f676ed7b443ee9/nanopq/pq.py#L114-L135
def decode(self, codes):
    """Given PQ-codes, reconstruct original D-dimensional vectors
    approximately by fetching the codewords.

    Args:
        codes (np.ndarray): PQ-codes with shape=(N, M) and
            dtype=self.code_dtype. Each row is a PQ-code

    Returns:
        np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32

    """
    assert codes.ndim == 2
    N, M = codes.shape
    assert M == self.M
    assert codes.dtype == self.code_dtype

    vecs = np.empty((N, self.Ds * self.M), dtype=np.float32)
    for m in range(self.M):
        vecs[:, m * self.Ds : (m + 1) * self.Ds] = self.codewords[m][codes[:, m], :]

    return vecs
[ "def", "decode", "(", "self", ",", "codes", ")", ":", "assert", "codes", ".", "ndim", "==", "2", "N", ",", "M", "=", "codes", ".", "shape", "assert", "M", "==", "self", ".", "M", "assert", "codes", ".", "dtype", "==", "self", ".", "code_dtype", "vecs", "=", "np", ".", "empty", "(", "(", "N", ",", "self", ".", "Ds", "*", "self", ".", "M", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "m", "in", "range", "(", "self", ".", "M", ")", ":", "vecs", "[", ":", ",", "m", "*", "self", ".", "Ds", ":", "(", "m", "+", "1", ")", "*", "self", ".", "Ds", "]", "=", "self", ".", "codewords", "[", "m", "]", "[", "codes", "[", ":", ",", "m", "]", ",", ":", "]", "return", "vecs" ]
Given PQ-codes, reconstruct original D-dimensional vectors approximately by fetching the codewords.

Args:
    codes (np.ndarray): PQ-codes with shape=(N, M) and dtype=self.code_dtype.
        Each row is a PQ-code

Returns:
    np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32
[ "Given", "PQ", "-", "codes", "reconstruct", "original", "D", "-", "dimensional", "vectors", "approximately", "by", "fetching", "the", "codewords", "." ]
python
train
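The lookup itself is just fancy indexing. A toy standalone reproduction with made-up sizes (M=2 sub-spaces, Ks=4 centroids of Ds=3 dimensions each):

import numpy as np

M, Ks, Ds, N = 2, 4, 3, 5
rng = np.random.default_rng(0)
codewords = rng.random((M, Ks, Ds)).astype(np.float32)  # per-subspace centroids
codes = rng.integers(0, Ks, size=(N, M))                # one centroid id per subspace

vecs = np.empty((N, Ds * M), dtype=np.float32)
for m in range(M):
    vecs[:, m * Ds:(m + 1) * Ds] = codewords[m][codes[:, m], :]
print(vecs.shape)  # (5, 6) -- N reconstructed D = M*Ds vectors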
pygridtools/gridmap
examples/map_reduce.py
https://github.com/pygridtools/gridmap/blob/be4fb1478ab8d19fa3acddecdf1a5d8bd3789127/examples/map_reduce.py#L39-L48
def sleep_walk(secs):
    '''
    Pass the time by adding numbers until the specified number of seconds
    has elapsed. Intended as a replacement for ``time.sleep`` that doesn't
    leave the CPU idle (which will make the job seem like it's stalled).
    '''
    start_time = datetime.now()
    num = 0
    while (datetime.now() - start_time).seconds < secs:
        num = num + 1
[ "def", "sleep_walk", "(", "secs", ")", ":", "start_time", "=", "datetime", ".", "now", "(", ")", "num", "=", "0", "while", "(", "datetime", ".", "now", "(", ")", "-", "start_time", ")", ".", "seconds", "<", "secs", ":", "num", "=", "num", "+", "1" ]
Pass the time by adding numbers until the specified number of seconds has elapsed. Intended as a replacement for ``time.sleep`` that doesn't leave the CPU idle (which will make the job seem like it's stalled).
[ "Pass", "the", "time", "by", "adding", "numbers", "until", "the", "specified", "number", "of", "seconds", "has", "elapsed", ".", "Intended", "as", "a", "replacement", "for", "time", ".", "sleep", "that", "doesn", "t", "leave", "the", "CPU", "idle", "(", "which", "will", "make", "the", "job", "seem", "like", "it", "s", "stalled", ")", "." ]
python
train
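One caveat worth knowing: timedelta.seconds truncates to whole seconds, and datetime.now() follows a wall clock that can jump. A variant built on time.monotonic() (a sketch, not part of gridmap) sidesteps both:

import time

def sleep_walk_monotonic(secs):
    # Busy-wait against a monotonic clock with sub-second resolution
    start = time.monotonic()
    num = 0
    while time.monotonic() - start < secs:
        num += 1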
cthorey/pdsimage
pdsimage/PDS_Extractor.py
https://github.com/cthorey/pdsimage/blob/f71de6dfddd3d538d76da229b4b9605c40f3fbac/pdsimage/PDS_Extractor.py#L276-L293
def lat_id(self, line):
    '''
    Return the corresponding latitude

    Args:
        line (int): Line number

    Returns:
        Corresponding latitude in degree
    '''
    if self.grid == 'WAC':
        lat = ((1 + self.LINE_PROJECTION_OFFSET - line) *
               self.MAP_SCALE * 1e-3 / self.A_AXIS_RADIUS)
        return lat * 180 / np.pi
    else:
        lat = float(self.CENTER_LATITUDE) - \
            (line - float(self.LINE_PROJECTION_OFFSET) - 1) \
            / float(self.MAP_RESOLUTION)
        return lat
[ "def", "lat_id", "(", "self", ",", "line", ")", ":", "if", "self", ".", "grid", "==", "'WAC'", ":", "lat", "=", "(", "(", "1", "+", "self", ".", "LINE_PROJECTION_OFFSET", "-", "line", ")", "*", "self", ".", "MAP_SCALE", "*", "1e-3", "/", "self", ".", "A_AXIS_RADIUS", ")", "return", "lat", "*", "180", "/", "np", ".", "pi", "else", ":", "lat", "=", "float", "(", "self", ".", "CENTER_LATITUDE", ")", "-", "(", "line", "-", "float", "(", "self", ".", "LINE_PROJECTION_OFFSET", ")", "-", "1", ")", "/", "float", "(", "self", ".", "MAP_RESOLUTION", ")", "return", "lat" ]
Return the corresponding latitude

Args:
    line (int): Line number

Returns:
    Corresponding latitude in degree
[ "Return", "the", "corresponding", "latitude" ]
python
train
ethereum/py-trie
trie/utils/binaries.py
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L56-L66
def decode_to_bin_keypath(path):
    """
    Decodes bytes into a sequence of 0s and 1s
    Used in decoding key path of a KV-NODE
    """
    path = encode_to_bin(path)
    if path[0] == 1:
        path = path[4:]
    assert path[0:2] == PREFIX_00
    padded_len = TWO_BITS.index(path[2:4])
    return path[4 + ((4 - padded_len) % 4):]
[ "def", "decode_to_bin_keypath", "(", "path", ")", ":", "path", "=", "encode_to_bin", "(", "path", ")", "if", "path", "[", "0", "]", "==", "1", ":", "path", "=", "path", "[", "4", ":", "]", "assert", "path", "[", "0", ":", "2", "]", "==", "PREFIX_00", "padded_len", "=", "TWO_BITS", ".", "index", "(", "path", "[", "2", ":", "4", "]", ")", "return", "path", "[", "4", "+", "(", "(", "4", "-", "padded_len", ")", "%", "4", ")", ":", "]" ]
Decodes bytes into a sequence of 0s and 1s
Used in decoding key path of a KV-NODE
[ "Decodes", "bytes", "into", "a", "sequence", "of", "0s", "and", "1s", "Used", "in", "decoding", "key", "path", "of", "a", "KV", "-", "NODE" ]
python
train
bwohlberg/sporco
sporco/dictlrn/prlcnscdl.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/prlcnscdl.py#L205-L214
def ccmod_xstep(k):
    """Do the X step of the ccmod stage. The only parameter is the slice
    index `k` and there are no return values; all inputs and outputs are
    from and to global variables.
    """
    YU = mp_D_Y - mp_D_U[k]
    b = mp_ZSf[k] + mp_drho * sl.rfftn(YU, None, mp_cri.axisN)
    Xf = sl.solvedbi_sm(mp_Zf[k], mp_drho, b, axis=mp_cri.axisM)
    mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
[ "def", "ccmod_xstep", "(", "k", ")", ":", "YU", "=", "mp_D_Y", "-", "mp_D_U", "[", "k", "]", "b", "=", "mp_ZSf", "[", "k", "]", "+", "mp_drho", "*", "sl", ".", "rfftn", "(", "YU", ",", "None", ",", "mp_cri", ".", "axisN", ")", "Xf", "=", "sl", ".", "solvedbi_sm", "(", "mp_Zf", "[", "k", "]", ",", "mp_drho", ",", "b", ",", "axis", "=", "mp_cri", ".", "axisM", ")", "mp_D_X", "[", "k", "]", "=", "sl", ".", "irfftn", "(", "Xf", ",", "mp_cri", ".", "Nv", ",", "mp_cri", ".", "axisN", ")" ]
Do the X step of the ccmod stage. The only parameter is the slice index `k` and there are no return values; all inputs and outputs are from and to global variables.
[ "Do", "the", "X", "step", "of", "the", "ccmod", "stage", ".", "The", "only", "parameter", "is", "the", "slice", "index", "k", "and", "there", "are", "no", "return", "values", ";", "all", "inputs", "and", "outputs", "are", "from", "and", "to", "global", "variables", "." ]
python
train
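The core of ccmod_xstep is sl.solvedbi_sm, which presumably solves the rank-one-plus-diagonal system (rho*I + z z^H) x = b via the Sherman-Morrison identity. A single-vector NumPy sketch of that solve, with the function name borrowed for illustration:

```python
import numpy as np

def solvedbi_sm(z, rho, b):
    # Sherman-Morrison:
    # (rho*I + z z^H)^-1 b = b/rho - z (z^H b) / (rho * (rho + z^H z))
    zhb = np.vdot(z, b)          # z^H b
    znorm = np.vdot(z, z).real   # z^H z
    return b / rho - z * (zhb / (rho * (rho + znorm)))

z = np.array([1.0 + 1.0j, 2.0 + 0.0j])
b = np.array([3.0 + 0.0j, 4.0 + 2.0j])
x = solvedbi_sm(z, 0.5, b)
A = 0.5 * np.eye(2) + np.outer(z, np.conj(z))
print(np.allclose(A @ x, b))  # True
```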
Opentrons/opentrons
api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py#L724-L742
def _generate_current_command(self): ''' Returns a constructed GCode string that contains this driver's axis-current settings, plus a small delay to wait for those settings to take effect. ''' values = ['{}{}'.format(axis, value) for axis, value in sorted(self.current.items())] current_cmd = '{} {}'.format( GCODES['SET_CURRENT'], ' '.join(values) ) command = '{currents} {code}P{seconds}'.format( currents=current_cmd, code=GCODES['DWELL'], seconds=CURRENT_CHANGE_DELAY ) log.debug("_generate_current_command: {}".format(command)) return command
[ "def", "_generate_current_command", "(", "self", ")", ":", "values", "=", "[", "'{}{}'", ".", "format", "(", "axis", ",", "value", ")", "for", "axis", ",", "value", "in", "sorted", "(", "self", ".", "current", ".", "items", "(", ")", ")", "]", "current_cmd", "=", "'{} {}'", ".", "format", "(", "GCODES", "[", "'SET_CURRENT'", "]", ",", "' '", ".", "join", "(", "values", ")", ")", "command", "=", "'{currents} {code}P{seconds}'", ".", "format", "(", "currents", "=", "current_cmd", ",", "code", "=", "GCODES", "[", "'DWELL'", "]", ",", "seconds", "=", "CURRENT_CHANGE_DELAY", ")", "log", ".", "debug", "(", "\"_generate_current_command: {}\"", ".", "format", "(", "command", ")", ")", "return", "command" ]
Returns a constructed GCode string that contains this driver's axis-current settings, plus a small delay to wait for those settings to take effect.
[ "Returns", "a", "constructed", "GCode", "string", "that", "contains", "this", "driver", "s", "axis", "-", "current", "settings", "plus", "a", "small", "delay", "to", "wait", "for", "those", "settings", "to", "take", "effect", "." ]
python
train
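As an illustration of the string this method assembles, here is a standalone version with a hypothetical GCode table and delay; the real GCODES mapping and CURRENT_CHANGE_DELAY live elsewhere in the driver, so both values below are assumptions:

```python
GCODES = {'SET_CURRENT': 'M907', 'DWELL': 'G4'}  # assumed values
CURRENT_CHANGE_DELAY = 0.05                      # assumed delay in seconds

def generate_current_command(current):
    values = ['{}{}'.format(axis, value)
              for axis, value in sorted(current.items())]
    current_cmd = '{} {}'.format(GCODES['SET_CURRENT'], ' '.join(values))
    return '{currents} {code}P{seconds}'.format(
        currents=current_cmd, code=GCODES['DWELL'], seconds=CURRENT_CHANGE_DELAY)

print(generate_current_command({'X': 1.25, 'Y': 1.25, 'Z': 0.8}))
# M907 X1.25 Y1.25 Z0.8 G4P0.05
```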
APSL/transmanager
transmanager/manager.py
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L647-L678
def create_translations_for_item_and_its_children(self, item, languages=None): """ Creates the translations for an item in the given languages and returns the IDs of the created tasks :param item: (master) :param languages: :return: """ if not self.master: self.set_master(item) if not languages: languages = self.get_languages() result_ids = [] # first process main object fields = self._get_translated_field_names(item) tasks = self.create_from_item(languages, item, fields) if tasks: result_ids += [task.pk for task in tasks] # then process child objects from main children = self.get_translatable_children(item) for child in children: fields = self._get_translated_field_names(child) tasks = self.create_from_item(languages, child, fields) if tasks: result_ids += [task.pk for task in tasks] return result_ids
[ "def", "create_translations_for_item_and_its_children", "(", "self", ",", "item", ",", "languages", "=", "None", ")", ":", "if", "not", "self", ".", "master", ":", "self", ".", "set_master", "(", "item", ")", "if", "not", "languages", ":", "languages", "=", "self", ".", "get_languages", "(", ")", "result_ids", "=", "[", "]", "# first process main object", "fields", "=", "self", ".", "_get_translated_field_names", "(", "item", ")", "tasks", "=", "self", ".", "create_from_item", "(", "languages", ",", "item", ",", "fields", ")", "if", "tasks", ":", "result_ids", "+=", "[", "task", ".", "pk", "for", "task", "in", "tasks", "]", "# then process child objects from main", "children", "=", "self", ".", "get_translatable_children", "(", "item", ")", "for", "child", "in", "children", ":", "fields", "=", "self", ".", "_get_translated_field_names", "(", "child", ")", "tasks", "=", "self", ".", "create_from_item", "(", "languages", ",", "child", ",", "fields", ")", "if", "tasks", ":", "result_ids", "+=", "[", "task", ".", "pk", "for", "task", "in", "tasks", "]", "return", "result_ids" ]
Creates the translations for an item in the given languages and returns the IDs of the created tasks :param item: (master) :param languages: :return:
[ "Creates", "the", "translations", "from", "an", "item", "and", "defined", "languages", "and", "return", "the", "id", "s", "of", "the", "created", "tasks" ]
python
train
ecell/ecell4
ecell4/util/decorator.py
https://github.com/ecell/ecell4/blob/a4a1229661c39b2059adbbacae9090e5ba664e01/ecell4/util/decorator.py#L143-L194
def get_model(is_netfree=False, without_reset=False, seeds=None, effective=False): """ Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES`` and ``REACTION_RULES``. Parameters ---------- is_netfree : bool, optional Return ``NetfreeModel`` if True, and ``NetworkModel`` otherwise. Default is False. without_reset : bool, optional Do not reset the global variables after the generation if True. Default is False. seeds : list, optional A list of seed ``Species`` for expanding the model. If this is not None, generate a ``NetfreeModel`` once, and return a ``NetworkModel``, which is an expanded form of that with the given seeds. Default is None. effective : bool, optional See ``NetfreeModel.effective`` and ``Netfree.set_effective``. Only meaningful with option ``is_netfree=True``. Default is False. Returns ------- model : NetworkModel, NetfreeModel """ try: if seeds is not None or is_netfree: m = ecell4_base.core.NetfreeModel() else: m = ecell4_base.core.NetworkModel() for sp in SPECIES_ATTRIBUTES: m.add_species_attribute(sp) for rr in REACTION_RULES: m.add_reaction_rule(rr) if not without_reset: reset_model() if seeds is not None: return m.expand(seeds) if isinstance(m, ecell4_base.core.NetfreeModel): m.set_effective(effective) except Exception as e: reset_model() raise e return m
[ "def", "get_model", "(", "is_netfree", "=", "False", ",", "without_reset", "=", "False", ",", "seeds", "=", "None", ",", "effective", "=", "False", ")", ":", "try", ":", "if", "seeds", "is", "not", "None", "or", "is_netfree", ":", "m", "=", "ecell4_base", ".", "core", ".", "NetfreeModel", "(", ")", "else", ":", "m", "=", "ecell4_base", ".", "core", ".", "NetworkModel", "(", ")", "for", "sp", "in", "SPECIES_ATTRIBUTES", ":", "m", ".", "add_species_attribute", "(", "sp", ")", "for", "rr", "in", "REACTION_RULES", ":", "m", ".", "add_reaction_rule", "(", "rr", ")", "if", "not", "without_reset", ":", "reset_model", "(", ")", "if", "seeds", "is", "not", "None", ":", "return", "m", ".", "expand", "(", "seeds", ")", "if", "isinstance", "(", "m", ",", "ecell4_base", ".", "core", ".", "NetfreeModel", ")", ":", "m", ".", "set_effective", "(", "effective", ")", "except", "Exception", "as", "e", ":", "reset_model", "(", ")", "raise", "e", "return", "m" ]
Generate a model with parameters in the global scope, ``SPECIES_ATTRIBUTES`` and ``REACTION_RULES``. Parameters ---------- is_netfree : bool, optional Return ``NetfreeModel`` if True, and ``NetworkModel`` otherwise. Default is False. without_reset : bool, optional Do not reset the global variables after the generation if True. Default is False. seeds : list, optional A list of seed ``Species`` for expanding the model. If this is not None, generate a ``NetfreeModel`` once, and return a ``NetworkModel``, which is an expanded form of that with the given seeds. Default is None. effective : bool, optional See ``NetfreeModel.effective`` and ``Netfree.set_effective``. Only meaningful with option ``is_netfree=True``. Default is False. Returns ------- model : NetworkModel, NetfreeModel
[ "Generate", "a", "model", "with", "parameters", "in", "the", "global", "scope", "SPECIES_ATTRIBUTES", "and", "REACTIONRULES", "." ]
python
train
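Typical usage, assuming the ecell4 decorator helpers that populate SPECIES_ATTRIBUTES and REACTION_RULES are importable as shown (the import path and rate constants are assumptions):

```python
from ecell4 import *  # assumed to expose reaction_rules and get_model

with reaction_rules():
    A + B == C | (0.01, 0.3)  # placeholder binding/unbinding rates

m = get_model()  # a NetworkModel built from the module-level globals
print(len(m.reaction_rules()))  # 2: one rule per direction
```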
PyCQA/astroid
astroid/brain/brain_functools.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_functools.py#L121-L130
def _looks_like_lru_cache(node): """Check if the given function node is decorated with lru_cache.""" if not node.decorators: return False for decorator in node.decorators.nodes: if not isinstance(decorator, astroid.Call): continue if _looks_like_functools_member(decorator, "lru_cache"): return True return False
[ "def", "_looks_like_lru_cache", "(", "node", ")", ":", "if", "not", "node", ".", "decorators", ":", "return", "False", "for", "decorator", "in", "node", ".", "decorators", ".", "nodes", ":", "if", "not", "isinstance", "(", "decorator", ",", "astroid", ".", "Call", ")", ":", "continue", "if", "_looks_like_functools_member", "(", "decorator", ",", "\"lru_cache\"", ")", ":", "return", "True", "return", "False" ]
Check if the given function node is decorated with lru_cache.
[ "Check", "if", "the", "given", "function", "node", "is", "decorated", "with", "lru_cache", "." ]
python
train
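A quick way to see the node shape this helper inspects, using astroid's extract_node with its `#@` marker:

```python
import astroid

func = astroid.extract_node('''
import functools

@functools.lru_cache(maxsize=32)
def cached(x):  #@
    return x * 2
''')
# A parenthesised decorator parses as a Call node, which is what
# _looks_like_lru_cache iterates over.
print([type(d).__name__ for d in func.decorators.nodes])  # ['Call']
```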
numba/llvmlite
llvmlite/binding/module.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/binding/module.py#L30-L45
def parse_bitcode(bitcode, context=None): """ Create Module from a LLVM *bitcode* (a bytes object). """ if context is None: context = get_global_context() buf = c_char_p(bitcode) bufsize = len(bitcode) with ffi.OutputString() as errmsg: mod = ModuleRef(ffi.lib.LLVMPY_ParseBitcode( context, buf, bufsize, errmsg), context) if errmsg: mod.close() raise RuntimeError( "LLVM bitcode parsing error\n{0}".format(errmsg)) return mod
[ "def", "parse_bitcode", "(", "bitcode", ",", "context", "=", "None", ")", ":", "if", "context", "is", "None", ":", "context", "=", "get_global_context", "(", ")", "buf", "=", "c_char_p", "(", "bitcode", ")", "bufsize", "=", "len", "(", "bitcode", ")", "with", "ffi", ".", "OutputString", "(", ")", "as", "errmsg", ":", "mod", "=", "ModuleRef", "(", "ffi", ".", "lib", ".", "LLVMPY_ParseBitcode", "(", "context", ",", "buf", ",", "bufsize", ",", "errmsg", ")", ",", "context", ")", "if", "errmsg", ":", "mod", ".", "close", "(", ")", "raise", "RuntimeError", "(", "\"LLVM bitcode parsing error\\n{0}\"", ".", "format", "(", "errmsg", ")", ")", "return", "mod" ]
Create Module from a LLVM *bitcode* (a bytes object).
[ "Create", "Module", "from", "a", "LLVM", "*", "bitcode", "*", "(", "a", "bytes", "object", ")", "." ]
python
train
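A round-trip usage sketch, assuming llvmlite is installed; the IR string is a placeholder module:

```python
import llvmlite.binding as llvm

llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()

ir = "define i32 @answer() {\n  ret i32 42\n}\n"
mod = llvm.parse_assembly(ir)        # textual IR -> ModuleRef
bitcode = mod.as_bitcode()           # ModuleRef -> bitcode bytes
mod2 = llvm.parse_bitcode(bitcode)   # bytes -> ModuleRef again
print("answer" in str(mod2))         # True
```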
argaen/aiocache
aiocache/serializers/serializers.py
https://github.com/argaen/aiocache/blob/fdd282f37283ca04e22209f4d2ae4900f29e1688/aiocache/serializers/serializers.py#L178-L188
def loads(self, value): """ Deserialize value using ``msgpack.loads``. :param value: bytes :returns: obj """ raw = False if self.encoding == "utf-8" else True if value is None: return None return msgpack.loads(value, raw=raw, use_list=self.use_list)
[ "def", "loads", "(", "self", ",", "value", ")", ":", "raw", "=", "False", "if", "self", ".", "encoding", "==", "\"utf-8\"", "else", "True", "if", "value", "is", "None", ":", "return", "None", "return", "msgpack", ".", "loads", "(", "value", ",", "raw", "=", "raw", ",", "use_list", "=", "self", ".", "use_list", ")" ]
Deserialize value using ``msgpack.loads``. :param value: bytes :returns: obj
[ "Deserialize", "value", "using", "msgpack", ".", "loads", "." ]
python
train
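What the serializer does under the hood, assuming the msgpack package is installed:

```python
import msgpack

packed = msgpack.dumps({"a": 1, "b": [1, 2]})
# encoding == "utf-8" in the serializer maps to raw=False here, so byte
# strings are decoded back to str on the way out.
print(msgpack.loads(packed, raw=False, use_list=True))
# {'a': 1, 'b': [1, 2]}
```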
jxtech/wechatpy
wechatpy/client/api/card.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/card.py#L453-L471
def update_movie_ticket(self, code, ticket_class, show_time, duration, screening_room, seat_number, card_id=None): """ Update movie ticket """ ticket = { 'code': code, 'ticket_class': ticket_class, 'show_time': show_time, 'duration': duration, 'screening_room': screening_room, 'seat_number': seat_number } if card_id: ticket['card_id'] = card_id return self._post( 'card/movieticket/updateuser', data=ticket )
[ "def", "update_movie_ticket", "(", "self", ",", "code", ",", "ticket_class", ",", "show_time", ",", "duration", ",", "screening_room", ",", "seat_number", ",", "card_id", "=", "None", ")", ":", "ticket", "=", "{", "'code'", ":", "code", ",", "'ticket_class'", ":", "ticket_class", ",", "'show_time'", ":", "show_time", ",", "'duration'", ":", "duration", ",", "'screening_room'", ":", "screening_room", ",", "'seat_number'", ":", "seat_number", "}", "if", "card_id", ":", "ticket", "[", "'card_id'", "]", "=", "card_id", "return", "self", ".", "_post", "(", "'card/movieticket/updateuser'", ",", "data", "=", "ticket", ")" ]
Update movie ticket
[ "更新电影票" ]
python
train
westonplatter/fast_arrow
fast_arrow/resources/option_position.py
https://github.com/westonplatter/fast_arrow/blob/514cbca4994f52a97222058167830a302e313d04/fast_arrow/resources/option_position.py#L47-L61
def mergein_marketdata_list(cls, client, option_positions): """ Fetch and merge in Marketdata for each option position """ ids = cls._extract_ids(option_positions) mds = OptionMarketdata.quotes_by_instrument_ids(client, ids) results = [] for op in option_positions: # @TODO optimize this so it's better than O(n^2) md = [x for x in mds if x['instrument'] == op['option']][0] # there is no overlap in keys so this is fine merged_dict = dict(list(op.items()) + list(md.items())) results.append(merged_dict) return results
[ "def", "mergein_marketdata_list", "(", "cls", ",", "client", ",", "option_positions", ")", ":", "ids", "=", "cls", ".", "_extract_ids", "(", "option_positions", ")", "mds", "=", "OptionMarketdata", ".", "quotes_by_instrument_ids", "(", "client", ",", "ids", ")", "results", "=", "[", "]", "for", "op", "in", "option_positions", ":", "# @TODO optimize this so it's better than O(n^2)", "md", "=", "[", "x", "for", "x", "in", "mds", "if", "x", "[", "'instrument'", "]", "==", "op", "[", "'option'", "]", "]", "[", "0", "]", "# there is no overlap in keys so this is fine", "merged_dict", "=", "dict", "(", "list", "(", "op", ".", "items", "(", ")", ")", "+", "list", "(", "md", ".", "items", "(", ")", ")", ")", "results", ".", "append", "(", "merged_dict", ")", "return", "results" ]
Fetch and merge in Marketdata for each option position
[ "Fetch", "and", "merge", "in", "Marketdata", "for", "each", "option", "position" ]
python
train
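The @TODO about the O(n^2) scan could be addressed by indexing the marketdata by instrument once; a hedged sketch of that variant (not the library's actual code):

```python
def merge_marketdata(option_positions, mds):
    # O(n): build a lookup keyed by instrument URL, then merge each position.
    by_instrument = {md['instrument']: md for md in mds}
    return [dict(list(op.items()) + list(by_instrument[op['option']].items()))
            for op in option_positions]

mds = [{'instrument': 'inst/1', 'bid_price': 1.2}]
ops = [{'option': 'inst/1', 'quantity': 2}]
print(merge_marketdata(ops, mds))
# [{'option': 'inst/1', 'quantity': 2, 'instrument': 'inst/1', 'bid_price': 1.2}]
```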
sibirrer/lenstronomy
lenstronomy/LensModel/single_plane.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/single_plane.py#L171-L189
def mass_3d(self, r, kwargs, bool_list=None): """ computes the mass within a 3d sphere of radius r :param r: radius (in angular units) :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param bool_list: list of bools that are part of the output :return: mass (in angular units, modulo epsilon_crit) """ bool_list = self._bool_list(bool_list) mass_3d = 0 for i, func in enumerate(self.func_list): if bool_list[i] is True: kwargs_i = {k:v for k, v in kwargs[i].items() if not k in ['center_x', 'center_y']} mass_3d_i = func.mass_3d_lens(r, **kwargs_i) mass_3d += mass_3d_i #except: # raise ValueError('Lens profile %s does not support a 3d mass function!' % self.model_list[i]) return mass_3d
[ "def", "mass_3d", "(", "self", ",", "r", ",", "kwargs", ",", "bool_list", "=", "None", ")", ":", "bool_list", "=", "self", ".", "_bool_list", "(", "bool_list", ")", "mass_3d", "=", "0", "for", "i", ",", "func", "in", "enumerate", "(", "self", ".", "func_list", ")", ":", "if", "bool_list", "[", "i", "]", "is", "True", ":", "kwargs_i", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", "[", "i", "]", ".", "items", "(", ")", "if", "not", "k", "in", "[", "'center_x'", ",", "'center_y'", "]", "}", "mass_3d_i", "=", "func", ".", "mass_3d_lens", "(", "r", ",", "*", "*", "kwargs_i", ")", "mass_3d", "+=", "mass_3d_i", "#except:", "# raise ValueError('Lens profile %s does not support a 3d mass function!' % self.model_list[i])", "return", "mass_3d" ]
computes the mass within a 3d sphere of radius r :param r: radius (in angular units) :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param bool_list: list of bools that are part of the output :return: mass (in angular units, modulo epsilon_crit)
[ "computes", "the", "mass", "within", "a", "3d", "sphere", "of", "radius", "r" ]
python
train
mozilla/treeherder
treeherder/seta/update_job_priority.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/update_job_priority.py#L38-L46
def _unique_key(job): """Return a key to query our uniqueness mapping system. This makes sure that we use a consistent key between our code and selecting jobs from the table. """ return unique_key(testtype=str(job['testtype']), buildtype=str(job['platform_option']), platform=str(job['platform']))
[ "def", "_unique_key", "(", "job", ")", ":", "return", "unique_key", "(", "testtype", "=", "str", "(", "job", "[", "'testtype'", "]", ")", ",", "buildtype", "=", "str", "(", "job", "[", "'platform_option'", "]", ")", ",", "platform", "=", "str", "(", "job", "[", "'platform'", "]", ")", ")" ]
Return a key to query our uniqueness mapping system. This makes sure that we use a consistent key between our code and selecting jobs from the table.
[ "Return", "a", "key", "to", "query", "our", "uniqueness", "mapping", "system", "." ]
python
train
yougov/solr-doc-manager
mongo_connector/doc_managers/solr_doc_manager.py
https://github.com/yougov/solr-doc-manager/blob/1978bf6f3387b1afd6dd6b41a1bbaea9932d60fd/mongo_connector/doc_managers/solr_doc_manager.py#L320-L326
def remove(self, document_id, namespace, timestamp): """Removes documents from Solr The input is a python dictionary that represents a mongo document. """ self.solr.delete(id=u(document_id), commit=(self.auto_commit_interval == 0))
[ "def", "remove", "(", "self", ",", "document_id", ",", "namespace", ",", "timestamp", ")", ":", "self", ".", "solr", ".", "delete", "(", "id", "=", "u", "(", "document_id", ")", ",", "commit", "=", "(", "self", ".", "auto_commit_interval", "==", "0", ")", ")" ]
Removes documents from Solr The input is a python dictionary that represents a mongo document.
[ "Removes", "documents", "from", "Solr" ]
python
train
hugapi/hug
hug/output_format.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/output_format.py#L83-L92
def json_convert(*kinds): """Registers the wrapped method as a JSON converter for the provided types. NOTE: custom converters are always globally applied """ def register_json_converter(function): for kind in kinds: json_converters[kind] = function return function return register_json_converter
[ "def", "json_convert", "(", "*", "kinds", ")", ":", "def", "register_json_converter", "(", "function", ")", ":", "for", "kind", "in", "kinds", ":", "json_converters", "[", "kind", "]", "=", "function", "return", "function", "return", "register_json_converter" ]
Registers the wrapped method as a JSON converter for the provided types. NOTE: custom converters are always globally applied
[ "Registers", "the", "wrapped", "method", "as", "a", "JSON", "converter", "for", "the", "provided", "types", "." ]
python
train
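Registering a converter for a type the stock JSON encoder cannot handle might look like this; the complex-number convention below is invented for illustration:

```python
import hug.output_format

@hug.output_format.json_convert(complex)
def complex_to_json(value):
    # Invented convention: a complex number becomes a [real, imag] pair.
    return [value.real, value.imag]

print(hug.output_format.json({'z': 1 + 2j}))  # roughly b'{"z": [1.0, 2.0]}'
```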
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L2620-L2650
def ConsultarTiposOperacion(self, sep="||"): "Query operation types by activity." ops = [] ret = self.client.tipoActividadConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoActividadReturn'] self.__analizar_errores(ret) for it_act in ret.get('tiposActividad', []): ret = self.client.tipoOperacionXActividadConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, nroActLiquida=it_act['codigoDescripcion']['codigo'], )['tipoOperacionReturn'] self.__analizar_errores(ret) array = ret.get('tiposOperacion', []) if sep: ops.extend([("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) % (it_act['codigoDescripcion']['codigo'], it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array]) else: ops.extend([(it_act['codigoDescripcion']['codigo'], it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array]) return ops
[ "def", "ConsultarTiposOperacion", "(", "self", ",", "sep", "=", "\"||\"", ")", ":", "ops", "=", "[", "]", "ret", "=", "self", ".", "client", ".", "tipoActividadConsultar", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'sign'", ":", "self", ".", "Sign", ",", "'cuit'", ":", "self", ".", "Cuit", ",", "}", ",", ")", "[", "'tipoActividadReturn'", "]", "self", ".", "__analizar_errores", "(", "ret", ")", "for", "it_act", "in", "ret", ".", "get", "(", "'tiposActividad'", ",", "[", "]", ")", ":", "ret", "=", "self", ".", "client", ".", "tipoOperacionXActividadConsultar", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'sign'", ":", "self", ".", "Sign", ",", "'cuit'", ":", "self", ".", "Cuit", ",", "}", ",", "nroActLiquida", "=", "it_act", "[", "'codigoDescripcion'", "]", "[", "'codigo'", "]", ",", ")", "[", "'tipoOperacionReturn'", "]", "self", ".", "__analizar_errores", "(", "ret", ")", "array", "=", "ret", ".", "get", "(", "'tiposOperacion'", ",", "[", "]", ")", "if", "sep", ":", "ops", ".", "extend", "(", "[", "(", "\"%s %%s %s %%s %s %%s %s\"", "%", "(", "sep", ",", "sep", ",", "sep", ",", "sep", ")", ")", "%", "(", "it_act", "[", "'codigoDescripcion'", "]", "[", "'codigo'", "]", ",", "it", "[", "'codigoDescripcion'", "]", "[", "'codigo'", "]", ",", "it", "[", "'codigoDescripcion'", "]", "[", "'descripcion'", "]", ")", "for", "it", "in", "array", "]", ")", "else", ":", "ops", ".", "extend", "(", "[", "(", "it_act", "[", "'codigoDescripcion'", "]", "[", "'codigo'", "]", ",", "it", "[", "'codigoDescripcion'", "]", "[", "'codigo'", "]", ",", "it", "[", "'codigoDescripcion'", "]", "[", "'descripcion'", "]", ")", "for", "it", "in", "array", "]", ")", "return", "ops" ]
Query operation types by activity.
[ "Consulta", "tipo", "de", "Operación", "por", "Actividad", "." ]
python
train
mitsei/dlkit
dlkit/records/assessment/mecqbank/mecqbank_base_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/mecqbank/mecqbank_base_records.py#L199-L224
def _init_metadata(self): """stub""" super(SimpleDifficultyItemFormRecord, self)._init_metadata() self._min_string_length = None self._max_string_length = None self._difficulty_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'text'), 'element_label': 'Text', 'instructions': 'enter a text string', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_string_values': [{ 'text': '', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }], 'syntax': 'STRING', 'minimum_string_length': self._min_string_length, 'maximum_string_length': self._max_string_length, 'string_set': [] }
[ "def", "_init_metadata", "(", "self", ")", ":", "super", "(", "SimpleDifficultyItemFormRecord", ",", "self", ")", ".", "_init_metadata", "(", ")", "self", ".", "_min_string_length", "=", "None", "self", ".", "_max_string_length", "=", "None", "self", ".", "_difficulty_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'text'", ")", ",", "'element_label'", ":", "'Text'", ",", "'instructions'", ":", "'enter a text string'", ",", "'required'", ":", "False", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'default_string_values'", ":", "[", "{", "'text'", ":", "''", ",", "'languageTypeId'", ":", "str", "(", "DEFAULT_LANGUAGE_TYPE", ")", ",", "'scriptTypeId'", ":", "str", "(", "DEFAULT_SCRIPT_TYPE", ")", ",", "'formatTypeId'", ":", "str", "(", "DEFAULT_FORMAT_TYPE", ")", ",", "}", "]", ",", "'syntax'", ":", "'STRING'", ",", "'minimum_string_length'", ":", "self", ".", "_min_string_length", ",", "'maximum_string_length'", ":", "self", ".", "_max_string_length", ",", "'string_set'", ":", "[", "]", "}" ]
stub
[ "stub" ]
python
train
FujiMakoto/IPS-Vagrant
ips_vagrant/commands/enable/__init__.py
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/commands/enable/__init__.py#L14-L40
def cli(ctx, dname, site): """ Enable the <site> under the specified <domain> """ assert isinstance(ctx, Context) dname = domain_parse(dname).hostname domain = Session.query(Domain).filter(Domain.name == dname).first() if not domain: click.secho('No such domain: {dn}'.format(dn=dname), fg='red', bold=True, err=True) return site_name = site site = Site.get(domain, site_name) if not site: click.secho('No such site: {site}'.format(site=site_name), fg='red', bold=True, err=True) return p = Echo('Constructing paths and configuration files...') site.enable() p.done() # Restart Nginx p = Echo('Restarting web server...') FNULL = open(os.devnull, 'w') subprocess.check_call(['service', 'nginx', 'restart'], stdout=FNULL, stderr=subprocess.STDOUT) p.done()
[ "def", "cli", "(", "ctx", ",", "dname", ",", "site", ")", ":", "assert", "isinstance", "(", "ctx", ",", "Context", ")", "dname", "=", "domain_parse", "(", "dname", ")", ".", "hostname", "domain", "=", "Session", ".", "query", "(", "Domain", ")", ".", "filter", "(", "Domain", ".", "name", "==", "dname", ")", ".", "first", "(", ")", "if", "not", "domain", ":", "click", ".", "secho", "(", "'No such domain: {dn}'", ".", "format", "(", "dn", "=", "dname", ")", ",", "fg", "=", "'red'", ",", "bold", "=", "True", ",", "err", "=", "True", ")", "return", "site_name", "=", "site", "site", "=", "Site", ".", "get", "(", "domain", ",", "site_name", ")", "if", "not", "site", ":", "click", ".", "secho", "(", "'No such site: {site}'", ".", "format", "(", "site", "=", "site_name", ")", ",", "fg", "=", "'red'", ",", "bold", "=", "True", ",", "err", "=", "True", ")", "return", "p", "=", "Echo", "(", "'Constructing paths and configuration files...'", ")", "site", ".", "enable", "(", ")", "p", ".", "done", "(", ")", "# Restart Nginx", "p", "=", "Echo", "(", "'Restarting web server...'", ")", "FNULL", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "subprocess", ".", "check_call", "(", "[", "'service'", ",", "'nginx'", ",", "'restart'", "]", ",", "stdout", "=", "FNULL", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "p", ".", "done", "(", ")" ]
Enable the <site> under the specified <domain>
[ "Enable", "the", "<site", ">", "under", "the", "specified", "<domain", ">" ]
python
train
dmbee/seglearn
seglearn/split.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L132-L184
def temporal_split(X, y, test_size=0.25): ''' Split time series or sequence data along the time axis. Test data is drawn from the end of each series / sequence Parameters ---------- X : array-like, shape [n_series, ...] Time series data and (optionally) contextual data y : array-like shape [n_series, ] target vector test_size : float between 0 and 1, amount to allocate to test Returns ------- X_train : array-like, shape [n_series, ] X_test : array-like, shape [n_series, ] y_train : array-like, shape [n_series, ] y_test : array-like, shape [n_series, ] ''' if test_size <= 0. or test_size >= 1.: raise ValueError("temporal_split: test_size must be >= 0.0 and <= 1.0" " (was %.1f)" %test_size) Ns = len(y) # number of series check_ts_data(X, y) Xt, Xc = get_ts_data_parts(X) train_size = 1. - test_size train_ind = [np.arange(0, int(train_size * len(Xt[i]))) for i in range(Ns)] test_ind = [np.arange(len(train_ind[i]), len(Xt[i])) for i in range(Ns)] X_train = [Xt[i][train_ind[i]] for i in range(Ns)] X_test = [Xt[i][test_ind[i]] for i in range(Ns)] if Xc is not None: X_train = TS_Data(X_train, Xc) X_test = TS_Data(X_test, Xc) if len(np.atleast_1d(y[0])) == len(Xt[0]): # y is a time series y_train = [y[i][train_ind[i]] for i in range(Ns)] y_test = [y[i][test_ind[i]] for i in range(Ns)] else: # y is contextual y_train = y y_test = y return X_train, X_test, y_train, y_test
[ "def", "temporal_split", "(", "X", ",", "y", ",", "test_size", "=", "0.25", ")", ":", "if", "test_size", "<=", "0.", "or", "test_size", ">=", "1.", ":", "raise", "ValueError", "(", "\"temporal_split: test_size must be >= 0.0 and <= 1.0\"", "\" (was %.1f)\"", "%", "test_size", ")", "Ns", "=", "len", "(", "y", ")", "# number of series", "check_ts_data", "(", "X", ",", "y", ")", "Xt", ",", "Xc", "=", "get_ts_data_parts", "(", "X", ")", "train_size", "=", "1.", "-", "test_size", "train_ind", "=", "[", "np", ".", "arange", "(", "0", ",", "int", "(", "train_size", "*", "len", "(", "Xt", "[", "i", "]", ")", ")", ")", "for", "i", "in", "range", "(", "Ns", ")", "]", "test_ind", "=", "[", "np", ".", "arange", "(", "len", "(", "train_ind", "[", "i", "]", ")", ",", "len", "(", "Xt", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "Ns", ")", "]", "X_train", "=", "[", "Xt", "[", "i", "]", "[", "train_ind", "[", "i", "]", "]", "for", "i", "in", "range", "(", "Ns", ")", "]", "X_test", "=", "[", "Xt", "[", "i", "]", "[", "test_ind", "[", "i", "]", "]", "for", "i", "in", "range", "(", "Ns", ")", "]", "if", "Xc", "is", "not", "None", ":", "X_train", "=", "TS_Data", "(", "X_train", ",", "Xc", ")", "X_test", "=", "TS_Data", "(", "X_test", ",", "Xc", ")", "if", "len", "(", "np", ".", "atleast_1d", "(", "y", "[", "0", "]", ")", ")", "==", "len", "(", "Xt", "[", "0", "]", ")", ":", "# y is a time series", "y_train", "=", "[", "y", "[", "i", "]", "[", "train_ind", "[", "i", "]", "]", "for", "i", "in", "range", "(", "Ns", ")", "]", "y_test", "=", "[", "y", "[", "i", "]", "[", "test_ind", "[", "i", "]", "]", "for", "i", "in", "range", "(", "Ns", ")", "]", "else", ":", "# y is contextual", "y_train", "=", "y", "y_test", "=", "y", "return", "X_train", ",", "X_test", ",", "y_train", ",", "y_test" ]
Split time series or sequence data along the time axis. Test data is drawn from the end of each series / sequence Parameters ---------- X : array-like, shape [n_series, ...] Time series data and (optionally) contextual data y : array-like shape [n_series, ] target vector test_size : float between 0 and 1, amount to allocate to test Returns ------- X_train : array-like, shape [n_series, ] X_test : array-like, shape [n_series, ] y_train : array-like, shape [n_series, ] y_test : array-like, shape [n_series, ]
[ "Split", "time", "series", "or", "sequence", "data", "along", "the", "time", "axis", ".", "Test", "data", "is", "drawn", "from", "the", "end", "of", "each", "series", "/", "sequence" ]
python
train
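Usage on two toy series, assuming seglearn is installed:

```python
import numpy as np
from seglearn.split import temporal_split

X = [np.arange(10).reshape(-1, 1), np.arange(8).reshape(-1, 1)]
y = [np.zeros(10), np.ones(8)]

X_train, X_test, y_train, y_test = temporal_split(X, y, test_size=0.25)
print(len(X_train[0]), len(X_test[0]))  # 7 3: the last 25% of each series is held out
```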
couchbase/couchbase-python-client
couchbase/mutation_state.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/mutation_state.py#L100-L108
def decode(cls, s): """ Create a :class:`MutationState` from the encoded string :param s: The encoded string :return: A new MutationState restored from the string """ d = couchbase._from_json(s) o = MutationState() o._sv = d return o
[ "def", "decode", "(", "cls", ",", "s", ")", ":", "d", "=", "couchbase", ".", "_from_json", "(", "s", ")", "o", "=", "MutationState", "(", ")", "o", ".", "_sv", "=", "d" ]
Create a :class:`MutationState` from the encoded string :param s: The encoded string :return: A new MutationState restored from the string
[ "Create", "a", ":", "class", ":", "MutationState", "from", "the", "encoded", "string", ":", "param", "s", ":", "The", "encoded", "string", ":", "return", ":", "A", "new", "MutationState", "restored", "from", "the", "string" ]
python
train
lvieirajr/mongorest
mongorest/collection.py
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L228-L262
def delete(self, **kwargs): """ Deletes the document if it is saved in the collection. """ if self.is_valid: if '_id' in self._document: to_delete = self.find_one({'_id': self._id}) if to_delete: before = self.before_delete() if before: return before try: self.delete_one({'_id': self._id}) self.after_delete() return self._document except PyMongoException as exc: return PyMongoError( error_message=exc.details.get( 'errmsg', exc.details.get( 'err', 'PyMongoError.' ) ), operation='delete', collection=type(self).__name__, document=self._document, ) else: return DocumentNotFoundError(type(self).__name__, self._id) else: return UnidentifiedDocumentError( type(self).__name__, self._document )
[ "def", "delete", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "is_valid", ":", "if", "'_id'", "in", "self", ".", "_document", ":", "to_delete", "=", "self", ".", "find_one", "(", "{", "'_id'", ":", "self", ".", "_id", "}", ")", "if", "to_delete", ":", "before", "=", "self", ".", "before_delete", "(", ")", "if", "before", ":", "return", "before", "try", ":", "self", ".", "delete_one", "(", "{", "'_id'", ":", "self", ".", "_id", "}", ")", "self", ".", "after_delete", "(", ")", "return", "self", ".", "_document", "except", "PyMongoException", "as", "exc", ":", "return", "PyMongoError", "(", "error_message", "=", "exc", ".", "details", ".", "get", "(", "'errmsg'", ",", "exc", ".", "details", ".", "get", "(", "'err'", ",", "'PyMongoError.'", ")", ")", ",", "operation", "=", "'delete'", ",", "collection", "=", "type", "(", "self", ")", ".", "__name__", ",", "document", "=", "self", ".", "_document", ",", ")", "else", ":", "return", "DocumentNotFoundError", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "_id", ")", "else", ":", "return", "UnidentifiedDocumentError", "(", "type", "(", "self", ")", ".", "__name__", ",", "self", ".", "_document", ")" ]
Deletes the document if it is saved in the collection.
[ "Deletes", "the", "document", "if", "it", "is", "saved", "in", "the", "collection", "." ]
python
train
uploadcare/pyuploadcare
pyuploadcare/ucare_cli/__init__.py
https://github.com/uploadcare/pyuploadcare/blob/cefddc0306133a71e37b18e8700df5948ef49b37/pyuploadcare/ucare_cli/__init__.py#L38-L59
def _list(api_list_class, arg_namespace, **extra): """ A common function for building methods of the "list showing". """ if arg_namespace.starting_point: ordering_field = (arg_namespace.ordering or '').lstrip('-') if ordering_field in ('', 'datetime_uploaded', 'datetime_created'): arg_namespace.starting_point = parser.parse( arg_namespace.starting_point) items = api_list_class( starting_point=arg_namespace.starting_point, ordering=arg_namespace.ordering, limit=arg_namespace.limit, request_limit=arg_namespace.request_limit, **extra ) items.constructor = lambda x: x try: pprint(list(items)) except ValueError as e: print(e)
[ "def", "_list", "(", "api_list_class", ",", "arg_namespace", ",", "*", "*", "extra", ")", ":", "if", "arg_namespace", ".", "starting_point", ":", "ordering_field", "=", "(", "arg_namespace", ".", "ordering", "or", "''", ")", ".", "lstrip", "(", "'-'", ")", "if", "ordering_field", "in", "(", "''", ",", "'datetime_uploaded'", ",", "'datetime_created'", ")", ":", "arg_namespace", ".", "starting_point", "=", "parser", ".", "parse", "(", "arg_namespace", ".", "starting_point", ")", "items", "=", "api_list_class", "(", "starting_point", "=", "arg_namespace", ".", "starting_point", ",", "ordering", "=", "arg_namespace", ".", "ordering", ",", "limit", "=", "arg_namespace", ".", "limit", ",", "request_limit", "=", "arg_namespace", ".", "request_limit", ",", "*", "*", "extra", ")", "items", ".", "constructor", "=", "lambda", "x", ":", "x", "try", ":", "pprint", "(", "list", "(", "items", ")", ")", "except", "ValueError", "as", "e", ":", "print", "(", "e", ")" ]
A common function for building methods of the "list showing".
[ "A", "common", "function", "for", "building", "methods", "of", "the", "list", "showing", "." ]
python
test
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L2605-L2618
def BuscarLocalidades(self, cod_prov, cod_localidad=None, consultar=True): "Return the locality or query AFIP for it (internal use)" # if cod_localidad is not given, this is useful to rebuild the cache import wslpg_datos as datos if not str(cod_localidad) in datos.LOCALIDADES and consultar: d = self.ConsultarLocalidadesPorProvincia(cod_prov, sep=None) try: # update the persistent dictionary (shelve) datos.LOCALIDADES.update(d) except Exception, e: print "EXCEPTION CAUGHT", e # catch errors due to permissions (or concurrency) datos.LOCALIDADES = d return datos.LOCALIDADES.get(str(cod_localidad), "")
[ "def", "BuscarLocalidades", "(", "self", ",", "cod_prov", ",", "cod_localidad", "=", "None", ",", "consultar", "=", "True", ")", ":", "# si no se especifíca cod_localidad, es util para reconstruir la cache", "import", "wslpg_datos", "as", "datos", "if", "not", "str", "(", "cod_localidad", ")", "in", "datos", ".", "LOCALIDADES", "and", "consultar", ":", "d", "=", "self", ".", "ConsultarLocalidadesPorProvincia", "(", "cod_prov", ",", "sep", "=", "None", ")", "try", ":", "# actualizar el diccionario persistente (shelve)", "datos", ".", "LOCALIDADES", ".", "update", "(", "d", ")", "except", "Exception", ",", "e", ":", "print", "\"EXCEPCION CAPTURADA\"", ",", "e", "# capturo errores por permisos (o por concurrencia)", "datos", ".", "LOCALIDADES", "=", "d", "return", "datos", ".", "LOCALIDADES", ".", "get", "(", "str", "(", "cod_localidad", ")", ",", "\"\"", ")" ]
Return the locality or query AFIP for it (internal use)
[ "Devuelve", "la", "localidad", "o", "la", "consulta", "en", "AFIP", "(", "uso", "interno", ")" ]
python
train
matthew-sochor/transfer
transfer/project.py
https://github.com/matthew-sochor/transfer/blob/c1931a16459275faa7a5e9860fbed079a4848b80/transfer/project.py#L20-L143
def configure(): ''' Configure the transfer environment and store ''' completer = Completer() readline.set_completer_delims('\t') readline.parse_and_bind('tab: complete') readline.set_completer(completer.path_completer) home = os.path.expanduser('~') if os.path.isfile(os.path.join(home, '.transfer', 'config.yaml')): with open(os.path.join(home, '.transfer', 'config.yaml'), 'r') as fp: config = yaml.load(fp.read()) else: config = [] project_name = input('Name your project: ') existing_project = None for project in config: if project_name == project['name']: existing_project = project_name if existing_project is not None: print(colored('Project ' + project_name + ' already exists', 'red')) overwrite = str_input('Would you like to overwrite this project? (yes or no) ', ['yes', 'no']) if overwrite == 'no': return else: config = [project for project in config if project_name != project['name']] image_path = os.path.expanduser(input('Select parent directory for your images: ')) path_unset = True while path_unset: project_path = os.path.expanduser(input('Select destination for your project: ')) if (project_path.find(image_path) == 0): print('Project destination should not be same or within image directory!') else: path_unset = False print('Select architecture:') print('[0] resnet50') print('[1] xception') print('[2] inception_v3') architecture = int_input('choice', 0, 2, show_range = False) if architecture == 0: arch = 'resnet50' img_dim = 224 conv_dim = 7 final_cutoff = 80 elif architecture == 1: arch = 'xception' img_dim = 299 conv_dim = 10 final_cutoff = 80 else: arch = 'inception_v3' img_dim = 299 conv_dim = 8 final_cutoff = 80 api_port = int_input('port for local prediction API (suggested: 5000)', 1024, 49151) kfold = int_input('number of folds to use (suggested: 5)', 3, 10) kfold_every = bool_input('Fit a model for every fold? (if false, just fit one)') print('Warning: if working on a remote computer, you may not be able to plot!') plot_cm = bool_input('Plot a confusion matrix after training?') batch_size = int_input('batch size (suggested: 8)', 1, 64) learning_rate = float_input('learning rate (suggested: 0.001)', 0, 1) learning_rate_decay = float_input('learning decay rate (suggested: 0.000001)', 0, 1) cycle = int_input('number of cycles before resetting the learning rate (suggested: 3)', 1, 10) num_rounds = int_input('number of rounds (suggested: 3)', 1, 100) print('Select image resolution:') print('[0] low (' + str(img_dim) + ' px)') print('[1] mid (' + str(img_dim * 2) + ' px)') print('[2] high (' + str(img_dim * 4) + ' px)') img_resolution_index = int_input('choice', 0, 2, show_range = False) if img_resolution_index == 0: img_size = 1 elif img_resolution_index == 1: img_size = 2 else: img_size = 4 use_augmentation = str_input('Would you like to add image augmentation? (yes or no) ', ['yes', 'no']) if use_augmentation == 'yes': augmentations = select_augmentations() else: augmentations = None project = {'name': project_name, 'img_path': image_path, 'path': project_path, 'plot': plot_cm, 'api_port': api_port, 'kfold': kfold, 'kfold_every': kfold_every, 'cycle': cycle, 'seed': np.random.randint(9999), 'batch_size': batch_size, 'learning_rate': learning_rate, 'learning_rate_decay': learning_rate_decay, 'final_cutoff': final_cutoff, 'rounds': num_rounds, 'img_size': img_size, 'augmentations': augmentations, 'architecture': arch, 'img_dim': img_dim, 'conv_dim': conv_dim, 'is_split': False, 'is_array': False, 'is_augmented': False, 'is_pre_model': False, 'is_final': False, 'model_round': 0, 'server_weights': None, 'last_weights': None, 'best_weights': None} config.append(project) store_config(config) print('') print(colored('Project configure saved!', 'cyan')) print('') print('To run project:') print('') print(colored(' transfer --run --project ' + project_name, 'green')) print('or') print(colored(' transfer -r -p ' + project_name, 'green'))
[ "def", "configure", "(", ")", ":", "completer", "=", "Completer", "(", ")", "readline", ".", "set_completer_delims", "(", "'\\t'", ")", "readline", ".", "parse_and_bind", "(", "'tab: complete'", ")", "readline", ".", "set_completer", "(", "completer", ".", "path_completer", ")", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "home", ",", "'.transfer'", ",", "'config.yaml'", ")", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "home", ",", "'.transfer'", ",", "'config.yaml'", ")", ",", "'r'", ")", "as", "fp", ":", "config", "=", "yaml", ".", "load", "(", "fp", ".", "read", "(", ")", ")", "else", ":", "config", "=", "[", "]", "project_name", "=", "input", "(", "'Name your project: '", ")", "existing_project", "=", "None", "for", "project", "in", "config", ":", "if", "project_name", "==", "project", "[", "'name'", "]", ":", "existing_project", "=", "project_name", "if", "existing_project", "is", "not", "None", ":", "print", "(", "colored", "(", "'Project '", "+", "project_name", "+", "' already exists'", ",", "'red'", ")", ")", "overwrite", "=", "str_input", "(", "'Would you like to overwrite this project? (yes or no) '", ",", "[", "'yes'", ",", "'no'", "]", ")", "if", "overwrite", "==", "'no'", ":", "return", "else", ":", "config", "=", "[", "project", "for", "project", "in", "config", "if", "project_name", "!=", "project", "[", "'name'", "]", "]", "image_path", "=", "os", ".", "path", ".", "expanduser", "(", "input", "(", "'Select parent directory for your images: '", ")", ")", "path_unset", "=", "True", "while", "path_unset", ":", "project_path", "=", "os", ".", "path", ".", "expanduser", "(", "input", "(", "'Select destination for your project: '", ")", ")", "if", "(", "project_path", ".", "find", "(", "image_path", ")", "==", "0", ")", ":", "print", "(", "'Project destination should not be same or within image directory!'", ")", "else", ":", "path_unset", "=", "False", "print", "(", "'Select architecture:'", ")", "print", "(", "'[0] resnet50'", ")", "print", "(", "'[1] xception'", ")", "print", "(", "'[2] inception_v3'", ")", "architecture", "=", "int_input", "(", "'choice'", ",", "0", ",", "2", ",", "show_range", "=", "False", ")", "if", "architecture", "==", "0", ":", "arch", "=", "'resnet50'", "img_dim", "=", "224", "conv_dim", "=", "7", "final_cutoff", "=", "80", "elif", "architecture", "==", "1", ":", "arch", "=", "'xception'", "img_dim", "=", "299", "conv_dim", "=", "10", "final_cutoff", "=", "80", "else", ":", "arch", "=", "'inception_v3'", "img_dim", "=", "299", "conv_dim", "=", "8", "final_cutoff", "=", "80", "api_port", "=", "int_input", "(", "'port for local prediction API (suggested: 5000)'", ",", "1024", ",", "49151", ")", "kfold", "=", "int_input", "(", "'number of folds to use (suggested: 5)'", ",", "3", ",", "10", ")", "kfold_every", "=", "bool_input", "(", "'Fit a model for every fold? 
(if false, just fit one)'", ")", "print", "(", "'Warning: if working on a remote computer, you may not be able to plot!'", ")", "plot_cm", "=", "bool_input", "(", "'Plot a confusion matrix after training?'", ")", "batch_size", "=", "int_input", "(", "'batch size (suggested: 8)'", ",", "1", ",", "64", ")", "learning_rate", "=", "float_input", "(", "'learning rate (suggested: 0.001)'", ",", "0", ",", "1", ")", "learning_rate_decay", "=", "float_input", "(", "'learning decay rate (suggested: 0.000001)'", ",", "0", ",", "1", ")", "cycle", "=", "int_input", "(", "'number of cycles before resetting the learning rate (suggested: 3)'", ",", "1", ",", "10", ")", "num_rounds", "=", "int_input", "(", "'number of rounds (suggested: 3)'", ",", "1", ",", "100", ")", "print", "(", "'Select image resolution:'", ")", "print", "(", "'[0] low ('", "+", "str", "(", "img_dim", ")", "+", "' px)'", ")", "print", "(", "'[1] mid ('", "+", "str", "(", "img_dim", "*", "2", ")", "+", "' px)'", ")", "print", "(", "'[2] high ('", "+", "str", "(", "img_dim", "*", "4", ")", "+", "' px)'", ")", "img_resolution_index", "=", "int_input", "(", "'choice'", ",", "0", ",", "2", ",", "show_range", "=", "False", ")", "if", "img_resolution_index", "==", "0", ":", "img_size", "=", "1", "elif", "img_resolution_index", "==", "1", ":", "img_size", "=", "2", "else", ":", "img_size", "=", "4", "use_augmentation", "=", "str_input", "(", "'Would you like to add image augmentation? (yes or no) '", ",", "[", "'yes'", ",", "'no'", "]", ")", "if", "use_augmentation", "==", "'yes'", ":", "augmentations", "=", "select_augmentations", "(", ")", "else", ":", "augmentations", "=", "None", "project", "=", "{", "'name'", ":", "project_name", ",", "'img_path'", ":", "image_path", ",", "'path'", ":", "project_path", ",", "'plot'", ":", "plot_cm", ",", "'api_port'", ":", "api_port", ",", "'kfold'", ":", "kfold", ",", "'kfold_every'", ":", "kfold_every", ",", "'cycle'", ":", "cycle", ",", "'seed'", ":", "np", ".", "random", ".", "randint", "(", "9999", ")", ",", "'batch_size'", ":", "batch_size", ",", "'learning_rate'", ":", "learning_rate", ",", "'learning_rate_decay'", ":", "learning_rate_decay", ",", "'final_cutoff'", ":", "final_cutoff", ",", "'rounds'", ":", "num_rounds", ",", "'img_size'", ":", "img_size", ",", "'augmentations'", ":", "augmentations", ",", "'architecture'", ":", "arch", ",", "'img_dim'", ":", "img_dim", ",", "'conv_dim'", ":", "conv_dim", ",", "'is_split'", ":", "False", ",", "'is_array'", ":", "False", ",", "'is_augmented'", ":", "False", ",", "'is_pre_model'", ":", "False", ",", "'is_final'", ":", "False", ",", "'model_round'", ":", "0", ",", "'server_weights'", ":", "None", ",", "'last_weights'", ":", "None", ",", "'best_weights'", ":", "None", "}", "config", ".", "append", "(", "project", ")", "store_config", "(", "config", ")", "print", "(", "''", ")", "print", "(", "colored", "(", "'Project configure saved!'", ",", "'cyan'", ")", ")", "print", "(", "''", ")", "print", "(", "'To run project:'", ")", "print", "(", "''", ")", "print", "(", "colored", "(", "' transfer --run --project '", "+", "project_name", ",", "'green'", ")", ")", "print", "(", "'or'", ")", "print", "(", "colored", "(", "' transfer -r -p '", "+", "project_name", ",", "'green'", ")", ")" ]
Configure the transfer environment and store
[ "Configure", "the", "transfer", "environment", "and", "store" ]
python
train
heroku/sf-suds
suds/sudsobject.py
https://github.com/heroku/sf-suds/blob/44b6743a45ff4447157605d6fecc9bf5922ce68a/suds/sudsobject.py#L379-L390
def exclude(self, d, item): """ check metadata for excluded items """ try: md = d.__metadata__ pmd = getattr(md, '__print__', None) if pmd is None: return False excludes = getattr(pmd, 'excludes', []) return ( item[0] in excludes ) except: pass return False
[ "def", "exclude", "(", "self", ",", "d", ",", "item", ")", ":", "try", ":", "md", "=", "d", ".", "__metadata__", "pmd", "=", "getattr", "(", "md", ",", "'__print__'", ",", "None", ")", "if", "pmd", "is", "None", ":", "return", "False", "excludes", "=", "getattr", "(", "pmd", ",", "'excludes'", ",", "[", "]", ")", "return", "(", "item", "[", "0", "]", "in", "excludes", ")", "except", ":", "pass", "return", "False" ]
check metadata for excluded items
[ "check", "metadata", "for", "excluded", "items" ]
python
train
gbiggs/rtctree
rtctree/component.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/component.py#L440-L448
def members(self): '''Member components if this component is composite.''' with self._mutex: if not self._members: self._members = {} for o in self.organisations: # TODO: Search for these in the tree self._members[o.org_id] = o.obj.get_members() return self._members
[ "def", "members", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "not", "self", ".", "_members", ":", "self", ".", "_members", "=", "{", "}", "for", "o", "in", "self", ".", "organisations", ":", "# TODO: Search for these in the tree", "self", ".", "_members", "[", "o", ".", "org_id", "]", "=", "o", ".", "obj", ".", "get_members", "(", ")", "return", "self", ".", "_members" ]
Member components if this component is composite.
[ "Member", "components", "if", "this", "component", "is", "composite", "." ]
python
train
ubyssey/dispatch
dispatch/modules/integrations/integrations.py
https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/modules/integrations/integrations.py#L91-L119
def callback(cls, user, query): """Receive OAuth callback request from Facebook.""" # Get settings for this integration settings = cls.get_settings(show_hidden=True) fb = Facebook() payload = { 'client_id': settings['client_id'], 'client_secret': settings['client_secret'], 'code': query['code'], 'redirect_uri': cls.REDIRECT_URI } try: # Authenticate with Facebook fb.get_access_token(payload) # Fetch pages belonging to authenticated user pages = fb.list_pages('me') except FacebookAPIError, e: raise IntegrationCallbackError(e.message) return { 'pages': pages }
[ "def", "callback", "(", "cls", ",", "user", ",", "query", ")", ":", "# Get settings for this integration", "settings", "=", "cls", ".", "get_settings", "(", "show_hidden", "=", "True", ")", "fb", "=", "Facebook", "(", ")", "payload", "=", "{", "'client_id'", ":", "settings", "[", "'client_id'", "]", ",", "'client_secret'", ":", "settings", "[", "'client_secret'", "]", ",", "'code'", ":", "query", "[", "'code'", "]", ",", "'redirect_uri'", ":", "cls", ".", "REDIRECT_URI", "}", "try", ":", "# Authenticate with Facebook", "fb", ".", "get_access_token", "(", "payload", ")", "# Fetch pages belonging to authenticated user", "pages", "=", "fb", ".", "list_pages", "(", "'me'", ")", "except", "FacebookAPIError", ",", "e", ":", "raise", "IntegrationCallbackError", "(", "e", ".", "message", ")", "return", "{", "'pages'", ":", "pages", "}" ]
Receive OAuth callback request from Facebook.
[ "Receive", "OAuth", "callback", "request", "from", "Facebook", "." ]
python
test
rootpy/rootpy
rootpy/tree/cut.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/cut.py#L250-L262
def latex(self): """ Returns a string representation for use in LaTeX """ if not self: return "" s = str(self) s = s.replace("==", " = ") s = s.replace("<=", " \leq ") s = s.replace(">=", " \geq ") s = s.replace("&&", r" \text{ and } ") s = s.replace("||", r" \text{ or } ") return s
[ "def", "latex", "(", "self", ")", ":", "if", "not", "self", ":", "return", "\"\"", "s", "=", "str", "(", "self", ")", "s", "=", "s", ".", "replace", "(", "\"==\"", ",", "\" = \"", ")", "s", "=", "s", ".", "replace", "(", "\"<=\"", ",", "\" \\leq \"", ")", "s", "=", "s", ".", "replace", "(", "\">=\"", ",", "\" \\geq \"", ")", "s", "=", "s", ".", "replace", "(", "\"&&\"", ",", "r\" \\text{ and } \"", ")", "s", "=", "s", ".", "replace", "(", "\"||\"", ",", "r\" \\text{ or } \"", ")", "return", "s" ]
Returns a string representation for use in LaTeX
[ "Returns", "a", "string", "representation", "for", "use", "in", "LaTeX" ]
python
train
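The same substitutions as a standalone helper, applied to a raw selection string:

```python
def cut_to_latex(cut):
    # Mirror the replacements in Cut.latex for a plain string.
    for old, new in [("==", " = "), ("<=", r" \leq "), (">=", r" \geq "),
                     ("&&", r" \text{ and } "), ("||", r" \text{ or } ")]:
        cut = cut.replace(old, new)
    return cut

print(cut_to_latex("pt>=20&&abs(eta)<=2.5"))
# pt \geq 20 \text{ and } abs(eta) \leq 2.5
```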
JasonKessler/scattertext
scattertext/TermDocMatrix.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrix.py#L733-L751
def use_categories_as_metadata_and_replace_terms(self): ''' Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the categories present and term-doc-matrix is now the metadata matrix. :return: TermDocMatrix ''' new_metadata_factory = CSRMatrixFactory() for i, category_idx in enumerate(self.get_category_ids()): new_metadata_factory[i, category_idx] = 1 new_metadata = new_metadata_factory.get_csr_matrix() new_tdm = self._make_new_term_doc_matrix(self._mX, new_metadata, self._y, self._metadata_idx_store, self._category_idx_store, copy(self._category_idx_store), self._y == self._y) return new_tdm
[ "def", "use_categories_as_metadata_and_replace_terms", "(", "self", ")", ":", "new_metadata_factory", "=", "CSRMatrixFactory", "(", ")", "for", "i", ",", "category_idx", "in", "enumerate", "(", "self", ".", "get_category_ids", "(", ")", ")", ":", "new_metadata_factory", "[", "i", ",", "category_idx", "]", "=", "1", "new_metadata", "=", "new_metadata_factory", ".", "get_csr_matrix", "(", ")", "new_tdm", "=", "self", ".", "_make_new_term_doc_matrix", "(", "self", ".", "_mX", ",", "new_metadata", ",", "self", ".", "_y", ",", "self", ".", "_metadata_idx_store", ",", "self", ".", "_category_idx_store", ",", "copy", "(", "self", ".", "_category_idx_store", ")", ",", "self", ".", "_y", "==", "self", ".", "_y", ")", "return", "new_tdm" ]
Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the categories present and term-doc-matrix is now the metadata matrix. :return: TermDocMatrix
[ "Returns", "a", "TermDocMatrix", "which", "is", "identical", "to", "self", "except", "the", "metadata", "values", "are", "now", "identical", "to", "the", "categories", "present", "and", "term", "-", "doc", "-", "matrix", "is", "now", "the", "metadata", "matrix", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/distributed/runfn.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/runfn.py#L124-L143
def _add_resources(data, runtime): """Merge input resources with current CWL runtime parameters. """ if "config" not in data: data["config"] = {} # Convert input resources, which may be a JSON string resources = data.get("resources", {}) or {} if isinstance(resources, six.string_types) and resources.startswith(("{", "[")): resources = json.loads(resources) data["resources"] = resources assert isinstance(resources, dict), (resources, data) data["config"]["resources"] = resources # Add in memory and core usage from CWL memory = int(float(runtime["ram"]) / float(runtime["cores"])) data["config"]["resources"].update({"default": {"cores": int(runtime["cores"]), "memory": "%sM" % memory, "jvm_opts": ["-Xms%sm" % min(1000, memory // 2), "-Xmx%sm" % memory]}}) data["config"]["algorithm"]["num_cores"] = int(runtime["cores"]) return data
[ "def", "_add_resources", "(", "data", ",", "runtime", ")", ":", "if", "\"config\"", "not", "in", "data", ":", "data", "[", "\"config\"", "]", "=", "{", "}", "# Convert input resources, which may be a JSON string", "resources", "=", "data", ".", "get", "(", "\"resources\"", ",", "{", "}", ")", "or", "{", "}", "if", "isinstance", "(", "resources", ",", "six", ".", "string_types", ")", "and", "resources", ".", "startswith", "(", "(", "\"{\"", ",", "\"[\"", ")", ")", ":", "resources", "=", "json", ".", "loads", "(", "resources", ")", "data", "[", "\"resources\"", "]", "=", "resources", "assert", "isinstance", "(", "resources", ",", "dict", ")", ",", "(", "resources", ",", "data", ")", "data", "[", "\"config\"", "]", "[", "\"resources\"", "]", "=", "resources", "# Add in memory and core usage from CWL", "memory", "=", "int", "(", "float", "(", "runtime", "[", "\"ram\"", "]", ")", "/", "float", "(", "runtime", "[", "\"cores\"", "]", ")", ")", "data", "[", "\"config\"", "]", "[", "\"resources\"", "]", ".", "update", "(", "{", "\"default\"", ":", "{", "\"cores\"", ":", "int", "(", "runtime", "[", "\"cores\"", "]", ")", ",", "\"memory\"", ":", "\"%sM\"", "%", "memory", ",", "\"jvm_opts\"", ":", "[", "\"-Xms%sm\"", "%", "min", "(", "1000", ",", "memory", "//", "2", ")", ",", "\"-Xmx%sm\"", "%", "memory", "]", "}", "}", ")", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "[", "\"num_cores\"", "]", "=", "int", "(", "runtime", "[", "\"cores\"", "]", ")", "return", "data" ]
Merge input resources with current CWL runtime parameters.
[ "Merge", "input", "resources", "with", "current", "CWL", "runtime", "parameters", "." ]
python
train
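The per-core memory arithmetic in _add_resources is easy to check in isolation. A minimal sketch, assuming a hypothetical CWL runtime dict (the real function also merges the incoming 'resources' value into the bcbio data dict):

    # Hypothetical CWL runtime values; real ones come from the CWL engine.
    runtime = {"ram": 16384, "cores": 4}

    # Memory per core in megabytes, as computed in _add_resources above.
    memory = int(float(runtime["ram"]) / float(runtime["cores"]))
    default = {"cores": int(runtime["cores"]),
               "memory": "%sM" % memory,
               "jvm_opts": ["-Xms%sm" % min(1000, memory // 2),
                            "-Xmx%sm" % memory]}
    print(default)
    # {'cores': 4, 'memory': '4096M', 'jvm_opts': ['-Xms1000m', '-Xmx4096m']}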
gem/oq-engine
openquake/hazardlib/gsim/abrahamson_2015.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_2015.py#L355-L365
def _compute_forearc_backarc_term(self, C, sites, dists): """ Computes the forearc/backarc scaling term given by equation (4). """ f_faba = np.zeros_like(dists.rhypo) # Term only applies to backarc sites (F_FABA = 0. for forearc) max_dist = dists.rhypo[sites.backarc] max_dist[max_dist < 85.0] = 85.0 f_faba[sites.backarc] = C['theta7'] +\ (C['theta8'] * np.log(max_dist / 40.0)) return f_faba
[ "def", "_compute_forearc_backarc_term", "(", "self", ",", "C", ",", "sites", ",", "dists", ")", ":", "f_faba", "=", "np", ".", "zeros_like", "(", "dists", ".", "rhypo", ")", "# Term only applies to backarc sites (F_FABA = 0. for forearc)", "max_dist", "=", "dists", ".", "rhypo", "[", "sites", ".", "backarc", "]", "max_dist", "[", "max_dist", "<", "85.0", "]", "=", "85.0", "f_faba", "[", "sites", ".", "backarc", "]", "=", "C", "[", "'theta7'", "]", "+", "(", "C", "[", "'theta8'", "]", "*", "np", ".", "log", "(", "max_dist", "/", "40.0", ")", ")", "return", "f_faba" ]
Computes the forearc/backarc scaling term given by equation (4).
[ "Computes", "the", "forearc", "/", "backarc", "scaling", "term", "given", "by", "equation", "(", "4", ")", "." ]
python
train
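The backarc masking and the 85 km distance floor from equation (4) can be exercised standalone; the coefficient values below are made up for illustration, not the published theta7/theta8:

    import numpy as np

    rhypo = np.array([20.0, 60.0, 120.0, 300.0])   # hypothetical distances (km)
    backarc = np.array([False, True, True, True])  # per-site backarc flags
    theta7, theta8 = 1.1, -1.4                     # illustrative coefficients

    f_faba = np.zeros_like(rhypo)                  # forearc sites stay 0
    max_dist = rhypo[backarc]                      # boolean indexing copies
    max_dist[max_dist < 85.0] = 85.0               # clamp short distances
    f_faba[backarc] = theta7 + theta8 * np.log(max_dist / 40.0)
    print(f_faba)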
anchore/anchore
anchore/cli/__init__.py
https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/__init__.py#L101-L186
def main_entry(ctx, verbose, debug, quiet, json, plain, html, config_override): """ Anchore is a tool to analyze, query, and curate container images. The options at this top level control stdout and stderr verbosity and format. After installation, the first command run should be: 'anchore feeds list' to initialize the system and load feed data. High-level example flows: Initialize the system and sync the by-default subscribed feed 'vulnerabilties': \b anchore feeds list anchore feeds sync Analyze an image docker pull nginx:latest anchore analyze --image nginx:latest --imagetype base Generate a summary report on all analyzed images anchore audit report Check gate output for nginx:latest: anchore gate --image nginx:latest """ # Load the config into the context object logfile = None debug_logfile = None try: try: config_overrides = {} if config_override: for el in config_override: try: (key, val) = el.split('=') if not key or not val: raise Exception("could not split by '='") config_overrides[key] = val except: click.echo("Error: specified --config_override param cannot be parsed (should be <config_opt>=<value>): " + str(el)) exit(1) args = {'verbose': verbose, 'debug': debug, 'json': json, 'plain': plain, 'html': html, 'quiet': quiet, 'config_overrides':config_overrides} anchore_conf = AnchoreConfiguration(cliargs=args) except Exception as err: click.echo("Error setting up/reading Anchore configuration", err=True) click.echo("Info: "+str(err), err=True) import traceback traceback.print_exc() sys.exit(1) try: logfile = anchore_conf.data['log_file'] if 'log_file' in anchore_conf.data else None debug_logfile = anchore_conf.data['debug_log_file'] if 'debug_log_file' in anchore_conf.data else None except Exception, e: click.echo(str(e)) ctx.obj = anchore_conf except: if ctx.invoked_subcommand != 'system': click.echo('Expected, but did not find configuration file at %s' % os.path.join(AnchoreConfiguration.DEFAULT_CONFIG_FILE), err=True) exit(1) try: init_output_format(json, plain, debug, verbose, quiet, log_filepath=logfile, debug_log_filepath=debug_logfile) except Exception, e: click.echo('Error initializing logging: %s' % str(e)) exit(2) if not anchore_pre_flight_check(ctx): anchore_print_err("Error running pre-flight checks") exit(1) try: if not anchore.anchore_utils.anchore_common_context_setup(ctx.obj): anchore_print_err("Error setting up common data based on configuration") exit(1) except ValueError as err: print "ERROR: " + str(err) exit(1)
[ "def", "main_entry", "(", "ctx", ",", "verbose", ",", "debug", ",", "quiet", ",", "json", ",", "plain", ",", "html", ",", "config_override", ")", ":", "# Load the config into the context object", "logfile", "=", "None", "debug_logfile", "=", "None", "try", ":", "try", ":", "config_overrides", "=", "{", "}", "if", "config_override", ":", "for", "el", "in", "config_override", ":", "try", ":", "(", "key", ",", "val", ")", "=", "el", ".", "split", "(", "'='", ")", "if", "not", "key", "or", "not", "val", ":", "raise", "Exception", "(", "\"could not split by '='\"", ")", "config_overrides", "[", "key", "]", "=", "val", "except", ":", "click", ".", "echo", "(", "\"Error: specified --config_override param cannot be parsed (should be <config_opt>=<value>): \"", "+", "str", "(", "el", ")", ")", "exit", "(", "1", ")", "args", "=", "{", "'verbose'", ":", "verbose", ",", "'debug'", ":", "debug", ",", "'json'", ":", "json", ",", "'plain'", ":", "plain", ",", "'html'", ":", "html", ",", "'quiet'", ":", "quiet", ",", "'config_overrides'", ":", "config_overrides", "}", "anchore_conf", "=", "AnchoreConfiguration", "(", "cliargs", "=", "args", ")", "except", "Exception", "as", "err", ":", "click", ".", "echo", "(", "\"Error setting up/reading Anchore configuration\"", ",", "err", "=", "True", ")", "click", ".", "echo", "(", "\"Info: \"", "+", "str", "(", "err", ")", ",", "err", "=", "True", ")", "import", "traceback", "traceback", ".", "print_exc", "(", ")", "sys", ".", "exit", "(", "1", ")", "try", ":", "logfile", "=", "anchore_conf", ".", "data", "[", "'log_file'", "]", "if", "'log_file'", "in", "anchore_conf", ".", "data", "else", "None", "debug_logfile", "=", "anchore_conf", ".", "data", "[", "'debug_log_file'", "]", "if", "'debug_log_file'", "in", "anchore_conf", ".", "data", "else", "None", "except", "Exception", ",", "e", ":", "click", ".", "echo", "(", "str", "(", "e", ")", ")", "ctx", ".", "obj", "=", "anchore_conf", "except", ":", "if", "ctx", ".", "invoked_subcommand", "!=", "'system'", ":", "click", ".", "echo", "(", "'Expected, but did not find configuration file at %s'", "%", "os", ".", "path", ".", "join", "(", "AnchoreConfiguration", ".", "DEFAULT_CONFIG_FILE", ")", ",", "err", "=", "True", ")", "exit", "(", "1", ")", "try", ":", "init_output_format", "(", "json", ",", "plain", ",", "debug", ",", "verbose", ",", "quiet", ",", "log_filepath", "=", "logfile", ",", "debug_log_filepath", "=", "debug_logfile", ")", "except", "Exception", ",", "e", ":", "click", ".", "echo", "(", "'Error initializing logging: %s'", "%", "str", "(", "e", ")", ")", "exit", "(", "2", ")", "if", "not", "anchore_pre_flight_check", "(", "ctx", ")", ":", "anchore_print_err", "(", "\"Error running pre-flight checks\"", ")", "exit", "(", "1", ")", "try", ":", "if", "not", "anchore", ".", "anchore_utils", ".", "anchore_common_context_setup", "(", "ctx", ".", "obj", ")", ":", "anchore_print_err", "(", "\"Error setting up common data based on configuration\"", ")", "exit", "(", "1", ")", "except", "ValueError", "as", "err", ":", "print", "\"ERROR: \"", "+", "str", "(", "err", ")", "exit", "(", "1", ")" ]
Anchore is a tool to analyze, query, and curate container images. The options at this top level control stdout and stderr verbosity and format. After installation, the first command run should be: 'anchore feeds list' to initialize the system and load feed data. High-level example flows: Initialize the system and sync the by-default subscribed feed 'vulnerabilities': \b anchore feeds list anchore feeds sync Analyze an image docker pull nginx:latest anchore analyze --image nginx:latest --imagetype base Generate a summary report on all analyzed images anchore audit report Check gate output for nginx:latest: anchore gate --image nginx:latest
[ "Anchore", "is", "a", "tool", "to", "analyze", "query", "and", "curate", "container", "images", ".", "The", "options", "at", "this", "top", "level", "control", "stdout", "and", "stderr", "verbosity", "and", "format", "." ]
python
train
saltstack/salt
salt/log/handlers/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/log/handlers/__init__.py#L73-L87
def sync_with_handlers(self, handlers=()): ''' Sync the stored log records to the provided log handlers. ''' if not handlers: return while self.__messages: record = self.__messages.pop(0) for handler in handlers: if handler.level > record.levelno: # If the handler's level is higher than the log record one, # it should not handle the log record continue handler.handle(record)
[ "def", "sync_with_handlers", "(", "self", ",", "handlers", "=", "(", ")", ")", ":", "if", "not", "handlers", ":", "return", "while", "self", ".", "__messages", ":", "record", "=", "self", ".", "__messages", ".", "pop", "(", "0", ")", "for", "handler", "in", "handlers", ":", "if", "handler", ".", "level", ">", "record", ".", "levelno", ":", "# If the handler's level is higher than the log record one,", "# it should not handle the log record", "continue", "handler", ".", "handle", "(", "record", ")" ]
Sync the stored log records to the provided log handlers.
[ "Sync", "the", "stored", "log", "records", "to", "the", "provided", "log", "handlers", "." ]
python
train
Erotemic/utool
utool/util_iter.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_iter.py#L17-L24
def wrap_iterable(obj): """ Returns: wrapped_obj, was_scalar """ was_scalar = not isiterable(obj) wrapped_obj = [obj] if was_scalar else obj return wrapped_obj, was_scalar
[ "def", "wrap_iterable", "(", "obj", ")", ":", "was_scalar", "=", "not", "isiterable", "(", "obj", ")", "wrapped_obj", "=", "[", "obj", "]", "if", "was_scalar", "else", "obj", "return", "wrapped_obj", ",", "was_scalar" ]
Returns: wrapped_obj, was_scalar
[ "Returns", ":", "wrapped_obj", "was_scalar" ]
python
train
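Usage is a simple round trip: wrap on the way in, branch on was_scalar on the way out. A sketch assuming utool is installed and exposes the function at this module path:

    from utool.util_iter import wrap_iterable  # assumes utool is installed

    wrapped, was_scalar = wrap_iterable(3)
    print(wrapped, was_scalar)     # [3] True
    wrapped, was_scalar = wrap_iterable([3, 4])
    print(wrapped, was_scalar)     # [3, 4] False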
nugget/python-insteonplm
insteonplm/messages/messageFlags.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/messages/messageFlags.py#L256-L275
def _normalize(self, flags): """Take any format of flags and turn it into a hex string.""" norm = None if isinstance(flags, MessageFlags): norm = flags.bytes elif isinstance(flags, bytearray): norm = binascii.hexlify(flags) elif isinstance(flags, int): norm = bytes([flags]) elif isinstance(flags, bytes): norm = binascii.hexlify(flags) elif isinstance(flags, str): flags = flags[0:2] norm = binascii.hexlify(binascii.unhexlify(flags.lower())) elif flags is None: norm = None else: _LOGGER.warning('MessageFlags with unknown type %s: %r', type(flags), flags) return norm
[ "def", "_normalize", "(", "self", ",", "flags", ")", ":", "norm", "=", "None", "if", "isinstance", "(", "flags", ",", "MessageFlags", ")", ":", "norm", "=", "flags", ".", "bytes", "elif", "isinstance", "(", "flags", ",", "bytearray", ")", ":", "norm", "=", "binascii", ".", "hexlify", "(", "flags", ")", "elif", "isinstance", "(", "flags", ",", "int", ")", ":", "norm", "=", "bytes", "(", "[", "flags", "]", ")", "elif", "isinstance", "(", "flags", ",", "bytes", ")", ":", "norm", "=", "binascii", ".", "hexlify", "(", "flags", ")", "elif", "isinstance", "(", "flags", ",", "str", ")", ":", "flags", "=", "flags", "[", "0", ":", "2", "]", "norm", "=", "binascii", ".", "hexlify", "(", "binascii", ".", "unhexlify", "(", "flags", ".", "lower", "(", ")", ")", ")", "elif", "flags", "is", "None", ":", "norm", "=", "None", "else", ":", "_LOGGER", ".", "warning", "(", "'MessageFlags with unknown type %s: %r'", ",", "type", "(", "flags", ")", ",", "flags", ")", "return", "norm" ]
Take any format of flags and turn it into a hex string.
[ "Take", "any", "format", "of", "flags", "and", "turn", "it", "into", "a", "hex", "string", "." ]
python
train
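Each branch of _normalize converges on either a raw byte or a lowercase hex byte string. A standalone Python 3 re-implementation sketch (the MessageFlags and Python 2 unicode branches are omitted):

    import binascii

    def normalize_flags(flags):
        # Condensed sketch of insteonplm's _normalize, minus MessageFlags.
        if isinstance(flags, bytearray):
            return binascii.hexlify(flags)
        if isinstance(flags, int):
            return bytes([flags])            # note: the int branch is not hexlified
        if isinstance(flags, bytes):
            return binascii.hexlify(flags)
        if isinstance(flags, str):
            return binascii.hexlify(binascii.unhexlify(flags[0:2].lower()))
        return None

    print(normalize_flags(0x8a))     # b'\x8a'
    print(normalize_flags(b'\x8a'))  # b'8a'
    print(normalize_flags('8A'))     # b'8a'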
matthieugouel/gibica
gibica/parser.py
https://github.com/matthieugouel/gibica/blob/65f937f7a6255078cc22eb7691a2897466032909/gibica/parser.py#L218-L230
def logical_or_expr(self): """ logical_or_expr: logical_and_expr ('or' logical_and_expr)* """ node = self.logical_and_expr() while self.token.nature == Nature.OR: token = self.token self._process(Nature.OR) node = BinaryOperation(left=node, op=token, right=self.logical_and_expr()) return node
[ "def", "logical_or_expr", "(", "self", ")", ":", "node", "=", "self", ".", "logical_and_expr", "(", ")", "while", "self", ".", "token", ".", "nature", "==", "Nature", ".", "OR", ":", "token", "=", "self", ".", "token", "self", ".", "_process", "(", "Nature", ".", "OR", ")", "node", "=", "BinaryOperation", "(", "left", "=", "node", ",", "op", "=", "token", ",", "right", "=", "self", ".", "logical_and_expr", "(", ")", ")", "return", "node" ]
logical_or_expr: logical_and_expr ('or' logical_and_expr)*
[ "logical_or_expr", ":", "logical_and_expr", "(", "or", "logical_and_expr", ")", "*" ]
python
train
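This is the standard recursive-descent loop for a left-associative binary operator: parse the tighter-binding level once, then fold each trailing operator into the left side. A minimal runnable sketch over a flat token list (the Parser class here is a stand-in, not Gibica's):

    class Parser:
        """Toy parser: atoms are plain tokens, 'or' is the only operator."""
        def __init__(self, tokens):
            self.tokens, self.pos = tokens, 0

        def peek(self):
            return self.tokens[self.pos] if self.pos < len(self.tokens) else None

        def atom(self):
            tok = self.tokens[self.pos]
            self.pos += 1
            return tok

        def logical_or(self):
            node = self.atom()                    # next-tighter level first
            while self.peek() == 'or':
                self.pos += 1                     # consume 'or'
                node = ('or', node, self.atom())  # fold left-associatively
            return node

    print(Parser(['a', 'or', 'b', 'or', 'c']).logical_or())
    # ('or', ('or', 'a', 'b'), 'c')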
KnowledgeLinks/rdfframework
rdfframework/rml/rmlmanager.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rml/rmlmanager.py#L59-L105
def register_rml_def(self, location_type, location, filename=None, **kwargs): """ Registers the rml file locations for easy access Args: ----- location_type: ['package_all', 'package_file', 'directory', 'filepath'] location: The correlated location string based on the location_type filename: Optional, associated with 'package_file' location_type kwargs: ------- include_subfolders: Boolean """ if location_type == 'directory': self.register_directory(location, **kwargs) elif location_type == 'filepath': if not os.path.exists(location): raise OSError("File not found", location) if os.path.isfile(location): self.register_rml(location) elif filename: new_loc = os.path.join(location, filename) if not os.path.exists(new_loc): raise OSError("File not found", new_loc) elif os.path.isfile(new_loc): self.register_rml(new_loc) else: raise OSError("File not found", location) elif location_type.startswith('package'): pkg_path = \ importlib.util.find_spec(\ location).submodule_search_locations[0] if location_type.endswith('_all'): self.register_directory(pkg_path, **kwargs) elif location_type.endswith('_file'): filepath = os.path.join(pkg_path, filename) self.register_rml(filepath, **kwargs) else: raise NotImplementedError
[ "def", "register_rml_def", "(", "self", ",", "location_type", ",", "location", ",", "filename", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "location_type", "==", "'directory'", ":", "self", ".", "register_directory", "(", "location", ",", "*", "*", "kwargs", ")", "elif", "location_type", "==", "'filepath'", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "location", ")", ":", "raise", "OSError", "(", "\"File not found\"", ",", "location", ")", "if", "os", ".", "path", ".", "isfile", "(", "location", ")", ":", "self", ".", "register_rml", "(", "location", ")", "elif", "filename", ":", "new_loc", "=", "os", ".", "path", ".", "join", "(", "location", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "new_loc", ")", ":", "raise", "OSError", "(", "\"File not found\"", ",", "new_loc", ")", "elif", "os", ".", "path", ".", "isfile", "(", "new_loc", ")", ":", "self", ".", "register_rml", "(", "new_loc", ")", "else", ":", "raise", "OSError", "(", "\"File not found\"", ",", "location", ")", "elif", "location_type", ".", "startswith", "(", "'package'", ")", ":", "pkg_path", "=", "importlib", ".", "util", ".", "find_spec", "(", "location", ")", ".", "submodule_search_locations", "[", "0", "]", "if", "location_type", ".", "endswith", "(", "'_all'", ")", ":", "self", ".", "register_directory", "(", "pkg_path", ",", "*", "*", "kwargs", ")", "elif", "location_type", ".", "endswith", "(", "'_file'", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "pkg_path", ",", "filename", ")", "self", ".", "register_rml", "(", "filepath", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "NotImplementedError" ]
Registers the rml file locations for easy access Args: ----- location_type: ['package_all', 'package_file', 'directory', 'filepath'] location: The correlated location string based on the location_type filename: Optional, associated with 'package_file' location_type kwargs: ------- include_subfolders: Boolean
[ "Registers", "the", "rml", "file", "locations", "for", "easy", "access" ]
python
train
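The 'package_*' branches of register_rml_def turn a dotted package name into a directory via importlib; that resolution step works on its own (here the stdlib json package stands in for an RML-bearing package):

    import importlib.util

    # How the package_all / package_file branches locate a package on disk.
    spec = importlib.util.find_spec('json')        # stand-in package name
    pkg_path = spec.submodule_search_locations[0]  # the package directory
    print(pkg_path)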
soasme/rio
rio/models/utils.py
https://github.com/soasme/rio/blob/f722eb0ff4b0382bceaff77737f0b87cb78429e7/rio/models/utils.py#L143-L150
def get_data_by_slug_or_404(model, slug, kind='', **kwargs): """Wrap get_data_by_slug, abort 404 if missing data.""" data = get_data_by_slug(model, slug, kind, **kwargs) if not data: abort(404) return data
[ "def", "get_data_by_slug_or_404", "(", "model", ",", "slug", ",", "kind", "=", "''", ",", "*", "*", "kwargs", ")", ":", "data", "=", "get_data_by_slug", "(", "model", ",", "slug", ",", "kind", ",", "*", "*", "kwargs", ")", "if", "not", "data", ":", "abort", "(", "404", ")", "return", "data" ]
Wrap get_data_by_slug, abort 404 if missing data.
[ "Wrap", "get_data_by_slug", "abort", "404", "if", "missing", "data", "." ]
python
train
fmfn/BayesianOptimization
examples/sklearn_example.py
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L21-L33
def svc_cv(C, gamma, data, targets): """SVC cross validation. This function will instantiate a SVC classifier with parameters C and gamma. Combined with data and targets this will in turn be used to perform cross validation. The result of cross validation is returned. Our goal is to find combinations of C and gamma that maximize the roc_auc metric. """ estimator = SVC(C=C, gamma=gamma, random_state=2) cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4) return cval.mean()
[ "def", "svc_cv", "(", "C", ",", "gamma", ",", "data", ",", "targets", ")", ":", "estimator", "=", "SVC", "(", "C", "=", "C", ",", "gamma", "=", "gamma", ",", "random_state", "=", "2", ")", "cval", "=", "cross_val_score", "(", "estimator", ",", "data", ",", "targets", ",", "scoring", "=", "'roc_auc'", ",", "cv", "=", "4", ")", "return", "cval", ".", "mean", "(", ")" ]
SVC cross validation. This function will instantiate a SVC classifier with parameters C and gamma. Combined with data and targets this will in turn be used to perform cross validation. The result of cross validation is returned. Our goal is to find combinations of C and gamma that maximize the roc_auc metric.
[ "SVC", "cross", "validation", "." ]
python
train
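To drive this objective with bayes_opt, data and targets are usually frozen with functools.partial so the optimizer only sees C and gamma. A sketch with an illustrative toy dataset and bounds:

    from functools import partial

    from bayes_opt import BayesianOptimization   # assumes bayes_opt installed
    from sklearn.datasets import make_classification

    data, targets = make_classification(n_samples=200, random_state=1)
    objective = partial(svc_cv, data=data, targets=targets)  # freeze the data

    optimizer = BayesianOptimization(
        f=objective,
        pbounds={'C': (0.001, 100), 'gamma': (0.0001, 0.1)},  # illustrative
        random_state=1,
    )
    optimizer.maximize(n_iter=5)
    print(optimizer.max)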
synw/gencharts
gencharts/__init__.py
https://github.com/synw/gencharts/blob/fa35604a9445b399bb4f91bc91af488e8e8208fd/gencharts/__init__.py#L89-L100
def _patch_json(self, json_data): """ Patch the Altair generated json to the newest Vega Lite spec """ json_data = json.loads(json_data) # add schema json_data["$schema"] = "https://vega.github.io/schema/vega-lite/2.0.0-beta.15.json" # add top level width and height json_data["width"] = json_data["config"]["cell"]["width"] json_data["height"] = json_data["config"]["cell"]["height"] del(json_data["config"]["cell"]) return json.dumps(json_data)
[ "def", "_patch_json", "(", "self", ",", "json_data", ")", ":", "json_data", "=", "json", ".", "loads", "(", "json_data", ")", "# add schema", "json_data", "[", "\"$schema\"", "]", "=", "\"https://vega.github.io/schema/vega-lite/2.0.0-beta.15.json\"", "# add top level width and height", "json_data", "[", "\"width\"", "]", "=", "json_data", "[", "\"config\"", "]", "[", "\"cell\"", "]", "[", "\"width\"", "]", "json_data", "[", "\"height\"", "]", "=", "json_data", "[", "\"config\"", "]", "[", "\"cell\"", "]", "[", "\"height\"", "]", "del", "(", "json_data", "[", "\"config\"", "]", "[", "\"cell\"", "]", ")", "return", "json", ".", "dumps", "(", "json_data", ")" ]
Patch the Altair generated json to the newest Vega Lite spec
[ "Patch", "the", "Altair", "generated", "json", "to", "the", "newest", "Vega", "Lite", "spec" ]
python
test
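The patch is a pure dict rewrite: add the schema, hoist width/height from config.cell to the top level, then drop config.cell. A standalone before/after sketch on a minimal hypothetical payload:

    import json

    # Minimal stand-in for old Altair output; only fields the patch touches.
    old = json.dumps({"config": {"cell": {"width": 450, "height": 300}},
                      "mark": "bar"})

    data = json.loads(old)
    data["$schema"] = ("https://vega.github.io/schema/vega-lite/"
                       "2.0.0-beta.15.json")
    data["width"] = data["config"]["cell"]["width"]    # hoist to top level
    data["height"] = data["config"]["cell"]["height"]
    del data["config"]["cell"]
    print(json.dumps(data, sort_keys=True))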
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L751-L778
def _set_status_data(self, userdata): """Set status properties from userdata response. Response values: d3: On Mask d4: Off Mask d5: X10 House Code d6: X10 Unit d7: Ramp Rate d8: On-Level d9: LED Brightness d10: Non-Toggle Mask d11: LED Bit Mask d12: X10 ALL Bit Mask d13: On/Off Bit Mask """ self._on_mask = userdata['d3'] self._off_mask = userdata['d4'] self._x10_house_code = userdata['d5'] self._x10_unit = userdata['d6'] self._ramp_rate = userdata['d7'] self._on_level = userdata['d8'] self._led_brightness = userdata['d9'] self._non_toggle_mask = userdata['d10'] self._led_bit_mask = userdata['d11'] self._x10_all_bit_mask = userdata['d12'] self._on_off_bit_mask = userdata['d13'] self._trigger_group_bit_mask = userdata['d14']
[ "def", "_set_status_data", "(", "self", ",", "userdata", ")", ":", "self", ".", "_on_mask", "=", "userdata", "[", "'d3'", "]", "self", ".", "_off_mask", "=", "userdata", "[", "'d4'", "]", "self", ".", "_x10_house_code", "=", "userdata", "[", "'d5'", "]", "self", ".", "_x10_unit", "=", "userdata", "[", "'d6'", "]", "self", ".", "_ramp_rate", "=", "userdata", "[", "'d7'", "]", "self", ".", "_on_level", "=", "userdata", "[", "'d8'", "]", "self", ".", "_led_brightness", "=", "userdata", "[", "'d9'", "]", "self", ".", "_non_toggle_mask", "=", "userdata", "[", "'d10'", "]", "self", ".", "_led_bit_mask", "=", "userdata", "[", "'d11'", "]", "self", ".", "_x10_all_bit_mask", "=", "userdata", "[", "'d12'", "]", "self", ".", "_on_off_bit_mask", "=", "userdata", "[", "'d13'", "]", "self", ".", "_trigger_group_bit_mask", "=", "userdata", "[", "'d14'", "]" ]
Set status properties from userdata response. Response values: d3: On Mask d4: Off Mask d5: X10 House Code d6: X10 Unit d7: Ramp Rate d8: On-Level d9: LED Brightness d10: Non-Toggle Mask d11: LED Bit Mask d12: X10 ALL Bit Mask d13: On/Off Bit Mask
[ "Set", "status", "properties", "from", "userdata", "response", "." ]
python
train
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L5665-L5685
def GetCachedPattern(self, patternId: int, cache: bool): """ Get a pattern by patternId. patternId: int, a value in class `PatternId`. Return a pattern if it supports the pattern else None. cache: bool, if True, store the pattern for later use, if False, get a new pattern by `self.GetPattern`. """ if cache: pattern = self._supportedPatterns.get(patternId, None) if pattern: return pattern else: pattern = self.GetPattern(patternId) if pattern: self._supportedPatterns[patternId] = pattern return pattern else: pattern = self.GetPattern(patternId) if pattern: self._supportedPatterns[patternId] = pattern return pattern
[ "def", "GetCachedPattern", "(", "self", ",", "patternId", ":", "int", ",", "cache", ":", "bool", ")", ":", "if", "cache", ":", "pattern", "=", "self", ".", "_supportedPatterns", ".", "get", "(", "patternId", ",", "None", ")", "if", "pattern", ":", "return", "pattern", "else", ":", "pattern", "=", "self", ".", "GetPattern", "(", "patternId", ")", "if", "pattern", ":", "self", ".", "_supportedPatterns", "[", "patternId", "]", "=", "pattern", "return", "pattern", "else", ":", "pattern", "=", "self", ".", "GetPattern", "(", "patternId", ")", "if", "pattern", ":", "self", ".", "_supportedPatterns", "[", "patternId", "]", "=", "pattern", "return", "pattern" ]
Get a pattern by patternId. patternId: int, a value in class `PatternId`. Return a pattern if it supports the pattern else None. cache: bool, if True, store the pattern for later use, if False, get a new pattern by `self.GetPattern`.
[ "Get", "a", "pattern", "by", "patternId", ".", "patternId", ":", "int", "a", "value", "in", "class", "PatternId", ".", "Return", "a", "pattern", "if", "it", "supports", "the", "pattern", "else", "None", ".", "cache", ":", "bool", "if", "True", "store", "the", "pattern", "for", "later", "use", "if", "False", "get", "a", "new", "pattern", "by", "self", ".", "GetPattern", "." ]
python
valid
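Note that both branches of GetCachedPattern repopulate _supportedPatterns; the cache flag only decides whether the stored object is consulted first. The same consult-then-refresh idiom in a generic runnable form:

    _cache = {}

    def get_cached(key, use_cache, fetch):
        if use_cache and key in _cache:
            return _cache[key]      # hit: reuse the stored object
        value = fetch(key)          # miss, or forced refresh
        if value is not None:
            _cache[key] = value     # both paths store for later use
        return value

    print(get_cached('Invoke', True, lambda k: 'v1'))   # fetches: v1
    print(get_cached('Invoke', True, lambda k: 'v2'))   # cached: v1
    print(get_cached('Invoke', False, lambda k: 'v2'))  # refreshes: v2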
twilio/twilio-python
twilio/rest/preview/sync/service/sync_map/sync_map_item.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/sync/service/sync_map/sync_map_item.py#L391-L406
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncMapItemContext for this SyncMapItemInstance :rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemContext """ if self._context is None: self._context = SyncMapItemContext( self._version, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], key=self._solution['key'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "SyncMapItemContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "map_sid", "=", "self", ".", "_solution", "[", "'map_sid'", "]", ",", "key", "=", "self", ".", "_solution", "[", "'key'", "]", ",", ")", "return", "self", ".", "_context" ]
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncMapItemContext for this SyncMapItemInstance :rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
limodou/uliweb
uliweb/utils/common.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L147-L187
def walk_dirs(path, include=None, include_ext=None, exclude=None, exclude_ext=None, recursion=True, file_only=False, use_default_pattern=True, patterns=None): """ path directory path resursion True will extract all sub module of mod """ default_exclude = ['.svn', '_svn', '.git'] default_exclude_ext = ['.pyc', '.pyo', '.bak', '.tmp'] exclude = exclude or [] exclude_ext = exclude_ext or [] include_ext = include_ext or [] include = include or [] if not os.path.exists(path): raise StopIteration for r in os.listdir(path): if match(r, exclude) or (use_default_pattern and r in default_exclude): continue if include and r not in include: continue fpath = os.path.join(path, r) if os.path.isdir(fpath): if not file_only: if patterns and match(r, patterns): yield os.path.normpath(fpath).replace('\\', '/') if recursion: for f in walk_dirs(fpath, include, include_ext, exclude, exclude_ext, recursion, file_only, use_default_pattern, patterns): yield os.path.normpath(f).replace('\\', '/') else: ext = os.path.splitext(fpath)[1] if ext in exclude_ext or (use_default_pattern and ext in default_exclude_ext): continue if include_ext and ext not in include_ext: continue if patterns: if not match(r, patterns): continue yield os.path.normpath(fpath).replace('\\', '/')
[ "def", "walk_dirs", "(", "path", ",", "include", "=", "None", ",", "include_ext", "=", "None", ",", "exclude", "=", "None", ",", "exclude_ext", "=", "None", ",", "recursion", "=", "True", ",", "file_only", "=", "False", ",", "use_default_pattern", "=", "True", ",", "patterns", "=", "None", ")", ":", "default_exclude", "=", "[", "'.svn'", ",", "'_svn'", ",", "'.git'", "]", "default_exclude_ext", "=", "[", "'.pyc'", ",", "'.pyo'", ",", "'.bak'", ",", "'.tmp'", "]", "exclude", "=", "exclude", "or", "[", "]", "exclude_ext", "=", "exclude_ext", "or", "[", "]", "include_ext", "=", "include_ext", "or", "[", "]", "include", "=", "include", "or", "[", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "StopIteration", "for", "r", "in", "os", ".", "listdir", "(", "path", ")", ":", "if", "match", "(", "r", ",", "exclude", ")", "or", "(", "use_default_pattern", "and", "r", "in", "default_exclude", ")", ":", "continue", "if", "include", "and", "r", "not", "in", "include", ":", "continue", "fpath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "r", ")", "if", "os", ".", "path", ".", "isdir", "(", "fpath", ")", ":", "if", "not", "file_only", ":", "if", "patterns", "and", "match", "(", "r", ",", "patterns", ")", ":", "yield", "os", ".", "path", ".", "normpath", "(", "fpath", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "if", "recursion", ":", "for", "f", "in", "walk_dirs", "(", "fpath", ",", "include", ",", "include_ext", ",", "exclude", ",", "exclude_ext", ",", "recursion", ",", "file_only", ",", "use_default_pattern", ",", "patterns", ")", ":", "yield", "os", ".", "path", ".", "normpath", "(", "f", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "else", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fpath", ")", "[", "1", "]", "if", "ext", "in", "exclude_ext", "or", "(", "use_default_pattern", "and", "ext", "in", "default_exclude_ext", ")", ":", "continue", "if", "include_ext", "and", "ext", "not", "in", "include_ext", ":", "continue", "if", "patterns", ":", "if", "not", "match", "(", "r", ",", "patterns", ")", ":", "continue", "yield", "os", ".", "path", ".", "normpath", "(", "fpath", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")" ]
path directory path recursion True will extract all sub modules of mod
[ "path", "directory", "path", "recursion", "True", "will", "extract", "all", "sub", "modules", "of", "mod" ]
python
train
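A usage sketch over a throwaway tree, assuming walk_dirs (and its match helper) are importable from uliweb.utils.common; the default patterns silently drop .svn directories and .pyc files:

    import os, tempfile
    from uliweb.utils.common import walk_dirs  # assumes uliweb is installed

    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, '.svn'))                # skipped by default
    os.makedirs(os.path.join(root, 'pkg'))
    open(os.path.join(root, 'pkg', 'a.py'), 'w').close()
    open(os.path.join(root, 'pkg', 'a.pyc'), 'w').close()  # skipped by default

    for path in walk_dirs(root, include_ext=['.py'], file_only=True):
        print(path)   # only .../pkg/a.py survives the filters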
O365/python-o365
O365/excel.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/excel.py#L1402-L1410
def _get_range(self, endpoint_name): """ Returns a Range based on the endpoint name """ url = self.build_url(self._endpoints.get(endpoint_name)) response = self.session.get(url) if not response: return None data = response.json() return self.range_constructor(parent=self, **{self._cloud_data_key: data})
[ "def", "_get_range", "(", "self", ",", "endpoint_name", ")", ":", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "endpoint_name", ")", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", ")", "if", "not", "response", ":", "return", "None", "data", "=", "response", ".", "json", "(", ")", "return", "self", ".", "range_constructor", "(", "parent", "=", "self", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "data", "}", ")" ]
Returns a Range based on the endpoint name
[ "Returns", "a", "Range", "based", "on", "the", "endpoint", "name" ]
python
train
markovmodel/msmtools
msmtools/estimation/api.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/estimation/api.py#L458-L510
def largest_connected_set(C, directed=True): r"""Largest connected component for a directed graph with edge-weights given by the count matrix. Parameters ---------- C : scipy.sparse matrix Count matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True. Returns ------- lcc : array of integers The largest connected component of the directed graph. See also -------- connected_sets Notes ----- Viewing the count matrix as the adjacency matrix of a (directed) graph the largest connected set is the largest connected set of nodes of the corresponding graph. The largest connected set of a graph can be efficiently computed using Tarjan's algorithm. References ---------- .. [1] Tarjan, R E. 1972. Depth-first search and linear graph algorithms. SIAM Journal on Computing 1 (2): 146-160. Examples -------- >>> import numpy as np >>> from msmtools.estimation import largest_connected_set >>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 0, 4]]) >>> lcc_directed = largest_connected_set(C) >>> lcc_directed array([0, 1]) >>> lcc_undirected = largest_connected_set(C, directed=False) >>> lcc_undirected array([0, 1, 2]) """ if isdense(C): return sparse.connectivity.largest_connected_set(csr_matrix(C), directed=directed) else: return sparse.connectivity.largest_connected_set(C, directed=directed)
[ "def", "largest_connected_set", "(", "C", ",", "directed", "=", "True", ")", ":", "if", "isdense", "(", "C", ")", ":", "return", "sparse", ".", "connectivity", ".", "largest_connected_set", "(", "csr_matrix", "(", "C", ")", ",", "directed", "=", "directed", ")", "else", ":", "return", "sparse", ".", "connectivity", ".", "largest_connected_set", "(", "C", ",", "directed", "=", "directed", ")" ]
r"""Largest connected component for a directed graph with edge-weights given by the count matrix. Parameters ---------- C : scipy.sparse matrix Count matrix specifying edge weights. directed : bool, optional Whether to compute connected components for a directed or undirected graph. Default is True. Returns ------- lcc : array of integers The largest connected component of the directed graph. See also -------- connected_sets Notes ----- Viewing the count matrix as the adjacency matrix of a (directed) graph the largest connected set is the largest connected set of nodes of the corresponding graph. The largest connected set of a graph can be efficiently computed using Tarjan's algorithm. References ---------- .. [1] Tarjan, R E. 1972. Depth-first search and linear graph algorithms. SIAM Journal on Computing 1 (2): 146-160. Examples -------- >>> import numpy as np >>> from msmtools.estimation import largest_connected_set >>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 0, 4]]) >>> lcc_directed = largest_connected_set(C) >>> lcc_directed array([0, 1]) >>> lcc_undirected = largest_connected_set(C, directed=False) >>> lcc_undirected array([0, 1, 2])
[ "r", "Largest", "connected", "component", "for", "a", "directed", "graph", "with", "edge", "-", "weights", "given", "by", "the", "count", "matrix", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/menu_bar.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/menu_bar.py#L379-L388
def remove_all_callbacks(self): """ Remove all callbacks registered to the shortcut manager :return: """ for action in self.registered_shortcut_callbacks.keys(): for callback in self.registered_shortcut_callbacks[action]: self.shortcut_manager.remove_callback_for_action(action, callback) # delete all registered shortcut callbacks self.registered_shortcut_callbacks = {}
[ "def", "remove_all_callbacks", "(", "self", ")", ":", "for", "action", "in", "self", ".", "registered_shortcut_callbacks", ".", "keys", "(", ")", ":", "for", "callback", "in", "self", ".", "registered_shortcut_callbacks", "[", "action", "]", ":", "self", ".", "shortcut_manager", ".", "remove_callback_for_action", "(", "action", ",", "callback", ")", "# delete all registered shortcut callbacks", "self", ".", "registered_shortcut_callbacks", "=", "{", "}" ]
Remove all callbacks registered to the shortcut manager :return:
[ "Remove", "all", "callbacks", "registered", "to", "the", "shortcut", "manager", ":", "return", ":" ]
python
train
m32/endesive
endesive/pdf/fpdf/fpdf.py
https://github.com/m32/endesive/blob/973091dc69847fe2df594c80ac9235a8d08460ff/endesive/pdf/fpdf/fpdf.py#L1037-L1042
def set_x(self, x): "Set x position" if(x>=0): self.x=x else: self.x=self.w+x
[ "def", "set_x", "(", "self", ",", "x", ")", ":", "if", "(", "x", ">=", "0", ")", ":", "self", ".", "x", "=", "x", "else", ":", "self", ".", "x", "=", "self", ".", "w", "+", "x" ]
Set x position
[ "Set", "x", "position" ]
python
train
ska-sa/katcp-python
katcp/core.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/core.py#L295-L312
def format_argument(self, arg): """Format a Message argument to a string""" if isinstance(arg, float): return repr(arg) elif isinstance(arg, bool): return str(int(arg)) else: try: return str(arg) except UnicodeEncodeError: # unicode characters will break the str cast, so # try to encode to ascii and replace the offending characters # with a '?' character logger.error("Error casting message argument to str! " "Trying to encode argument to ascii.") if not isinstance(arg, unicode): arg = arg.decode('utf-8') return arg.encode('ascii', 'replace')
[ "def", "format_argument", "(", "self", ",", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "float", ")", ":", "return", "repr", "(", "arg", ")", "elif", "isinstance", "(", "arg", ",", "bool", ")", ":", "return", "str", "(", "int", "(", "arg", ")", ")", "else", ":", "try", ":", "return", "str", "(", "arg", ")", "except", "UnicodeEncodeError", ":", "# unicode characters will break the str cast, so", "# try to encode to ascii and replace the offending characters", "# with a '?' character", "logger", ".", "error", "(", "\"Error casting message argument to str! \"", "\"Trying to encode argument to ascii.\"", ")", "if", "not", "isinstance", "(", "arg", ",", "unicode", ")", ":", "arg", "=", "arg", ".", "decode", "(", "'utf-8'", ")", "return", "arg", ".", "encode", "(", "'ascii'", ",", "'replace'", ")" ]
Format a Message argument to a string
[ "Format", "a", "Message", "argument", "to", "a", "string" ]
python
train
SoCo/SoCo
soco/data_structures.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/data_structures.py#L151-L189
def from_element(cls, element): """Set the resource properties from a ``<res>`` element. Args: element (~xml.etree.ElementTree.Element): The ``<res>`` element """ def _int_helper(name): """Try to convert the name attribute to an int, or None.""" result = element.get(name) if result is not None: try: return int(result) except ValueError: raise DIDLMetadataError( 'Could not convert {0} to an integer'.format(name)) else: return None content = {} # required content['protocol_info'] = element.get('protocolInfo') if content['protocol_info'] is None: raise DIDLMetadataError('Could not create Resource from Element: ' 'protocolInfo not found (required).') # Optional content['import_uri'] = element.get('importUri') content['size'] = _int_helper('size') content['duration'] = element.get('duration') content['bitrate'] = _int_helper('bitrate') content['sample_frequency'] = _int_helper('sampleFrequency') content['bits_per_sample'] = _int_helper('bitsPerSample') content['nr_audio_channels'] = _int_helper('nrAudioChannels') content['resolution'] = element.get('resolution') content['color_depth'] = _int_helper('colorDepth') content['protection'] = element.get('protection') content['uri'] = element.text return cls(**content)
[ "def", "from_element", "(", "cls", ",", "element", ")", ":", "def", "_int_helper", "(", "name", ")", ":", "\"\"\"Try to convert the name attribute to an int, or None.\"\"\"", "result", "=", "element", ".", "get", "(", "name", ")", "if", "result", "is", "not", "None", ":", "try", ":", "return", "int", "(", "result", ")", "except", "ValueError", ":", "raise", "DIDLMetadataError", "(", "'Could not convert {0} to an integer'", ".", "format", "(", "name", ")", ")", "else", ":", "return", "None", "content", "=", "{", "}", "# required", "content", "[", "'protocol_info'", "]", "=", "element", ".", "get", "(", "'protocolInfo'", ")", "if", "content", "[", "'protocol_info'", "]", "is", "None", ":", "raise", "DIDLMetadataError", "(", "'Could not create Resource from Element: '", "'protocolInfo not found (required).'", ")", "# Optional", "content", "[", "'import_uri'", "]", "=", "element", ".", "get", "(", "'importUri'", ")", "content", "[", "'size'", "]", "=", "_int_helper", "(", "'size'", ")", "content", "[", "'duration'", "]", "=", "element", ".", "get", "(", "'duration'", ")", "content", "[", "'bitrate'", "]", "=", "_int_helper", "(", "'bitrate'", ")", "content", "[", "'sample_frequency'", "]", "=", "_int_helper", "(", "'sampleFrequency'", ")", "content", "[", "'bits_per_sample'", "]", "=", "_int_helper", "(", "'bitsPerSample'", ")", "content", "[", "'nr_audio_channels'", "]", "=", "_int_helper", "(", "'nrAudioChannels'", ")", "content", "[", "'resolution'", "]", "=", "element", ".", "get", "(", "'resolution'", ")", "content", "[", "'color_depth'", "]", "=", "_int_helper", "(", "'colorDepth'", ")", "content", "[", "'protection'", "]", "=", "element", ".", "get", "(", "'protection'", ")", "content", "[", "'uri'", "]", "=", "element", ".", "text", "return", "cls", "(", "*", "*", "content", ")" ]
Set the resource properties from a ``<res>`` element. Args: element (~xml.etree.ElementTree.Element): The ``<res>`` element
[ "Set", "the", "resource", "properties", "from", "a", "<res", ">", "element", "." ]
python
train
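Feeding from_element is plain ElementTree parsing; protocolInfo is the one required attribute. A sketch with made-up attribute values:

    import xml.etree.ElementTree as ET

    # Attribute values here are invented for illustration.
    xml = ('<res protocolInfo="http-get:*:audio/mpeg:*" duration="0:03:21" '
           'bitrate="320000">x-file-cifs://server/track.mp3</res>')
    element = ET.fromstring(xml)

    print(element.get('protocolInfo'))  # required, else DIDLMetadataError
    print(int(element.get('bitrate')))  # optional ints go through _int_helper
    print(element.text)                 # becomes the resource URI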
angr/angr
angr/engines/soot/values/thisref.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/engines/soot/values/thisref.py#L74-L86
def load_field(self, state, field_name, field_type): """ Load a field of a given object, without resolving hierarchy :param state: angr state where we want to load the object attribute :type SimState :param field_name: name of the attribute :type str :param field_type: type of the attribute :type str """ field_ref = SimSootValue_InstanceFieldRef(self.heap_alloc_id, self.type, field_name, field_type) return state.memory.load(field_ref, none_if_missing=False)
[ "def", "load_field", "(", "self", ",", "state", ",", "field_name", ",", "field_type", ")", ":", "field_ref", "=", "SimSootValue_InstanceFieldRef", "(", "self", ".", "heap_alloc_id", ",", "self", ".", "type", ",", "field_name", ",", "field_type", ")", "return", "state", ".", "memory", ".", "load", "(", "field_ref", ",", "none_if_missing", "=", "False", ")" ]
Load a field of a given object, without resolving hierarchy :param state: angr state where we want to load the object attribute :type SimState :param field_name: name of the attribute :type str :param field_type: type of the attribute :type str
[ "Load", "a", "field", "of", "a", "given", "object", "without", "resolving", "hierarchy" ]
python
train
google/grr
grr/server/grr_response_server/aff4_objects/cronjobs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/cronjobs.py#L138-L164
def RunOnce(self, token=None, force=False, names=None): """Tries to lock and run cron jobs. Args: token: security token force: If True, force a run names: List of job names to run. If unset, run them all """ names = names or self.ListJobs(token=token) urns = [self.CRON_JOBS_PATH.Add(name) for name in names] for cron_job_urn in urns: try: with aff4.FACTORY.OpenWithLock( cron_job_urn, blocking=False, token=token, lease_time=600) as cron_job: try: logging.info("Running cron job: %s", cron_job.urn) cron_job.Run(force=force) except Exception as e: # pylint: disable=broad-except logging.exception("Error processing cron job %s: %s", cron_job.urn, e) stats_collector_instance.Get().IncrementCounter( "cron_internal_error") except aff4.LockError: pass
[ "def", "RunOnce", "(", "self", ",", "token", "=", "None", ",", "force", "=", "False", ",", "names", "=", "None", ")", ":", "names", "=", "names", "or", "self", ".", "ListJobs", "(", "token", "=", "token", ")", "urns", "=", "[", "self", ".", "CRON_JOBS_PATH", ".", "Add", "(", "name", ")", "for", "name", "in", "names", "]", "for", "cron_job_urn", "in", "urns", ":", "try", ":", "with", "aff4", ".", "FACTORY", ".", "OpenWithLock", "(", "cron_job_urn", ",", "blocking", "=", "False", ",", "token", "=", "token", ",", "lease_time", "=", "600", ")", "as", "cron_job", ":", "try", ":", "logging", ".", "info", "(", "\"Running cron job: %s\"", ",", "cron_job", ".", "urn", ")", "cron_job", ".", "Run", "(", "force", "=", "force", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "logging", ".", "exception", "(", "\"Error processing cron job %s: %s\"", ",", "cron_job", ".", "urn", ",", "e", ")", "stats_collector_instance", ".", "Get", "(", ")", ".", "IncrementCounter", "(", "\"cron_internal_error\"", ")", "except", "aff4", ".", "LockError", ":", "pass" ]
Tries to lock and run cron jobs. Args: token: security token force: If True, force a run names: List of job names to run. If unset, run them all
[ "Tries", "to", "lock", "and", "run", "cron", "jobs", "." ]
python
train
nickmckay/LiPD-utilities
Python/lipd/lpd_noaa.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/lpd_noaa.py#L889-L918
def __get_overall_data(self, x): """ (recursive) Collect all "sensorGenus" and "sensorSpecies" fields, set data to self :param any x: Any data type :return none: """ if isinstance(x, dict): if "sensorGenus" in x: if x["sensorGenus"] and x["sensorGenus"] not in self.lsts_tmp["genus"]: self.lsts_tmp["genus"].append(x["sensorGenus"]) if "sensorSpecies" in x: if x["sensorSpecies"] and x["sensorSpecies"] not in self.lsts_tmp["species"]: self.lsts_tmp["species"].append(x["sensorSpecies"]) if "archiveType" in x: if x["archiveType"] and x["archiveType"] not in self.lsts_tmp["archive"]: self.lsts_tmp["archive"].append(x["archiveType"]) if "QCnotes" in x: if x["QCnotes"] and x["QCnotes"] not in self.lsts_tmp["qc"]: self.lsts_tmp["qc"].append(x["QCnotes"]) for k, v in x.items(): if isinstance(v, dict): self.__get_overall_data(v) elif isinstance(v, list): self.__get_overall_data(v) elif isinstance(x, list): for i in x: self.__get_overall_data(i) return x
[ "def", "__get_overall_data", "(", "self", ",", "x", ")", ":", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "if", "\"sensorGenus\"", "in", "x", ":", "if", "x", "[", "\"sensorGenus\"", "]", "and", "x", "[", "\"sensorGenus\"", "]", "not", "in", "self", ".", "lsts_tmp", "[", "\"genus\"", "]", ":", "self", ".", "lsts_tmp", "[", "\"genus\"", "]", ".", "append", "(", "x", "[", "\"sensorGenus\"", "]", ")", "if", "\"sensorSpecies\"", "in", "x", ":", "if", "x", "[", "\"sensorSpecies\"", "]", "and", "x", "[", "\"sensorSpecies\"", "]", "not", "in", "self", ".", "lsts_tmp", "[", "\"species\"", "]", ":", "self", ".", "lsts_tmp", "[", "\"species\"", "]", ".", "append", "(", "x", "[", "\"sensorSpecies\"", "]", ")", "if", "\"archiveType\"", "in", "x", ":", "if", "x", "[", "\"archiveType\"", "]", "and", "x", "[", "\"archiveType\"", "]", "not", "in", "self", ".", "lsts_tmp", "[", "\"archive\"", "]", ":", "self", ".", "lsts_tmp", "[", "\"archive\"", "]", ".", "append", "(", "x", "[", "\"archiveType\"", "]", ")", "if", "\"QCnotes\"", "in", "x", ":", "if", "x", "[", "\"QCnotes\"", "]", "and", "x", "[", "\"QCnotes\"", "]", "not", "in", "self", ".", "lsts_tmp", "[", "\"qc\"", "]", ":", "self", ".", "lsts_tmp", "[", "\"qc\"", "]", ".", "append", "(", "x", "[", "\"QCnotes\"", "]", ")", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "self", ".", "__get_overall_data", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "list", ")", ":", "self", ".", "__get_overall_data", "(", "v", ")", "elif", "isinstance", "(", "x", ",", "list", ")", ":", "for", "i", "in", "x", ":", "self", ".", "__get_overall_data", "(", "i", ")", "return", "x" ]
(recursive) Collect all "sensorGenus" and "sensorSpecies" fields, set data to self :param any x: Any data type :return none:
[ "(", "recursive", ")", "Collect", "all", "sensorGenus", "and", "sensorSpecies", "fields", "set", "data", "to", "self", ":", "param", "any", "x", ":", "Any", "data", "type", ":", "return", "none", ":" ]
python
train
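Stripped of the LiPD-specific keys, __get_overall_data is a plain recursive walk that deduplicates whitelisted values from nested dicts and lists. A generic runnable sketch:

    def collect(x, keys, found=None):
        # Generic version of the traversal above, with arbitrary target keys.
        found = {} if found is None else found
        if isinstance(x, dict):
            for key in keys:
                if x.get(key) and x[key] not in found.setdefault(key, []):
                    found[key].append(x[key])
            for value in x.values():
                collect(value, keys, found)     # recurse into dict values
        elif isinstance(x, list):
            for item in x:
                collect(item, keys, found)      # recurse into list items
        return found

    doc = {'paleoData': [{'sensorGenus': 'Pinus',
                          'columns': [{'archiveType': 'tree'}]}]}
    print(collect(doc, ['sensorGenus', 'archiveType']))
    # {'sensorGenus': ['Pinus'], 'archiveType': ['tree']}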
trailofbits/manticore
manticore/ethereum/detectors.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/ethereum/detectors.py#L377-L392
def _unsigned_sub_overflow(state, a, b): """ Sign extend the value to 512 bits and check the result can be represented in 256. Following there is a 32 bit excerpt of this condition: a - b ffffffff bfffffff 80000001 00000000 00000001 3ffffffff 7fffffff ffffffff True True True False True True True bfffffff True True True False False True True 80000001 True True True False False True True 00000000 False False False False False True False 00000001 True False False False False True False ffffffff True True True True True True True 7fffffff True True True False False True False """ cond = Operators.UGT(b, a) return cond
[ "def", "_unsigned_sub_overflow", "(", "state", ",", "a", ",", "b", ")", ":", "cond", "=", "Operators", ".", "UGT", "(", "b", ",", "a", ")", "return", "cond" ]
Sign extend the value to 512 bits and check the result can be represented in 256. Following there is a 32 bit excerpt of this condition: a - b ffffffff bfffffff 80000001 00000000 00000001 3ffffffff 7fffffff ffffffff True True True False True True True bfffffff True True True False False True True 80000001 True True True False False True True 00000000 False False False False False True False 00000001 True False False False False True False ffffffff True True True True True True True 7fffffff True True True False False True False
[ "Sign", "extend", "the", "value", "to", "512", "bits", "and", "check", "the", "result", "can", "be", "represented", "in", "256", ".", "Following", "there", "is", "a", "32", "bit", "excerpt", "of", "this", "condition", ":" ]
python
valid
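For concrete machine integers the symbolic condition reduces to a plain unsigned comparison: a - b wraps exactly when b > a. A quick 32-bit check against the truth table in the docstring:

    MASK = 0xFFFFFFFF  # 32-bit excerpt, matching the docstring table

    def wraps(a, b):
        return ((a - b) & MASK) != (a - b)   # true difference unrepresentable

    for a, b in [(0x00000000, 0x00000001),   # 0 - 1 wraps
                 (0xFFFFFFFF, 0x7FFFFFFF),   # big a: no wrap
                 (0x80000001, 0xBFFFFFFF)]:  # b > a: wraps
        assert wraps(a, b) == (b > a)
    print('a - b overflows exactly when b > a (unsigned)')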
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L5059-L5085
def CheckPrintf(filename, clean_lines, linenum, error): """Check for printf related issues. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # When snprintf is used, the second argument shouldn't be a literal. match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. error(filename, linenum, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (match.group(1), match.group(2))) # Check if some verboten C functions are being used. if Search(r'\bsprintf\s*\(', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.') match = Search(r'\b(strcpy|strcat)\s*\(', line) if match: error(filename, linenum, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % match.group(1))
[ "def", "CheckPrintf", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# When snprintf is used, the second argument shouldn't be a literal.", "match", "=", "Search", "(", "r'snprintf\\s*\\(([^,]*),\\s*([0-9]*)\\s*,'", ",", "line", ")", "if", "match", "and", "match", ".", "group", "(", "2", ")", "!=", "'0'", ":", "# If 2nd arg is zero, snprintf is used to calculate size.", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "3", ",", "'If you can, use sizeof(%s) instead of %s as the 2nd arg '", "'to snprintf.'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "# Check if some verboten C functions are being used.", "if", "Search", "(", "r'\\bsprintf\\s*\\('", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "5", ",", "'Never use sprintf. Use snprintf instead.'", ")", "match", "=", "Search", "(", "r'\\b(strcpy|strcat)\\s*\\('", ",", "line", ")", "if", "match", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "4", ",", "'Almost always, snprintf is better than %s'", "%", "match", ".", "group", "(", "1", ")", ")" ]
Check for printf related issues. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Check", "for", "printf", "related", "issues", "." ]
python
valid
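The snprintf rule keys off a literal integer captured as the second argument; the same regex can be exercised directly (Search above is cpplint's wrapper around re.search):

    import re

    lines = ['snprintf(buf, 10, "%d", x);',            # flagged: literal size
             'snprintf(buf, 0, "%d", x);',             # allowed: size calc
             'snprintf(buf, sizeof(buf), "%d", x);']   # no literal: no match
    for line in lines:
        m = re.search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
        if m and m.group(2) != '0':
            print('use sizeof(%s) instead of %s' % (m.group(1), m.group(2)))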
BerkeleyAutomation/autolab_core
autolab_core/transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L1649-L1662
def concatenate_matrices(*matrices): """Return concatenation of series of transformation matrices. >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 >>> numpy.allclose(M, concatenate_matrices(M)) True >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) True """ M = numpy.identity(4) for i in matrices: M = numpy.dot(M, i) return M
[ "def", "concatenate_matrices", "(", "*", "matrices", ")", ":", "M", "=", "numpy", ".", "identity", "(", "4", ")", "for", "i", "in", "matrices", ":", "M", "=", "numpy", ".", "dot", "(", "M", ",", "i", ")", "return", "M" ]
Return concatenation of series of transformation matrices. >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 >>> numpy.allclose(M, concatenate_matrices(M)) True >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) True
[ "Return", "concatenation", "of", "series", "of", "transformation", "matrices", "." ]
python
train
raphaelvallat/pingouin
pingouin/plotting.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/plotting.py#L321-L486
def qqplot(x, dist='norm', sparams=(), confidence=.95, figsize=(5, 4), ax=None): """Quantile-Quantile plot. Parameters ---------- x : array_like Sample data. dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a `scipy.stats.distributions` instance (i.e. they have a ``ppf`` method) are also accepted. sparams : tuple, optional Distribution-specific shape parameters (shape parameters, location, and scale). See :py:func:`scipy.stats.probplot` for more details. confidence : float Confidence level (.95 = 95%) for point-wise confidence envelope. Pass False for no envelope. figsize : tuple Figsize in inches ax : matplotlib axes Axis on which to draw the plot Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- This function returns a scatter plot of the quantile of the sample data `x` against the theoretical quantiles of the distribution given in `dist` (default = 'norm'). The points plotted in a Q–Q plot are always non-decreasing when viewed from left to right. If the two distributions being compared are identical, the Q–Q plot follows the 45° line y = x. If the two distributions agree after linearly transforming the values in one of the distributions, then the Q–Q plot follows some line, but not necessarily the line y = x. If the general trend of the Q–Q plot is flatter than the line y = x, the distribution plotted on the horizontal axis is more dispersed than the distribution plotted on the vertical axis. Conversely, if the general trend of the Q–Q plot is steeper than the line y = x, the distribution plotted on the vertical axis is more dispersed than the distribution plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped, indicating that one of the distributions is more skewed than the other, or that one of the distributions has heavier tails than the other. In addition, the function also plots a best-fit line (linear regression) for the data and annotates the plot with the coefficient of determination :math:`R^2`. Note that the intercept and slope of the linear regression between the quantiles gives a measure of the relative location and relative scale of the samples. References ---------- .. [1] https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot .. [2] https://github.com/cran/car/blob/master/R/qqPlot.R .. [3] Fox, J. (2008), Applied Regression Analysis and Generalized Linear Models, 2nd Ed., Sage Publications, Inc. Examples -------- Q-Q plot using a normal theoretical distribution: .. plot:: >>> import numpy as np >>> import pingouin as pg >>> np.random.seed(123) >>> x = np.random.normal(size=50) >>> ax = pg.qqplot(x, dist='norm') Two Q-Q plots using two separate axes: .. plot:: >>> import numpy as np >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> np.random.seed(123) >>> x = np.random.normal(size=50) >>> x_exp = np.random.exponential(size=50) >>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4)) >>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False) >>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2) Using custom location / scale parameters as well as another Seaborn style .. 
plot:: >>> import numpy as np >>> import seaborn as sns >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> np.random.seed(123) >>> x = np.random.normal(size=50) >>> mean, std = 0, 0.8 >>> sns.set_style('darkgrid') >>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std)) """ if isinstance(dist, str): dist = getattr(stats, dist) x = np.asarray(x) x = x[~np.isnan(x)] # NaN are automatically removed # Extract quantiles and regression quantiles = stats.probplot(x, sparams=sparams, dist=dist, fit=False) theor, observed = quantiles[0], quantiles[1] fit_params = dist.fit(x) loc = fit_params[-2] scale = fit_params[-1] shape = fit_params[0] if len(fit_params) == 3 else None # Observed values to observed quantiles if loc != 0 and scale != 1: observed = (np.sort(observed) - fit_params[-2]) / fit_params[-1] # Linear regression slope, intercept, r, _, _ = stats.linregress(theor, observed) # Start the plot if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize) ax.plot(theor, observed, 'bo') stats.morestats._add_axis_labels_title(ax, xlabel='Theoretical quantiles', ylabel='Ordered quantiles', title='Q-Q Plot') # Add diagonal line end_pts = [ax.get_xlim(), ax.get_ylim()] end_pts[0] = min(end_pts[0]) end_pts[1] = max(end_pts[1]) ax.plot(end_pts, end_pts, color='slategrey', lw=1.5) ax.set_xlim(end_pts) ax.set_ylim(end_pts) # Add regression line and annotate R2 fit_val = slope * theor + intercept ax.plot(theor, fit_val, 'r-', lw=2) posx = end_pts[0] + 0.60 * (end_pts[1] - end_pts[0]) posy = end_pts[0] + 0.10 * (end_pts[1] - end_pts[0]) ax.text(posx, posy, "$R^2=%.3f$" % r**2) if confidence is not False: # Confidence envelope n = x.size P = _ppoints(n) crit = stats.norm.ppf(1 - (1 - confidence) / 2) pdf = dist.pdf(theor) if shape is None else dist.pdf(theor, shape) se = (slope / pdf) * np.sqrt(P * (1 - P) / n) upper = fit_val + crit * se lower = fit_val - crit * se ax.plot(theor, upper, 'r--', lw=1.25) ax.plot(theor, lower, 'r--', lw=1.25) return ax
[ "def", "qqplot", "(", "x", ",", "dist", "=", "'norm'", ",", "sparams", "=", "(", ")", ",", "confidence", "=", ".95", ",", "figsize", "=", "(", "5", ",", "4", ")", ",", "ax", "=", "None", ")", ":", "if", "isinstance", "(", "dist", ",", "str", ")", ":", "dist", "=", "getattr", "(", "stats", ",", "dist", ")", "x", "=", "np", ".", "asarray", "(", "x", ")", "x", "=", "x", "[", "~", "np", ".", "isnan", "(", "x", ")", "]", "# NaN are automatically removed", "# Extract quantiles and regression", "quantiles", "=", "stats", ".", "probplot", "(", "x", ",", "sparams", "=", "sparams", ",", "dist", "=", "dist", ",", "fit", "=", "False", ")", "theor", ",", "observed", "=", "quantiles", "[", "0", "]", ",", "quantiles", "[", "1", "]", "fit_params", "=", "dist", ".", "fit", "(", "x", ")", "loc", "=", "fit_params", "[", "-", "2", "]", "scale", "=", "fit_params", "[", "-", "1", "]", "shape", "=", "fit_params", "[", "0", "]", "if", "len", "(", "fit_params", ")", "==", "3", "else", "None", "# Observed values to observed quantiles", "if", "loc", "!=", "0", "and", "scale", "!=", "1", ":", "observed", "=", "(", "np", ".", "sort", "(", "observed", ")", "-", "fit_params", "[", "-", "2", "]", ")", "/", "fit_params", "[", "-", "1", "]", "# Linear regression", "slope", ",", "intercept", ",", "r", ",", "_", ",", "_", "=", "stats", ".", "linregress", "(", "theor", ",", "observed", ")", "# Start the plot", "if", "ax", "is", "None", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "figsize", ")", "ax", ".", "plot", "(", "theor", ",", "observed", ",", "'bo'", ")", "stats", ".", "morestats", ".", "_add_axis_labels_title", "(", "ax", ",", "xlabel", "=", "'Theoretical quantiles'", ",", "ylabel", "=", "'Ordered quantiles'", ",", "title", "=", "'Q-Q Plot'", ")", "# Add diagonal line", "end_pts", "=", "[", "ax", ".", "get_xlim", "(", ")", ",", "ax", ".", "get_ylim", "(", ")", "]", "end_pts", "[", "0", "]", "=", "min", "(", "end_pts", "[", "0", "]", ")", "end_pts", "[", "1", "]", "=", "max", "(", "end_pts", "[", "1", "]", ")", "ax", ".", "plot", "(", "end_pts", ",", "end_pts", ",", "color", "=", "'slategrey'", ",", "lw", "=", "1.5", ")", "ax", ".", "set_xlim", "(", "end_pts", ")", "ax", ".", "set_ylim", "(", "end_pts", ")", "# Add regression line and annotate R2", "fit_val", "=", "slope", "*", "theor", "+", "intercept", "ax", ".", "plot", "(", "theor", ",", "fit_val", ",", "'r-'", ",", "lw", "=", "2", ")", "posx", "=", "end_pts", "[", "0", "]", "+", "0.60", "*", "(", "end_pts", "[", "1", "]", "-", "end_pts", "[", "0", "]", ")", "posy", "=", "end_pts", "[", "0", "]", "+", "0.10", "*", "(", "end_pts", "[", "1", "]", "-", "end_pts", "[", "0", "]", ")", "ax", ".", "text", "(", "posx", ",", "posy", ",", "\"$R^2=%.3f$\"", "%", "r", "**", "2", ")", "if", "confidence", "is", "not", "False", ":", "# Confidence envelope", "n", "=", "x", ".", "size", "P", "=", "_ppoints", "(", "n", ")", "crit", "=", "stats", ".", "norm", ".", "ppf", "(", "1", "-", "(", "1", "-", "confidence", ")", "/", "2", ")", "pdf", "=", "dist", ".", "pdf", "(", "theor", ")", "if", "shape", "is", "None", "else", "dist", ".", "pdf", "(", "theor", ",", "shape", ")", "se", "=", "(", "slope", "/", "pdf", ")", "*", "np", ".", "sqrt", "(", "P", "*", "(", "1", "-", "P", ")", "/", "n", ")", "upper", "=", "fit_val", "+", "crit", "*", "se", "lower", "=", "fit_val", "-", "crit", "*", "se", "ax", ".", "plot", "(", "theor", ",", "upper", ",", "'r--'", ",", "lw", "=", "1.25", ")", "ax", ".", "plot", "(", "theor", ",", "lower", ",", "'r--'", 
",", "lw", "=", "1.25", ")", "return", "ax" ]
Quantile-Quantile plot. Parameters ---------- x : array_like Sample data. dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a `scipy.stats.distributions` instance (i.e. they have a ``ppf`` method) are also accepted. sparams : tuple, optional Distribution-specific shape parameters (shape parameters, location, and scale). See :py:func:`scipy.stats.probplot` for more details. confidence : float Confidence level (.95 = 95%) for point-wise confidence envelope. Pass False for no envelope. figsize : tuple Figsize in inches ax : matplotlib axes Axis on which to draw the plot Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- This function returns a scatter plot of the quantile of the sample data `x` against the theoretical quantiles of the distribution given in `dist` (default = 'norm'). The points plotted in a Q–Q plot are always non-decreasing when viewed from left to right. If the two distributions being compared are identical, the Q–Q plot follows the 45° line y = x. If the two distributions agree after linearly transforming the values in one of the distributions, then the Q–Q plot follows some line, but not necessarily the line y = x. If the general trend of the Q–Q plot is flatter than the line y = x, the distribution plotted on the horizontal axis is more dispersed than the distribution plotted on the vertical axis. Conversely, if the general trend of the Q–Q plot is steeper than the line y = x, the distribution plotted on the vertical axis is more dispersed than the distribution plotted on the horizontal axis. Q–Q plots are often arced, or "S" shaped, indicating that one of the distributions is more skewed than the other, or that one of the distributions has heavier tails than the other. In addition, the function also plots a best-fit line (linear regression) for the data and annotates the plot with the coefficient of determination :math:`R^2`. Note that the intercept and slope of the linear regression between the quantiles gives a measure of the relative location and relative scale of the samples. References ---------- .. [1] https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot .. [2] https://github.com/cran/car/blob/master/R/qqPlot.R .. [3] Fox, J. (2008), Applied Regression Analysis and Generalized Linear Models, 2nd Ed., Sage Publications, Inc. Examples -------- Q-Q plot using a normal theoretical distribution: .. plot:: >>> import numpy as np >>> import pingouin as pg >>> np.random.seed(123) >>> x = np.random.normal(size=50) >>> ax = pg.qqplot(x, dist='norm') Two Q-Q plots using two separate axes: .. plot:: >>> import numpy as np >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> np.random.seed(123) >>> x = np.random.normal(size=50) >>> x_exp = np.random.exponential(size=50) >>> fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4)) >>> ax1 = pg.qqplot(x, dist='norm', ax=ax1, confidence=False) >>> ax2 = pg.qqplot(x_exp, dist='expon', ax=ax2) Using custom location / scale parameters as well as another Seaborn style .. plot:: >>> import numpy as np >>> import seaborn as sns >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> np.random.seed(123) >>> x = np.random.normal(size=50) >>> mean, std = 0, 0.8 >>> sns.set_style('darkgrid') >>> ax = pg.qqplot(x, dist='norm', sparams=(mean, std))
[ "Quantile", "-", "Quantile", "plot", "." ]
python
train
quantumlib/Cirq
cirq/circuits/text_diagram_drawer.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/text_diagram_drawer.py#L239-L307
def render(self,
           horizontal_spacing: int = 1,
           vertical_spacing: int = 1,
           crossing_char: str = None,
           use_unicode_characters: bool = True) -> str:
    """Outputs text containing the diagram."""

    block_diagram = BlockDiagramDrawer()

    w = self.width()
    h = self.height()

    # Communicate padding into block diagram.
    for x in range(0, w - 1):
        block_diagram.set_col_min_width(
            x*2 + 1,
            # Horizontal separation looks narrow, so partials round up.
            int(np.ceil(self.horizontal_padding.get(x, horizontal_spacing)))
        )
        block_diagram.set_col_min_width(x*2, 1)
    for y in range(0, h - 1):
        block_diagram.set_row_min_height(
            y*2 + 1,
            # Vertical separation looks wide, so partials round down.
            int(np.floor(self.vertical_padding.get(y, vertical_spacing)))
        )
        block_diagram.set_row_min_height(y*2, 1)

    # Draw vertical lines.
    for x_b, y1_b, y2_b, emphasize in self.vertical_lines:
        x = int(x_b * 2)
        y1, y2 = int(min(y1_b, y2_b) * 2), int(max(y1_b, y2_b) * 2)
        charset = pick_charset(use_unicode_characters, emphasize)

        # Caps.
        block_diagram.mutable_block(x, y1).draw_curve(charset, bottom=True)
        block_diagram.mutable_block(x, y2).draw_curve(charset, top=True)

        # Span.
        for y in range(y1 + 1, y2):
            block_diagram.mutable_block(x, y).draw_curve(
                charset, top=True, bottom=True)

    # Draw horizontal lines.
    for y_b, x1_b, x2_b, emphasize in self.horizontal_lines:
        y = int(y_b * 2)
        x1, x2 = int(min(x1_b, x2_b) * 2), int(max(x1_b, x2_b) * 2)
        charset = pick_charset(use_unicode_characters, emphasize)

        # Caps.
        block_diagram.mutable_block(x1, y).draw_curve(charset, right=True)
        block_diagram.mutable_block(x2, y).draw_curve(charset, left=True)

        # Span.
        for x in range(x1 + 1, x2):
            block_diagram.mutable_block(x, y).draw_curve(
                charset, left=True, right=True, crossing_char=crossing_char)

    # Place entries.
    for (x, y), v in self.entries.items():
        x *= 2
        y *= 2
        block_diagram.mutable_block(x, y).content = v.text

    return block_diagram.render()
[ "def", "render", "(", "self", ",", "horizontal_spacing", ":", "int", "=", "1", ",", "vertical_spacing", ":", "int", "=", "1", ",", "crossing_char", ":", "str", "=", "None", ",", "use_unicode_characters", ":", "bool", "=", "True", ")", "->", "str", ":", "block_diagram", "=", "BlockDiagramDrawer", "(", ")", "w", "=", "self", ".", "width", "(", ")", "h", "=", "self", ".", "height", "(", ")", "# Communicate padding into block diagram.", "for", "x", "in", "range", "(", "0", ",", "w", "-", "1", ")", ":", "block_diagram", ".", "set_col_min_width", "(", "x", "*", "2", "+", "1", ",", "# Horizontal separation looks narrow, so partials round up.", "int", "(", "np", ".", "ceil", "(", "self", ".", "horizontal_padding", ".", "get", "(", "x", ",", "horizontal_spacing", ")", ")", ")", ")", "block_diagram", ".", "set_col_min_width", "(", "x", "*", "2", ",", "1", ")", "for", "y", "in", "range", "(", "0", ",", "h", "-", "1", ")", ":", "block_diagram", ".", "set_row_min_height", "(", "y", "*", "2", "+", "1", ",", "# Vertical separation looks wide, so partials round down.", "int", "(", "np", ".", "floor", "(", "self", ".", "vertical_padding", ".", "get", "(", "y", ",", "vertical_spacing", ")", ")", ")", ")", "block_diagram", ".", "set_row_min_height", "(", "y", "*", "2", ",", "1", ")", "# Draw vertical lines.", "for", "x_b", ",", "y1_b", ",", "y2_b", ",", "emphasize", "in", "self", ".", "vertical_lines", ":", "x", "=", "int", "(", "x_b", "*", "2", ")", "y1", ",", "y2", "=", "int", "(", "min", "(", "y1_b", ",", "y2_b", ")", "*", "2", ")", ",", "int", "(", "max", "(", "y1_b", ",", "y2_b", ")", "*", "2", ")", "charset", "=", "pick_charset", "(", "use_unicode_characters", ",", "emphasize", ")", "# Caps.", "block_diagram", ".", "mutable_block", "(", "x", ",", "y1", ")", ".", "draw_curve", "(", "charset", ",", "bottom", "=", "True", ")", "block_diagram", ".", "mutable_block", "(", "x", ",", "y2", ")", ".", "draw_curve", "(", "charset", ",", "top", "=", "True", ")", "# Span.", "for", "y", "in", "range", "(", "y1", "+", "1", ",", "y2", ")", ":", "block_diagram", ".", "mutable_block", "(", "x", ",", "y", ")", ".", "draw_curve", "(", "charset", ",", "top", "=", "True", ",", "bottom", "=", "True", ")", "# Draw horizontal lines.", "for", "y_b", ",", "x1_b", ",", "x2_b", ",", "emphasize", "in", "self", ".", "horizontal_lines", ":", "y", "=", "int", "(", "y_b", "*", "2", ")", "x1", ",", "x2", "=", "int", "(", "min", "(", "x1_b", ",", "x2_b", ")", "*", "2", ")", ",", "int", "(", "max", "(", "x1_b", ",", "x2_b", ")", "*", "2", ")", "charset", "=", "pick_charset", "(", "use_unicode_characters", ",", "emphasize", ")", "# Caps.", "block_diagram", ".", "mutable_block", "(", "x1", ",", "y", ")", ".", "draw_curve", "(", "charset", ",", "right", "=", "True", ")", "block_diagram", ".", "mutable_block", "(", "x2", ",", "y", ")", ".", "draw_curve", "(", "charset", ",", "left", "=", "True", ")", "# Span.", "for", "x", "in", "range", "(", "x1", "+", "1", ",", "x2", ")", ":", "block_diagram", ".", "mutable_block", "(", "x", ",", "y", ")", ".", "draw_curve", "(", "charset", ",", "left", "=", "True", ",", "right", "=", "True", ",", "crossing_char", "=", "crossing_char", ")", "# Place entries.", "for", "(", "x", ",", "y", ")", ",", "v", "in", "self", ".", "entries", ".", "items", "(", ")", ":", "x", "*=", "2", "y", "*=", "2", "block_diagram", ".", "mutable_block", "(", "x", ",", "y", ")", ".", "content", "=", "v", ".", "text", "return", "block_diagram", ".", "render", "(", ")" ]
Outputs text containing the diagram.
[ "Outputs", "text", "containing", "the", "diagram", "." ]
python
train
HazardDede/dictmentor
dictmentor/extensions.py
https://github.com/HazardDede/dictmentor/blob/f50ca26ed04f7a924cde6e4d464c4f6ccba4e320/dictmentor/extensions.py#L220-L225
def _config(self) -> ExtensionConfig:
    """
    Tells the processor to look for the specified pattern in node key only.

    Returns:
        The scanner configuration.
    """
    return dict(pattern=self.__pattern__, search_in_keys=True,
                search_in_values=False)
[ "def", "_config", "(", "self", ")", "->", "ExtensionConfig", ":", "return", "dict", "(", "pattern", "=", "self", ".", "__pattern__", ",", "search_in_keys", "=", "True", ",", "search_in_values", "=", "False", ")" ]
Tells the processor to look for the specified pattern in node key only. Returns: The scanner configuration.
[ "Tells", "the", "processor", "to", "look", "for", "the", "specified", "pattern", "in", "node", "key", "only", ".", "Returns", ":", "The", "scanner", "configuration", "." ]
python
train
sernst/cauldron
cauldron/session/exposed.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/exposed.py#L50-L52
def shared(self) -> typing.Union[None, SharedCache]:
    """The shared display object associated with this project."""
    return self._project.shared if self._project else None
[ "def", "shared", "(", "self", ")", "->", "typing", ".", "Union", "[", "None", ",", "SharedCache", "]", ":", "return", "self", ".", "_project", ".", "shared", "if", "self", ".", "_project", "else", "None" ]
The shared display object associated with this project.
[ "The", "shared", "display", "object", "associated", "with", "this", "project", "." ]
python
train
git-afsantos/bonsai
bonsai/model.py
https://github.com/git-afsantos/bonsai/blob/aa5af3f535b3b506bfc95c107c501fc9c4bcd072/bonsai/model.py#L1350-L1354
def get_branches(self):
    """Return a list with the conditional branch and the default branch."""
    if self.else_branch:
        return [self.then_branch, self.else_branch]
    return [self.then_branch]
[ "def", "get_branches", "(", "self", ")", ":", "if", "self", ".", "else_branch", ":", "return", "[", "self", ".", "then_branch", ",", "self", ".", "else_branch", "]", "return", "[", "self", ".", "then_branch", "]" ]
Return a list with the conditional branch and the default branch.
[ "Return", "a", "list", "with", "the", "conditional", "branch", "and", "the", "default", "branch", "." ]
python
train
softlayer/softlayer-python
SoftLayer/CLI/file/snapshot/order.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/file/snapshot/order.py#L27-L53
def cli(env, volume_id, capacity, tier, upgrade):
    """Order snapshot space for a file storage volume."""
    file_manager = SoftLayer.FileStorageManager(env.client)

    if tier is not None:
        tier = float(tier)

    try:
        order = file_manager.order_snapshot_space(
            volume_id,
            capacity=capacity,
            tier=tier,
            upgrade=upgrade
        )
    except ValueError as ex:
        raise exceptions.ArgumentError(str(ex))

    if 'placedOrder' in order.keys():
        click.echo("Order #{0} placed successfully!".format(
            order['placedOrder']['id']))
        for item in order['placedOrder']['items']:
            click.echo(" > %s" % item['description'])
        if 'status' in order['placedOrder'].keys():
            click.echo(" > Order status: %s" % order['placedOrder']['status'])
    else:
        click.echo("Order could not be placed! Please verify your options " +
                   "and try again.")
[ "def", "cli", "(", "env", ",", "volume_id", ",", "capacity", ",", "tier", ",", "upgrade", ")", ":", "file_manager", "=", "SoftLayer", ".", "FileStorageManager", "(", "env", ".", "client", ")", "if", "tier", "is", "not", "None", ":", "tier", "=", "float", "(", "tier", ")", "try", ":", "order", "=", "file_manager", ".", "order_snapshot_space", "(", "volume_id", ",", "capacity", "=", "capacity", ",", "tier", "=", "tier", ",", "upgrade", "=", "upgrade", ")", "except", "ValueError", "as", "ex", ":", "raise", "exceptions", ".", "ArgumentError", "(", "str", "(", "ex", ")", ")", "if", "'placedOrder'", "in", "order", ".", "keys", "(", ")", ":", "click", ".", "echo", "(", "\"Order #{0} placed successfully!\"", ".", "format", "(", "order", "[", "'placedOrder'", "]", "[", "'id'", "]", ")", ")", "for", "item", "in", "order", "[", "'placedOrder'", "]", "[", "'items'", "]", ":", "click", ".", "echo", "(", "\" > %s\"", "%", "item", "[", "'description'", "]", ")", "if", "'status'", "in", "order", "[", "'placedOrder'", "]", ".", "keys", "(", ")", ":", "click", ".", "echo", "(", "\" > Order status: %s\"", "%", "order", "[", "'placedOrder'", "]", "[", "'status'", "]", ")", "else", ":", "click", ".", "echo", "(", "\"Order could not be placed! Please verify your options \"", "+", "\"and try again.\"", ")" ]
Order snapshot space for a file storage volume.
[ "Order", "snapshot", "space", "for", "a", "file", "storage", "volume", "." ]
python
train
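A minimal sketch of driving the same operation from Python rather than the CLI; it assumes API credentials are available in the environment (`create_client_from_env` is this library's standard client factory) and the volume ID below is a placeholder:

import SoftLayer

# Hypothetical volume ID; capacity/tier mirror the CLI options above.
client = SoftLayer.create_client_from_env()
file_manager = SoftLayer.FileStorageManager(client)
order = file_manager.order_snapshot_space(12345678, capacity=40,
                                          tier=0.25, upgrade=False)
print(order.get('placedOrder', {}).get('id'))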
Azure/azure-cli-extensions
src/interactive/azext_interactive/azclishell/app.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/interactive/azext_interactive/azclishell/app.py#L431-L468
def example_repl(self, text, example, start_index, continue_flag):
    """ REPL for interactive tutorials """
    if start_index:
        start_index = start_index + 1
        cmd = ' '.join(text.split()[:start_index])
        example_cli = CommandLineInterface(
            application=self.create_application(
                full_layout=False),
            eventloop=create_eventloop())
        example_cli.buffers['example_line'].reset(
            initial_document=Document(u'{}\n'.format(
                add_new_lines(example)))
        )
        while start_index < len(text.split()):
            if self.default_command:
                cmd = cmd.replace(self.default_command + ' ', '')
            example_cli.buffers[DEFAULT_BUFFER].reset(
                initial_document=Document(
                    u'{}'.format(cmd),
                    cursor_position=len(cmd)))
            example_cli.request_redraw()
            answer = example_cli.run()
            if not answer:
                return "", True
            answer = answer.text
            if answer.strip('\n') == cmd.strip('\n'):
                continue
            else:
                if len(answer.split()) > 1:
                    start_index += 1
                    cmd += " " + answer.split()[-1] + " " +\
                        u' '.join(text.split()[start_index:start_index + 1])
        example_cli.exit()
        del example_cli
    else:
        cmd = text

    return cmd, continue_flag
[ "def", "example_repl", "(", "self", ",", "text", ",", "example", ",", "start_index", ",", "continue_flag", ")", ":", "if", "start_index", ":", "start_index", "=", "start_index", "+", "1", "cmd", "=", "' '", ".", "join", "(", "text", ".", "split", "(", ")", "[", ":", "start_index", "]", ")", "example_cli", "=", "CommandLineInterface", "(", "application", "=", "self", ".", "create_application", "(", "full_layout", "=", "False", ")", ",", "eventloop", "=", "create_eventloop", "(", ")", ")", "example_cli", ".", "buffers", "[", "'example_line'", "]", ".", "reset", "(", "initial_document", "=", "Document", "(", "u'{}\\n'", ".", "format", "(", "add_new_lines", "(", "example", ")", ")", ")", ")", "while", "start_index", "<", "len", "(", "text", ".", "split", "(", ")", ")", ":", "if", "self", ".", "default_command", ":", "cmd", "=", "cmd", ".", "replace", "(", "self", ".", "default_command", "+", "' '", ",", "''", ")", "example_cli", ".", "buffers", "[", "DEFAULT_BUFFER", "]", ".", "reset", "(", "initial_document", "=", "Document", "(", "u'{}'", ".", "format", "(", "cmd", ")", ",", "cursor_position", "=", "len", "(", "cmd", ")", ")", ")", "example_cli", ".", "request_redraw", "(", ")", "answer", "=", "example_cli", ".", "run", "(", ")", "if", "not", "answer", ":", "return", "\"\"", ",", "True", "answer", "=", "answer", ".", "text", "if", "answer", ".", "strip", "(", "'\\n'", ")", "==", "cmd", ".", "strip", "(", "'\\n'", ")", ":", "continue", "else", ":", "if", "len", "(", "answer", ".", "split", "(", ")", ")", ">", "1", ":", "start_index", "+=", "1", "cmd", "+=", "\" \"", "+", "answer", ".", "split", "(", ")", "[", "-", "1", "]", "+", "\" \"", "+", "u' '", ".", "join", "(", "text", ".", "split", "(", ")", "[", "start_index", ":", "start_index", "+", "1", "]", ")", "example_cli", ".", "exit", "(", ")", "del", "example_cli", "else", ":", "cmd", "=", "text", "return", "cmd", ",", "continue_flag" ]
REPL for interactive tutorials
[ "REPL", "for", "interactive", "tutorials" ]
python
train
apache/spark
python/pyspark/ml/param/__init__.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L138-L146
def toListInt(value):
    """
    Convert a value to list of ints, if possible.
    """
    if TypeConverters._can_convert_to_list(value):
        value = TypeConverters.toList(value)
        if all(map(lambda v: TypeConverters._is_integer(v), value)):
            return [int(v) for v in value]
    raise TypeError("Could not convert %s to list of ints" % value)
[ "def", "toListInt", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_integer", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "int", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of ints\"", "%", "value", ")" ]
Convert a value to list of ints, if possible.
[ "Convert", "a", "value", "to", "list", "of", "ints", "if", "possible", "." ]
python
train
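A short sketch of the converter's behavior, assuming a standard `pyspark` install; floats with integral values are cast, non-integral values fall through to the raise statement above:

from pyspark.ml.param import TypeConverters

print(TypeConverters.toListInt([1.0, 2.0, 3.0]))  # [1, 2, 3]
print(TypeConverters.toListInt((0, 1, 2)))        # [0, 1, 2]
try:
    TypeConverters.toListInt([1.5])               # non-integral value
except TypeError as err:
    print(err)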
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L7333-L7337
def xpathEval(self, str):
    """Evaluate the XPath Location Path in the given context. """
    ret = libxml2mod.xmlXPathEval(str, self._o)
    if ret is None:
        raise xpathError('xmlXPathEval() failed')
    return xpathObjectRet(ret)
[ "def", "xpathEval", "(", "self", ",", "str", ")", ":", "ret", "=", "libxml2mod", ".", "xmlXPathEval", "(", "str", ",", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "xpathError", "(", "'xmlXPathEval() failed'", ")", "return", "xpathObjectRet", "(", "ret", ")" ]
Evaluate the XPath Location Path in the given context.
[ "Evaluate", "the", "XPath", "Location", "Path", "in", "the", "given", "context", "." ]
python
train
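A hedged usage sketch: `xpathEval` above is a method of an XPath context object, so the call chain below (parse, create context, evaluate, free) reflects the usual libxml2 binding workflow:

import libxml2

doc = libxml2.parseDoc('<root><item>a</item><item>b</item></root>')
ctxt = doc.xpathNewContext()
for node in ctxt.xpathEval('//item'):  # raises xpathError on failure
    print(node.content)                # 'a', then 'b'
ctxt.xpathFreeContext()
doc.freeDoc()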
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/formatting/deserialize.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/deserialize.py#L52-L73
def validate_header(header, header_auth, raw_header, data_key):
    """Validates the header using the header authentication data.

    :param header: Deserialized header
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param header_auth: Deserialized header auth
    :type header_auth: aws_encryption_sdk.internal.structures.MessageHeaderAuthentication
    :param bytes raw_header: Raw header bytes
    :param bytes data_key: Data key with which to perform validation
    :raises SerializationError: if header authorization fails
    """
    _LOGGER.debug("Starting header validation")
    try:
        decrypt(
            algorithm=header.algorithm,
            key=data_key,
            encrypted_data=EncryptedData(header_auth.iv, b"", header_auth.tag),
            associated_data=raw_header,
        )
    except InvalidTag:
        raise SerializationError("Header authorization failed")
[ "def", "validate_header", "(", "header", ",", "header_auth", ",", "raw_header", ",", "data_key", ")", ":", "_LOGGER", ".", "debug", "(", "\"Starting header validation\"", ")", "try", ":", "decrypt", "(", "algorithm", "=", "header", ".", "algorithm", ",", "key", "=", "data_key", ",", "encrypted_data", "=", "EncryptedData", "(", "header_auth", ".", "iv", ",", "b\"\"", ",", "header_auth", ".", "tag", ")", ",", "associated_data", "=", "raw_header", ",", ")", "except", "InvalidTag", ":", "raise", "SerializationError", "(", "\"Header authorization failed\"", ")" ]
Validates the header using the header authentication data. :param header: Deserialized header :type header: aws_encryption_sdk.structures.MessageHeader :param header_auth: Deserialized header auth :type header_auth: aws_encryption_sdk.internal.structures.MessageHeaderAuthentication :param bytes raw_header: Raw header bytes :param bytes data_key: Data key with which to perform validation :raises SerializationError: if header authorization fails
[ "Validates", "the", "header", "using", "the", "header", "authentication", "data", "." ]
python
train
fulfilio/python-magento
magento/catalog.py
https://github.com/fulfilio/python-magento/blob/720ec136a6e438a9ee4ee92848a9820b91732750/magento/catalog.py#L253-L270
def info(self, product, store_view=None, attributes=None,
         identifierType=None):
    """
    Retrieve product data

    :param product: ID or SKU of product
    :param store_view: ID or Code of store view
    :param attributes: List of fields required
    :param identifierType: Defines whether the product or SKU value is
                           passed in the "product" parameter.

    :return: `dict` of values
    """
    return self.call(
        'catalog_product.info', [
            product, store_view, attributes, identifierType
        ]
    )
[ "def", "info", "(", "self", ",", "product", ",", "store_view", "=", "None", ",", "attributes", "=", "None", ",", "identifierType", "=", "None", ")", ":", "return", "self", ".", "call", "(", "'catalog_product.info'", ",", "[", "product", ",", "store_view", ",", "attributes", ",", "identifierType", "]", ")" ]
Retrieve product data :param product: ID or SKU of product :param store_view: ID or Code of store view :param attributes: List of fields required :param identifierType: Defines whether the product or SKU value is passed in the "product" parameter. :return: `dict` of values
[ "Retrieve", "product", "data" ]
python
train
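An illustrative call, hedged: the class name `Product` and the context-manager usage are assumptions about this library's public wrapper around `catalog_product`; only the `info` signature comes from the record above, and the connection details are placeholders:

from magento import Product

with Product('http://example.com/', 'apiuser', 'apipass') as product_api:
    data = product_api.info('SKU-123', identifierType='sku')
    print(data.get('name'))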
dereneaton/ipyrad
ipyrad/analysis/tetrad2.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/tetrad2.py#L1169-L1278
def store_equal(self):
    """
    Takes a tetrad class object and populates array with random
    quartets sampled equally among splits of the tree so that
    deep splits are not overrepresented relative to rare splits,
    like those near the tips.
    """
    with h5py.File(self.database.input, 'a') as io5:
        fillsets = io5["quartets"]

        ## require guidetree
        if not os.path.exists(self.files.tree):
            raise IPyradWarningExit(
                "To use sampling method 'equal' requires a guidetree")
        tre = ete3.Tree(self.files.tree)
        tre.unroot()
        tre.resolve_polytomy(recursive=True)

        ## randomly sample internals splits
        splits = [([self.samples.index(z.name) for z in i],
                   [self.samples.index(z.name) for z in j]) \
                   for (i, j) in tre.get_edges()]

        ## only keep internal splits, not single tip edges
        splits = [i for i in splits if all([len(j) > 1 for j in i])]

        ## how many min quartets should be equally sampled from each split
        squarts = self.params.nquartets // len(splits)

        ## keep track of how many iterators are saturable.
        saturable = 0

        ## turn each into an iterable split sampler
        ## if the nquartets for that split is small, then sample all,
        ## if it is big then make it a random sampler for that split.
        qiters = []

        ## iterate over splits sampling quartets evenly
        for idx, split in enumerate(splits):
            ## if small number at this split then sample all possible sets
            ## we will exhaust this quickly and then switch to random for
            ## the larger splits.
            total = n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2)
            if total < squarts*2:
                qiter = (i+j for (i, j) in itertools.product(
                    itertools.combinations(split[0], 2),
                    itertools.combinations(split[1], 2)))
                saturable += 1

            ## else create random sampler across that split, this is slower
            ## because it can propose the same split repeatedly and so we
            ## have to check it against the 'sampled' set.
            else:
                qiter = (random_product(split[0], split[1]) for _ \
                         in xrange(self.params.nquartets))

            ## store all iterators into a list
            qiters.append((idx, qiter))

        ## create infinite cycler of qiters
        qitercycle = itertools.cycle(qiters)

        ## store visited quartets
        sampled = set()

        ## fill chunksize at a time
        i = 0
        empty = set()
        edge_targeted = 0
        random_targeted = 0

        ## keep filling quartets until nquartets are sampled.
        while i < self.params.nquartets:
            ## grab the next iterator
            cycle, qiter = qitercycle.next()

            ## sample from iterators, store sorted set.
            try:
                qrtsamp = tuple(sorted(qiter.next()))
                if qrtsamp not in sampled:
                    sampled.add(qrtsamp)
                    edge_targeted += 1
                    i += 1
                    ## print progress bar update to engine stdout
                    if not i % self._chunksize:
                        print(min(i, self.params.nquartets))

            except StopIteration:
                empty.add(cycle)
                if len(empty) == saturable:
                    break

        ## if array is not full then add random samples
        while i <= self.params.nquartets:
            newset = tuple(sorted(np.random.choice(
                range(len(self.samples)), 4, replace=False)))
            if newset not in sampled:
                sampled.add(newset)
                random_targeted += 1
                i += 1
                ## print progress bar update to engine stdout
                if not i % self._chunksize:
                    print(min(i, self.params.nquartets))

        ## store into database
        print(self.params.nquartets)
        fillsets[:] = np.array(tuple(sampled))
        del sampled
[ "def", "store_equal", "(", "self", ")", ":", "with", "h5py", ".", "File", "(", "self", ".", "database", ".", "input", ",", "'a'", ")", "as", "io5", ":", "fillsets", "=", "io5", "[", "\"quartets\"", "]", "## require guidetree", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "files", ".", "tree", ")", ":", "raise", "IPyradWarningExit", "(", "\"To use sampling method 'equal' requires a guidetree\"", ")", "tre", "=", "ete3", ".", "Tree", "(", "self", ".", "files", ".", "tree", ")", "tre", ".", "unroot", "(", ")", "tre", ".", "resolve_polytomy", "(", "recursive", "=", "True", ")", "## randomly sample internals splits", "splits", "=", "[", "(", "[", "self", ".", "samples", ".", "index", "(", "z", ".", "name", ")", "for", "z", "in", "i", "]", ",", "[", "self", ".", "samples", ".", "index", "(", "z", ".", "name", ")", "for", "z", "in", "j", "]", ")", "for", "(", "i", ",", "j", ")", "in", "tre", ".", "get_edges", "(", ")", "]", "## only keep internal splits, not single tip edges", "splits", "=", "[", "i", "for", "i", "in", "splits", "if", "all", "(", "[", "len", "(", "j", ")", ">", "1", "for", "j", "in", "i", "]", ")", "]", "## how many min quartets shoudl be equally sampled from each split", "squarts", "=", "self", ".", "params", ".", "nquartets", "//", "len", "(", "splits", ")", "## keep track of how many iterators are saturable.", "saturable", "=", "0", "## turn each into an iterable split sampler", "## if the nquartets for that split is small, then sample all, ", "## if it is big then make it a random sampler for that split.", "qiters", "=", "[", "]", "## iterate over splits sampling quartets evenly", "for", "idx", ",", "split", "in", "enumerate", "(", "splits", ")", ":", "## if small number at this split then sample all possible sets", "## we will exhaust this quickly and then switch to random for ", "## the larger splits.", "total", "=", "n_choose_k", "(", "len", "(", "split", "[", "0", "]", ")", ",", "2", ")", "*", "n_choose_k", "(", "len", "(", "split", "[", "1", "]", ")", ",", "2", ")", "if", "total", "<", "squarts", "*", "2", ":", "qiter", "=", "(", "i", "+", "j", "for", "(", "i", ",", "j", ")", "in", "itertools", ".", "product", "(", "itertools", ".", "combinations", "(", "split", "[", "0", "]", ",", "2", ")", ",", "itertools", ".", "combinations", "(", "split", "[", "1", "]", ",", "2", ")", ")", ")", "saturable", "+=", "1", "## else create random sampler across that split, this is slower", "## because it can propose the same split repeatedly and so we ", "## have to check it against the 'sampled' set.", "else", ":", "qiter", "=", "(", "random_product", "(", "split", "[", "0", "]", ",", "split", "[", "1", "]", ")", "for", "_", "in", "xrange", "(", "self", ".", "params", ".", "nquartets", ")", ")", "## store all iterators into a list", "qiters", ".", "append", "(", "(", "idx", ",", "qiter", ")", ")", "## create infinite cycler of qiters", "qitercycle", "=", "itertools", ".", "cycle", "(", "qiters", ")", "## store visited quartets", "sampled", "=", "set", "(", ")", "## fill chunksize at a time", "i", "=", "0", "empty", "=", "set", "(", ")", "edge_targeted", "=", "0", "random_targeted", "=", "0", "## keep filling quartets until nquartets are sampled.", "while", "i", "<", "self", ".", "params", ".", "nquartets", ":", "## grab the next iterator", "cycle", ",", "qiter", "=", "qitercycle", ".", "next", "(", ")", "## sample from iterators, store sorted set.", "try", ":", "qrtsamp", "=", "tuple", "(", "sorted", "(", "qiter", ".", "next", "(", ")", ")", ")", "if", "qrtsamp", 
"not", "in", "sampled", ":", "sampled", ".", "add", "(", "qrtsamp", ")", "edge_targeted", "+=", "1", "i", "+=", "1", "## print progress bar update to engine stdout", "if", "not", "i", "%", "self", ".", "_chunksize", ":", "print", "(", "min", "(", "i", ",", "self", ".", "params", ".", "nquartets", ")", ")", "except", "StopIteration", ":", "empty", ".", "add", "(", "cycle", ")", "if", "len", "(", "empty", ")", "==", "saturable", ":", "break", "## if array is not full then add random samples", "while", "i", "<=", "self", ".", "params", ".", "nquartets", ":", "newset", "=", "tuple", "(", "sorted", "(", "np", ".", "random", ".", "choice", "(", "range", "(", "len", "(", "self", ".", "samples", ")", ")", ",", "4", ",", "replace", "=", "False", ")", ")", ")", "if", "newset", "not", "in", "sampled", ":", "sampled", ".", "add", "(", "newset", ")", "random_targeted", "+=", "1", "i", "+=", "1", "## print progress bar update to engine stdout", "if", "not", "i", "%", "self", ".", "_chunksize", ":", "print", "(", "min", "(", "i", ",", "self", ".", "params", ".", "nquartets", ")", ")", "## store into database", "print", "(", "self", ".", "params", ".", "nquartets", ")", "fillsets", "[", ":", "]", "=", "np", ".", "array", "(", "tuple", "(", "sampled", ")", ")", "del", "sampled" ]
Takes a tetrad class object and populates array with random quartets sampled equally among splits of the tree so that deep splits are not overrepresented relative to rare splits, like those near the tips.
[ "Takes", "a", "tetrad", "class", "object", "and", "populates", "array", "with", "random", "quartets", "sampled", "equally", "among", "splits", "of", "the", "tree", "so", "that", "deep", "splits", "are", "not", "overrepresented", "relative", "to", "rare", "splits", "like", "those", "near", "the", "tips", "." ]
python
valid
adafruit/Adafruit_Python_MCP9808
Adafruit_MCP9808/MCP9808.py
https://github.com/adafruit/Adafruit_Python_MCP9808/blob/5524605a15cfce5668f259de72c88d5be74565f4/Adafruit_MCP9808/MCP9808.py#L67-L76
def begin(self):
    """Start taking temperature measurements. Returns True if the device is
    initialized, False otherwise.
    """
    # Check manufacturer and device ID match expected values.
    mid = self._device.readU16BE(MCP9808_REG_MANUF_ID)
    did = self._device.readU16BE(MCP9808_REG_DEVICE_ID)
    self._logger.debug('Read manufacturer ID: {0:04X}'.format(mid))
    self._logger.debug('Read device ID: {0:04X}'.format(did))
    return mid == 0x0054 and did == 0x0400
[ "def", "begin", "(", "self", ")", ":", "# Check manufacturer and device ID match expected values.", "mid", "=", "self", ".", "_device", ".", "readU16BE", "(", "MCP9808_REG_MANUF_ID", ")", "did", "=", "self", ".", "_device", ".", "readU16BE", "(", "MCP9808_REG_DEVICE_ID", ")", "self", ".", "_logger", ".", "debug", "(", "'Read manufacturer ID: {0:04X}'", ".", "format", "(", "mid", ")", ")", "self", ".", "_logger", ".", "debug", "(", "'Read device ID: {0:04X}'", ".", "format", "(", "did", ")", ")", "return", "mid", "==", "0x0054", "and", "did", "==", "0x0400" ]
Start taking temperature measurements. Returns True if the device is initialized, False otherwise.
[ "Start", "taking", "temperature", "measurements", ".", "Returns", "True", "if", "the", "device", "is", "intialized", "False", "otherwise", "." ]
python
train
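A usage sketch assuming the sensor sits at this library's default I2C address; `readTempC` is the library's temperature accessor, and the program is expected to run on hardware with an I2C bus:

import Adafruit_MCP9808.MCP9808 as MCP9808

sensor = MCP9808.MCP9808()
if sensor.begin():  # ID check shown in the record above
    print('Temperature: {0:0.3f} C'.format(sensor.readTempC()))
else:
    print('MCP9808 not detected - check wiring and address')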
yougov/pmxbot
pmxbot/commands.py
https://github.com/yougov/pmxbot/blob/5da84a3258a0fd73cb35b60e39769a5d7bfb2ba7/pmxbot/commands.py#L28-L51
def google(rest):
    "Look up a phrase on google"
    API_URL = 'https://www.googleapis.com/customsearch/v1?'
    try:
        key = pmxbot.config['Google API key']
    except KeyError:
        return "Configure 'Google API key' in config"
    # Use a custom search that searches everything normally
    # http://stackoverflow.com/a/11206266/70170
    custom_search = '004862762669074674786:hddvfu0gyg0'
    params = dict(
        key=key,
        cx=custom_search,
        q=rest.strip(),
    )
    url = API_URL + urllib.parse.urlencode(params)
    resp = requests.get(url)
    resp.raise_for_status()
    results = resp.json()
    hit1 = next(iter(results['items']))
    return ' - '.join((
        urllib.parse.unquote(hit1['link']),
        hit1['title'],
    ))
[ "def", "google", "(", "rest", ")", ":", "API_URL", "=", "'https://www.googleapis.com/customsearch/v1?'", "try", ":", "key", "=", "pmxbot", ".", "config", "[", "'Google API key'", "]", "except", "KeyError", ":", "return", "\"Configure 'Google API key' in config\"", "# Use a custom search that searches everything normally", "# http://stackoverflow.com/a/11206266/70170", "custom_search", "=", "'004862762669074674786:hddvfu0gyg0'", "params", "=", "dict", "(", "key", "=", "key", ",", "cx", "=", "custom_search", ",", "q", "=", "rest", ".", "strip", "(", ")", ",", ")", "url", "=", "API_URL", "+", "urllib", ".", "parse", ".", "urlencode", "(", "params", ")", "resp", "=", "requests", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")", "results", "=", "resp", ".", "json", "(", ")", "hit1", "=", "next", "(", "iter", "(", "results", "[", "'items'", "]", ")", ")", "return", "' - '", ".", "join", "(", "(", "urllib", ".", "parse", ".", "unquote", "(", "hit1", "[", "'link'", "]", ")", ",", "hit1", "[", "'title'", "]", ",", ")", ")" ]
Look up a phrase on google
[ "Look", "up", "a", "phrase", "on", "google" ]
python
train
timstaley/voeventdb
voeventdb/server/database/models.py
https://github.com/timstaley/voeventdb/blob/e37b176d65fced4ca4f059109a95d6974bb8a091/voeventdb/server/database/models.py#L18-L26
def _grab_xpath(root, xpath, converter=lambda x: x):
    """
    XML convenience - grabs the first element at xpath if present, else
    returns None.
    """
    elements = root.xpath(xpath)
    if elements:
        return converter(str(elements[0]))
    else:
        return None
[ "def", "_grab_xpath", "(", "root", ",", "xpath", ",", "converter", "=", "lambda", "x", ":", "x", ")", ":", "elements", "=", "root", ".", "xpath", "(", "xpath", ")", "if", "elements", ":", "return", "converter", "(", "str", "(", "elements", "[", "0", "]", ")", ")", "else", ":", "return", "None" ]
XML convenience - grabs the first element at xpath if present, else returns None.
[ "XML", "convenience", "-", "grabs", "the", "first", "element", "at", "xpath", "if", "present", "else", "returns", "None", "." ]
python
train
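A self-contained sketch of the helper above, using `lxml` (which provides the `.xpath` method the function relies on); the attribute names are illustrative, not taken from the VOEvent schema handling elsewhere in this module, and the private helper is assumed to be in scope:

from lxml import etree

root = etree.fromstring('<VOEvent role="observation"><Who/></VOEvent>')
print(_grab_xpath(root, '@role'))                       # 'observation'
print(_grab_xpath(root, '@ivorn'))                      # None (absent)
print(_grab_xpath(root, '@role', converter=str.upper))  # 'OBSERVATION'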
vinci1it2000/schedula
schedula/utils/dsp.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/dsp.py#L275-L334
def selector(keys, dictionary, copy=False, output_type='dict',
             allow_miss=False):
    """
    Selects the chosen dictionary keys from the given dictionary.

    :param keys:
        Keys to select.
    :type keys: list, tuple, set

    :param dictionary:
        A dictionary.
    :type dictionary: dict

    :param copy:
        If True the output contains deep-copies of the values.
    :type copy: bool

    :param output_type:
        Type of function output:

            + 'list': a list with all values listed in `keys`.
            + 'dict': a dictionary with any outputs listed in `keys`.
            + 'values': if output length == 1 return a single value otherwise
              a tuple with all values listed in `keys`.
    :type output_type: str, optional

    :param allow_miss:
        If True it does not raise when some key is missing in the dictionary.
    :type allow_miss: bool

    :return:
        A dictionary (or list, or single value/tuple, depending on
        `output_type`) with the chosen keys that are present in the given
        dictionary.
    :rtype: dict

    Example::

        >>> from functools import partial
        >>> fun = partial(selector, ['a', 'b'])
        >>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items())
        [('a', 1), ('b', 2)]
    """
    if not allow_miss:
        # noinspection PyUnusedLocal
        def check(key):
            return True
    else:
        def check(key):
            return key in dictionary

    if output_type == 'list':  # Select as list.
        res = [dictionary[k] for k in keys if check(k)]
        return _copy.deepcopy(res) if copy else res
    elif output_type == 'values':
        return bypass(*[dictionary[k] for k in keys if check(k)], copy=copy)

    # Select as dict.
    return bypass({k: dictionary[k] for k in keys if check(k)}, copy=copy)
[ "def", "selector", "(", "keys", ",", "dictionary", ",", "copy", "=", "False", ",", "output_type", "=", "'dict'", ",", "allow_miss", "=", "False", ")", ":", "if", "not", "allow_miss", ":", "# noinspection PyUnusedLocal", "def", "check", "(", "key", ")", ":", "return", "True", "else", ":", "def", "check", "(", "key", ")", ":", "return", "key", "in", "dictionary", "if", "output_type", "==", "'list'", ":", "# Select as list.", "res", "=", "[", "dictionary", "[", "k", "]", "for", "k", "in", "keys", "if", "check", "(", "k", ")", "]", "return", "_copy", ".", "deepcopy", "(", "res", ")", "if", "copy", "else", "res", "elif", "output_type", "==", "'values'", ":", "return", "bypass", "(", "*", "[", "dictionary", "[", "k", "]", "for", "k", "in", "keys", "if", "check", "(", "k", ")", "]", ",", "copy", "=", "copy", ")", "# Select as dict.", "return", "bypass", "(", "{", "k", ":", "dictionary", "[", "k", "]", "for", "k", "in", "keys", "if", "check", "(", "k", ")", "}", ",", "copy", "=", "copy", ")" ]
Selects the chosen dictionary keys from the given dictionary. :param keys: Keys to select. :type keys: list, tuple, set :param dictionary: A dictionary. :type dictionary: dict :param copy: If True the output contains deep-copies of the values. :type copy: bool :param output_type: Type of function output: + 'list': a list with all values listed in `keys`. + 'dict': a dictionary with any outputs listed in `keys`. + 'values': if output length == 1 return a single value otherwise a tuple with all values listed in `keys`. :type output_type: str, optional :param allow_miss: If True it does not raise when some key is missing in the dictionary. :type allow_miss: bool :return: A dictionary (or list, or single value/tuple, depending on `output_type`) with the chosen keys that are present in the given dictionary. :rtype: dict Example:: >>> from functools import partial >>> fun = partial(selector, ['a', 'b']) >>> sorted(fun({'a': 1, 'b': 2, 'c': 3}).items()) [('a', 1), ('b', 2)]
[ "Selects", "the", "chosen", "dictionary", "keys", "from", "the", "given", "dictionary", "." ]
python
train
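The remaining `output_type` modes, derived directly from the branches of the function above; only the import line is an assumption about where `selector` is exposed:

from schedula.utils.dsp import selector

d = {'a': 1, 'b': 2, 'c': 3}

print(selector(['a', 'b'], d))                      # {'a': 1, 'b': 2}
print(selector(['a', 'b'], d, output_type='list'))  # [1, 2]
print(selector(['a'], d, output_type='values'))     # 1 (single value)
print(selector(['a', 'x'], d, allow_miss=True))     # {'a': 1}, no KeyError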
LLNL/scraper
scraper/util.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/util.py#L209-L220
def _prune_dict_null_str(dictionary):
    """
    Prune the "None" or empty string values from dictionary items
    """
    for key, value in list(dictionary.items()):
        if value is None or str(value) == '':
            del dictionary[key]

        if isinstance(value, dict):
            dictionary[key] = _prune_dict_null_str(dictionary[key])

    return dictionary
[ "def", "_prune_dict_null_str", "(", "dictionary", ")", ":", "for", "key", ",", "value", "in", "list", "(", "dictionary", ".", "items", "(", ")", ")", ":", "if", "value", "is", "None", "or", "str", "(", "value", ")", "==", "''", ":", "del", "dictionary", "[", "key", "]", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "dictionary", "[", "key", "]", "=", "_prune_dict_null_str", "(", "dictionary", "[", "key", "]", ")", "return", "dictionary" ]
Prune the "None" or emptry string values from dictionary items
[ "Prune", "the", "None", "or", "emptry", "string", "values", "from", "dictionary", "items" ]
python
test
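A sketch of the pruning behavior on a nested dictionary, derived from the function body above (note that it recurses into nested dicts); the helper is private to scraper.util, so this assumes it is in scope:

record = {
    'name': 'scraper',
    'homepage': '',          # pruned: empty string
    'maintainer': None,      # pruned: None
    'meta': {'tags': '', 'license': 'MIT'},
}
print(_prune_dict_null_str(record))
# {'name': 'scraper', 'meta': {'license': 'MIT'}}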
elifesciences/elife-tools
elifetools/utils.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/utils.py#L268-L282
def node_contents_str(tag):
    """
    Return the contents of a tag, including its children, as a string.
    Does not include the root/parent of the tag.
    """
    if not tag:
        return None
    tag_string = ''
    for child_tag in tag.children:
        if isinstance(child_tag, Comment):
            # BeautifulSoup does not preserve comment tags, add them back
            tag_string += '<!--%s-->' % unicode_value(child_tag)
        else:
            tag_string += unicode_value(child_tag)
    return tag_string if tag_string != '' else None
[ "def", "node_contents_str", "(", "tag", ")", ":", "if", "not", "tag", ":", "return", "None", "tag_string", "=", "''", "for", "child_tag", "in", "tag", ".", "children", ":", "if", "isinstance", "(", "child_tag", ",", "Comment", ")", ":", "# BeautifulSoup does not preserve comment tags, add them back", "tag_string", "+=", "'<!--%s-->'", "%", "unicode_value", "(", "child_tag", ")", "else", ":", "tag_string", "+=", "unicode_value", "(", "child_tag", ")", "return", "tag_string", "if", "tag_string", "!=", "''", "else", "None" ]
Return the contents of a tag, including its children, as a string. Does not include the root/parent of the tag.
[ "Return", "the", "contents", "of", "a", "tag", "including", "it", "s", "children", "as", "a", "string", ".", "Does", "not", "include", "the", "root", "/", "parent", "of", "the", "tag", "." ]
python
train
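A minimal sketch of the function on a parsed fragment; `html.parser` stands in for whichever parser this library actually configures, and the import path follows the record's module path:

from bs4 import BeautifulSoup
from elifetools.utils import node_contents_str

soup = BeautifulSoup('<p>An <i>eLife</i> note<!--internal--></p>', 'html.parser')
print(node_contents_str(soup.p))
# 'An <i>eLife</i> note<!--internal-->'
print(node_contents_str(None))  # None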