Dataset columns (name: dtype, observed range):

Unnamed: 0: int64, values 0 to 10k
repository_name: string, lengths 7 to 54
func_path_in_repository: string, lengths 5 to 223
func_name: string, lengths 1 to 134
whole_func_string: string, lengths 100 to 30.3k
language: string, 1 class
func_code_string: string, lengths 100 to 30.3k
func_code_tokens: string, lengths 138 to 33.2k
func_documentation_string: string, lengths 1 to 15k
func_documentation_tokens: string, lengths 5 to 5.14k
split_name: string, 1 class
func_code_url: string, lengths 91 to 315
8,200
inveniosoftware/invenio-communities
invenio_communities/views/ui.py
curate
def curate(community):
    """Index page with uploader and list of existing depositions.

    :param community_id: ID of the community to curate.
    """
    if request.method == 'POST':
        action = request.json.get('action')
        recid = request.json.get('recid')

        # 'recid' is mandatory
        if not recid:
            abort(400)
        if action not in ['accept', 'reject', 'remove']:
            abort(400)

        # Resolve recid to a Record
        resolver = Resolver(
            pid_type='recid', object_type='rec', getter=Record.get_record)
        pid, record = resolver.resolve(recid)

        # Perform actions
        if action == "accept":
            community.accept_record(record)
        elif action == "reject":
            community.reject_record(record)
        elif action == "remove":
            community.remove_record(record)

        record.commit()
        db.session.commit()
        RecordIndexer().index_by_id(record.id)
        return jsonify({'status': 'success'})

    ctx = {'community': community}
    return render_template(
        current_app.config['COMMUNITIES_CURATE_TEMPLATE'], **ctx
    )
python
Index page with uploader and list of existing depositions. :param community_id: ID of the community to curate.
train
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/ui.py#L278-L315
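A hedged sketch of how a client could drive this Flask view, assuming it is routed somewhere under a community curation URL (the endpoint below is illustrative, not taken from the package):

import requests

# Hypothetical endpoint; the real route is defined by the
# invenio-communities blueprint and is not shown in this record.
url = "https://example.org/communities/my-community/curate/"

# 'recid' is mandatory and 'action' must be one of
# 'accept', 'reject' or 'remove', otherwise the view aborts with 400.
resp = requests.post(url, json={"action": "accept", "recid": "12345"})
print(resp.json())  # {'status': 'success'} on success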
8,201
rosenbrockc/fortpy
fortpy/elements.py
Module.completions
def completions(self, symbol, attribute, recursive=False):
    """Finds all possible symbol completions of the given symbol that belong
    to this module and its dependencies.

    :arg symbol: the code symbol that needs to be completed.
    :arg attribute: one of ['dependencies', 'publics', 'members',
      'types', 'executables'] for specifying which collections to search.
    :arg result: the possible completions collected so far in the search.
    """
    possible = []
    for ekey in self.collection(attribute):
        if symbol in ekey:
            possible.append(ekey)

    # Try this out on all the dependencies as well to find all the possible
    # completions.
    if recursive:
        for depkey in self.dependencies:
            # Completions need to be fast. If the module for the parent isn't
            # already loaded, we just ignore the completions it might have.
            if depkey in self.parent.modules:
                possible.extend(
                    self.parent.modules[depkey].completions(symbol, attribute))

    return possible
python
Finds all possible symbol completions of the given symbol that belong to this module and its dependencies. :arg symbol: the code symbol that needs to be completed. :arg attribute: one of ['dependencies', 'publics', 'members', 'types', 'executables'] for specifying which collections to search. :arg result: the possible completions collected so far in the search.
train
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L1577-L1600
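The matching rule itself is a plain substring test over the names in a collection. A minimal self-contained sketch of that rule, with the module and dependency machinery omitted:

def complete(symbol, names):
    # A name is a candidate completion whenever the typed symbol
    # occurs anywhere inside it, mirroring 'if symbol in ekey'.
    return [name for name in names if symbol in name]

print(complete("vec", ["vector_add", "eigvec", "matrix_mul"]))
# ['vector_add', 'eigvec']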
8,202
rgs1/zk_shell
zk_shell/shell.py
Shell.do_rm
def do_rm(self, params):
    """
    \x1b[1mNAME\x1b[0m
            rm - Remove the znode

    \x1b[1mSYNOPSIS\x1b[0m
            rm <path> [path] [path] ... [path]

    \x1b[1mEXAMPLES\x1b[0m
            > rm /foo

            > rm /foo /bar

    """
    for path in params.paths:
        try:
            self.client_context.delete(path)
        except NotEmptyError:
            self.show_output("%s is not empty.", path)
        except NoNodeError:
            self.show_output("%s doesn't exist.", path)
python
\x1b[1mNAME\x1b[0m rm - Remove the znode \x1b[1mSYNOPSIS\x1b[0m rm <path> [path] [path] ... [path] \x1b[1mEXAMPLES\x1b[0m > rm /foo > rm /foo /bar
train
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/shell.py#L1309-L1328
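The delete call and its two error cases map directly onto kazoo's client API. A hedged sketch of the same behavior against a raw kazoo client, assuming a ZooKeeper server at 127.0.0.1:2181:

from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError, NotEmptyError

client = KazooClient(hosts="127.0.0.1:2181")
client.start()
try:
    client.delete("/foo")  # non-recursive: fails if /foo has children
except NotEmptyError:
    print("/foo is not empty.")
except NoNodeError:
    print("/foo doesn't exist.")
finally:
    client.stop()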
8,203
stevearc/dql
dql/cli.py
DQLClient.do_watch
def do_watch(self, *args):
    """ Watch Dynamo tables consumed capacity """
    tables = []
    if not self.engine.cached_descriptions:
        self.engine.describe_all()
    all_tables = list(self.engine.cached_descriptions)
    for arg in args:
        candidates = set((t for t in all_tables if fnmatch(t, arg)))
        for t in sorted(candidates):
            if t not in tables:
                tables.append(t)
    mon = Monitor(self.engine, tables)
    mon.start()
python
Watch Dynamo tables consumed capacity
train
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/cli.py#L400-L413
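Table arguments are shell-style globs expanded with fnmatch, deduplicated while keeping first-match order. A self-contained sketch of that selection logic:

from fnmatch import fnmatch

all_tables = ["users", "users-archive", "orders", "sessions"]

def select(patterns):
    tables = []
    for pat in patterns:
        for t in sorted(t for t in all_tables if fnmatch(t, pat)):
            if t not in tables:
                tables.append(t)
    return tables

print(select(["users*", "orders"]))
# ['users', 'users-archive', 'orders']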
8,204
nuagenetworks/bambou
bambou/nurest_object.py
NURESTObject.children_rest_names
def children_rest_names(self):
    """ Gets the list of all possible children ReST names.

        Returns:
            list: list containing all possible rest names as string

        Example:
            >>> entity = NUEntity()
            >>> entity.children_rest_names
            ["foo", "bar"]
    """
    names = []
    for fetcher in self.fetchers:
        names.append(fetcher.__class__.managed_object_rest_name())
    return names
python
Gets the list of all possible children ReST names. Returns: list: list containing all possible rest names as string Example: >>> entity = NUEntity() >>> entity.children_rest_names ["foo", "bar"]
train
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_object.py#L266-L283
8,205
santosjorge/cufflinks
cufflinks/quant_figure.py
QuantFig.update
def update(self, **kwargs):
    """
    Updates the values for a QuantFigure
    The key-values are automatically assigned to the correct
    section of the QuantFigure
    """
    if 'columns' in kwargs:
        self._d = ta._ohlc_dict(self.df, columns=kwargs.pop('columns', None))
    schema = self._get_schema()
    annotations = kwargs.pop('annotations', None)
    if annotations:
        self.layout['annotations']['values'] = utils.make_list(annotations)
    for k, v in list(kwargs.items()):
        try:
            utils.dict_update(self.__dict__, k, v, schema)
        except:
            self.kwargs.update({k: v})
python
Updates the values for a QuantFigure The key-values are automatically assigned to the correct section of the QuantFigure
train
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/quant_figure.py#L186-L203
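A hedged usage sketch, assuming a pandas OHLC DataFrame df and the cufflinks package; the theme value and the unknown key below are illustrative:

import cufflinks as cf

qf = cf.QuantFig(df, title="ACME", name="ACME")
# Recognised keys are routed into the matching schema section;
# keys the schema does not know fall back into qf.kwargs.
qf.update(theme="solar", my_custom_flag=True)
qf.iplot()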
8,206
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/api.py
get_kbs_info
def get_kbs_info(kbtype="", searchkbname=""): """A convenience method. :param kbtype: type of kb -- get only kb's of this type :param searchkbname: get only kb's where this sting appears in the name """ # query + order by query = models.KnwKB.query.order_by( models.KnwKB.name) # filters if kbtype: query = query.filter_by(kbtype=kbtype) if searchkbname: query = query.filter_by(name=searchkbname) return [row.to_dict() for row in query.all()]
python
def get_kbs_info(kbtype="", searchkbname=""): """A convenience method. :param kbtype: type of kb -- get only kb's of this type :param searchkbname: get only kb's where this sting appears in the name """ # query + order by query = models.KnwKB.query.order_by( models.KnwKB.name) # filters if kbtype: query = query.filter_by(kbtype=kbtype) if searchkbname: query = query.filter_by(name=searchkbname) return [row.to_dict() for row in query.all()]
['def', 'get_kbs_info', '(', 'kbtype', '=', '""', ',', 'searchkbname', '=', '""', ')', ':', '# query + order by', 'query', '=', 'models', '.', 'KnwKB', '.', 'query', '.', 'order_by', '(', 'models', '.', 'KnwKB', '.', 'name', ')', '# filters', 'if', 'kbtype', ':', 'query', '=', 'query', '.', 'filter_by', '(', 'kbtype', '=', 'kbtype', ')', 'if', 'searchkbname', ':', 'query', '=', 'query', '.', 'filter_by', '(', 'name', '=', 'searchkbname', ')', 'return', '[', 'row', '.', 'to_dict', '(', ')', 'for', 'row', 'in', 'query', '.', 'all', '(', ')', ']']
A convenience method. :param kbtype: type of kb -- get only kb's of this type :param searchkbname: get only kb's where this string appears in the name
train
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/api.py#L409-L424
8,207
serge-sans-paille/pythran
pythran/types/conversion.py
pytype_to_ctype
def pytype_to_ctype(t):
    """ Python -> pythonic type binding. """
    if isinstance(t, List):
        return 'pythonic::types::list<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Set):
        return 'pythonic::types::set<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Dict):
        tkey, tvalue = t.__args__
        return 'pythonic::types::dict<{0},{1}>'.format(pytype_to_ctype(tkey),
                                                       pytype_to_ctype(tvalue))
    elif isinstance(t, Tuple):
        return 'decltype(pythonic::types::make_tuple({0}))'.format(
            ", ".join('std::declval<{}>()'.format(pytype_to_ctype(p))
                      for p in t.__args__)
        )
    elif isinstance(t, NDArray):
        dtype = pytype_to_ctype(t.__args__[0])
        ndim = len(t.__args__) - 1
        shapes = ','.join(('long' if s.stop == -1 or s.stop is None
                           else 'std::integral_constant<long, {}>'.format(s.stop))
                          for s in t.__args__[1:])
        pshape = 'pythonic::types::pshape<{0}>'.format(shapes)
        arr = 'pythonic::types::ndarray<{0},{1}>'.format(dtype, pshape)
        if t.__args__[1].start == -1:
            return 'pythonic::types::numpy_texpr<{0}>'.format(arr)
        elif any(s.step is not None and s.step < 0 for s in t.__args__[1:]):
            slices = ", ".join(['pythonic::types::normalized_slice'] * ndim)
            return 'pythonic::types::numpy_gexpr<{0},{1}>'.format(arr, slices)
        else:
            return arr
    elif isinstance(t, Pointer):
        return 'pythonic::types::pointer<{0}>'.format(
            pytype_to_ctype(t.__args__[0])
        )
    elif isinstance(t, Fun):
        return 'pythonic::types::cfun<{0}({1})>'.format(
            pytype_to_ctype(t.__args__[-1]),
            ", ".join(pytype_to_ctype(arg) for arg in t.__args__[:-1]),
        )
    elif t in PYTYPE_TO_CTYPE_TABLE:
        return PYTYPE_TO_CTYPE_TABLE[t]
    else:
        raise NotImplementedError("{0}:{1}".format(type(t), t))
python
Python -> pythonic type binding.
train
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/conversion.py#L43-L92
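A hedged illustration of the mapping, assuming pythran's typing stubs subscript like typing generics and that the scalar table maps int to long and float to double (both are assumptions about pythran internals):

from pythran.types.conversion import pytype_to_ctype
from pythran.typing import Dict, List

# Containers recurse on their element types.
print(pytype_to_ctype(List[int]))
# expected: pythonic::types::list<long>
print(pytype_to_ctype(Dict[int, float]))
# expected: pythonic::types::dict<long,double>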
8,208
skyfielders/python-skyfield
skyfield/timelib.py
Time.utc_datetime_and_leap_second
def utc_datetime_and_leap_second(self):
    """Convert to a Python ``datetime`` in UTC, plus a leap second value.

    Convert this time to a `datetime`_ object and a leap second::

        dt, leap_second = t.utc_datetime_and_leap_second()

    If the third-party `pytz`_ package is available, then its ``utc``
    timezone will be used as the timezone of the return value.
    Otherwise, Skyfield uses its own ``utc`` timezone.

    The leap second value is provided because a Python ``datetime``
    can only number seconds ``0`` through ``59``, but leap seconds
    have a designation of at least ``60``. The leap second return
    value will normally be ``0``, but will instead be ``1`` if the
    date and time are a UTC leap second. Add the leap second value
    to the ``second`` field of the ``datetime`` to learn the real
    name of the second.

    If this time is an array, then an array of ``datetime`` objects
    and an array of leap second integers is returned, instead of a
    single value each.
    """
    year, month, day, hour, minute, second = self._utc_tuple(
        _half_millisecond)
    second, fraction = divmod(second, 1.0)
    second = second.astype(int)
    leap_second = second // 60
    second -= leap_second
    milli = (fraction * 1000).astype(int) * 1000
    if self.shape:
        utcs = [utc] * self.shape[0]
        argsets = zip(year, month, day, hour, minute, second, milli, utcs)
        dt = array([datetime(*args) for args in argsets])
    else:
        dt = datetime(year, month, day, hour, minute, second, milli, utc)
    return dt, leap_second
python
Convert to a Python ``datetime`` in UTC, plus a leap second value. Convert this time to a `datetime`_ object and a leap second:: dt, leap_second = t.utc_datetime_and_leap_second() If the third-party `pytz`_ package is available, then its ``utc`` timezone will be used as the timezone of the return value. Otherwise, Skyfield uses its own ``utc`` timezone. The leap second value is provided because a Python ``datetime`` can only number seconds ``0`` through ``59``, but leap seconds have a designation of at least ``60``. The leap second return value will normally be ``0``, but will instead be ``1`` if the date and time are a UTC leap second. Add the leap second value to the ``second`` field of the ``datetime`` to learn the real name of the second. If this time is an array, then an array of ``datetime`` objects and an array of leap second integers is returned, instead of a single value each.
train
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/timelib.py#L425-L462
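A hedged usage sketch with skyfield's public API, using the leap second inserted at the end of 2016:

from skyfield.api import load

ts = load.timescale()
t = ts.utc(2016, 12, 31, 23, 59, 60)  # 2016-12-31 23:59:60 UTC was a leap second
dt, leap_second = t.utc_datetime_and_leap_second()
# dt reads 23:59:59; adding leap_second (1 here) to its second
# field recovers the real name of the second, 60.
print(dt, leap_second)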
8,209
davesque/django-rest-framework-simplejwt
rest_framework_simplejwt/authentication.py
JWTAuthentication.get_raw_token
def get_raw_token(self, header):
    """
    Extracts an unvalidated JSON web token from the given "Authorization"
    header value.
    """
    parts = header.split()

    if len(parts) == 0:
        # Empty AUTHORIZATION header sent
        return None

    if parts[0] not in AUTH_HEADER_TYPE_BYTES:
        # Assume the header does not contain a JSON web token
        return None

    if len(parts) != 2:
        raise AuthenticationFailed(
            _('Authorization header must contain two space-delimited values'),
            code='bad_authorization_header',
        )

    return parts[1]
python
Extracts an unvalidated JSON web token from the given "Authorization" header value.
train
https://github.com/davesque/django-rest-framework-simplejwt/blob/d6084c595aefbf97865d15254b56017e710e8e47/rest_framework_simplejwt/authentication.py#L59-L80
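The method is pure header parsing. A self-contained sketch of the same three checks, assuming the default 'Bearer' header type (the real set comes from the AUTH_HEADER_TYPES setting):

AUTH_HEADER_TYPE_BYTES = {b"Bearer"}

def get_raw_token(header):
    parts = header.split()
    if len(parts) == 0:
        return None  # empty Authorization header
    if parts[0] not in AUTH_HEADER_TYPE_BYTES:
        return None  # some other auth scheme, e.g. Basic
    if len(parts) != 2:
        raise ValueError("Authorization header must contain two space-delimited values")
    return parts[1]

print(get_raw_token(b"Bearer abc.def.ghi"))  # b'abc.def.ghi'
print(get_raw_token(b"Basic dXNlcjpwYXNz"))  # None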
8,210
lsbardel/python-stdnet
stdnet/odm/struct.py
OrderedMixin.ipop_range
def ipop_range(self, start=0, stop=-1, callback=None, withscores=True):
    '''pop a range from the :class:`OrderedMixin`'''
    backend = self.backend
    res = backend.structure(self).ipop_range(start, stop,
                                             withscores=withscores)
    if not callback:
        callback = self.load_data if withscores else self.load_values
    return backend.execute(res, callback)
python
pop a range from the :class:`OrderedMixin`
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/struct.py#L558-L565
8,211
ray-project/ray
python/ray/profiling.py
Profiler._periodically_flush_profile_events
def _periodically_flush_profile_events(self):
    """Drivers run this as a thread to flush profile data in the background."""
    # Note(rkn): This is run on a background thread in the driver. It uses
    # the raylet client. This should be ok because it doesn't read
    # from the raylet client and we have the GIL here. However,
    # if either of those things changes, then we could run into issues.
    while True:
        # Sleep for 1 second. This will be interrupted if
        # self.threads_stopped is set.
        self.threads_stopped.wait(timeout=1)

        # Exit if we received a signal that we should stop.
        if self.threads_stopped.is_set():
            return

        self.flush_profile_data()
python
Drivers run this as a thread to flush profile data in the background.
train
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/profiling.py#L94-L110
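The stop-aware sleep is the standard threading.Event pattern: wait(timeout) doubles as an interruptible sleep. A self-contained sketch of the same loop:

import threading
import time

stop = threading.Event()

def periodic_worker():
    while True:
        stop.wait(timeout=1)  # sleeps up to 1s, returns early if stop is set
        if stop.is_set():
            return
        print("flush", time.strftime("%H:%M:%S"))

t = threading.Thread(target=periodic_worker)
t.start()
time.sleep(3.5)  # let it run a few cycles
stop.set()       # interrupts the wait and ends the worker
t.join()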
8,212
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py
Base.ReplaceIxes
def ReplaceIxes(self, path, old_prefix, old_suffix, new_prefix, new_suffix):
    """
    Replace old_prefix with new_prefix and old_suffix with new_suffix.

    env - Environment used to interpolate variables.
    path - the path that will be modified.
    old_prefix - construction variable for the old prefix.
    old_suffix - construction variable for the old suffix.
    new_prefix - construction variable for the new prefix.
    new_suffix - construction variable for the new suffix.
    """
    old_prefix = self.subst('$' + old_prefix)
    old_suffix = self.subst('$' + old_suffix)
    new_prefix = self.subst('$' + new_prefix)
    new_suffix = self.subst('$' + new_suffix)
    dir, name = os.path.split(str(path))
    if name[:len(old_prefix)] == old_prefix:
        name = name[len(old_prefix):]
    if name[-len(old_suffix):] == old_suffix:
        name = name[:-len(old_suffix)]
    return os.path.join(dir, new_prefix + name + new_suffix)
python
Replace old_prefix with new_prefix and old_suffix with new_suffix. env - Environment used to interpolate variables. path - the path that will be modified. old_prefix - construction variable for the old prefix. old_suffix - construction variable for the old suffix. new_prefix - construction variable for the new prefix. new_suffix - construction variable for the new suffix.
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py#L1749-L1771
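Stripped of the SCons construction-variable substitution, the core is plain prefix/suffix surgery on the basename. A self-contained sketch converting a static library name to a shared one:

import os

def replace_ixes(path, old_prefix, old_suffix, new_prefix, new_suffix):
    d, name = os.path.split(path)
    if name.startswith(old_prefix):
        name = name[len(old_prefix):]
    if name.endswith(old_suffix):
        name = name[:-len(old_suffix)]
    return os.path.join(d, new_prefix + name + new_suffix)

print(replace_ixes("build/libfoo.a", "lib", ".a", "lib", ".so"))
# build/libfoo.so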
8,213
ansibleplaybookbundle/ansible-playbook-bundle
src/apb/cli.py
subcmd_list_parser
def subcmd_list_parser(subcmd):
    """ list subcommand """
    subcmd.add_argument(
        '--broker', action='store', dest='broker',
        help=u'Route to the Ansible Service Broker'
    )
    subcmd.add_argument(
        '--secure', action='store_true', dest='verify',
        help=u'Verify SSL connection to Ansible Service Broker',
        default=False
    )
    subcmd.add_argument(
        '--ca-path', action='store', dest='cert',
        help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
        default=None
    )
    subcmd.add_argument(
        '--verbose', '-v', action='store_true', dest='verbose',
        help=u'Output verbose spec information from Ansible Service Broker',
        default=False
    )
    subcmd.add_argument(
        '--output', '-o', action='store', dest='output',
        help=u'Specify verbose output format in yaml (default) or json',
        default='optional', choices=['yaml', 'json']
    )
    subcmd.add_argument(
        '--username', '-u', action='store', default=None,
        dest='basic_auth_username',
        help=u'Specify the basic auth username to be used'
    )
    subcmd.add_argument(
        '--password', '-p', action='store', default=None,
        dest='basic_auth_password',
        help=u'Specify the basic auth password to be used'
    )
    return
python
list subcommand
train
https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L30-L85
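A hedged sketch wiring this into an argparse subcommand tree, assuming the function above is in scope; 'apb' as the program name and the broker URL are illustrative:

import argparse

parser = argparse.ArgumentParser(prog="apb")
subparsers = parser.add_subparsers(dest="subcommand")
subcmd_list_parser(subparsers.add_parser("list"))

args = parser.parse_args(["list", "--broker", "http://broker.local", "-o", "json"])
print(args.broker, args.output)  # http://broker.local json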
8,214
kwikteam/phy
phy/plot/transform.py
_glslify
def _glslify(r):
    """Transform a string or a n-tuple to a valid GLSL expression."""
    if isinstance(r, string_types):
        return r
    else:
        assert 2 <= len(r) <= 4
        return 'vec{}({})'.format(len(r), ', '.join(map(str, r)))
python
Transform a string or a n-tuple to a valid GLSL expression.
train
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/transform.py#L48-L54
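A self-contained sketch of the same transform, with string_types (from six in the original) narrowed to plain str:

def glslify(r):
    # Strings pass through untouched; 2- to 4-tuples become
    # GLSL vec2/vec3/vec4 constructor expressions.
    if isinstance(r, str):
        return r
    assert 2 <= len(r) <= 4
    return 'vec{}({})'.format(len(r), ', '.join(map(str, r)))

print(glslify("a_position"))  # a_position
print(glslify((1.0, 0.5)))    # vec2(1.0, 0.5)
print(glslify((0, 0, 1, 1)))  # vec4(0, 0, 1, 1)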
8,215
rshk/python-libxdo
xdo/__init__.py
Xdo.wait_for_window_map_state
def wait_for_window_map_state(self, window, state):
    """
    Wait for a window to have a specific map state.

    State possibilities:
    IsUnmapped - window is not displayed.
    IsViewable - window is mapped and shown (though may be
    clipped by windows on top of it)
    IsUnviewable - window is mapped but a parent window is unmapped.

    :param window: the window you want to wait for.
    :param state: the state to wait for.
    """
    _libxdo.xdo_wait_for_window_map_state(self._xdo, window, state)
python
Wait for a window to have a specific map state. State possibilities: IsUnmapped - window is not displayed. IsViewable - window is mapped and shown (though may be clipped by windows on top of it) IsUnviewable - window is mapped but a parent window is unmapped. :param window: the window you want to wait for. :param state: the state to wait for.
train
https://github.com/rshk/python-libxdo/blob/84cafa5943b005bc423edd28203a5266b3579ac3/xdo/__init__.py#L335-L348
8,216
christophertbrown/bioscripts
ctbBio/genome_abundance.py
absolute_abundance
def absolute_abundance(coverage, total_bases):
    """
    absolute abundance =
        (number of bases mapped to genome / total number of bases in sample) * 100
    """
    absolute = {}
    for genome in coverage:
        absolute[genome] = []
        index = 0
        for calc in coverage[genome]:
            bases = calc[0]
            total = total_bases[index]
            absolute[genome].append((bases / total) * float(100))
            index += 1
    total_assembled = [0 for i in absolute[genome]]
    for genome in absolute:
        index = 0
        for cov in absolute[genome]:
            total_assembled[index] += cov
            index += 1
    absolute['Unassembled'] = [(100 - i) for i in total_assembled]
    return absolute
python
absolute abundance = (number of bases mapped to genome / total number of bases in sample) * 100
train
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_abundance.py#L45-L65
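A numeric check, assuming the function above is in scope and that each coverage entry is a per-sample tuple whose first element is the number of mapped bases:

coverage = {"genomeA": [(60.0,)], "genomeB": [(25.0,)]}
total_bases = [100.0]

print(absolute_abundance(coverage, total_bases))
# {'genomeA': [60.0], 'genomeB': [25.0], 'Unassembled': [15.0]}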
8,217
usc-isi-i2/etk
etk/knowledge_graph.py
KnowledgeGraph.add_value
def add_value(self, field_name: str, value: object = None, json_path: str = None,
              json_path_extraction: str = None, keep_empty: bool = False) -> None:
    """
    Add a value to knowledge graph.
    Input can either be a value or a json_path. If the input is json_path, the helper
    function _add_doc_value is called.
    If the input is a value, then it is handled

    Args:
        field_name: str, the field name in the knowledge graph
        value: the value to be added to the knowledge graph
        json_path: str, if json_path is provided, then get the value at this path in the doc
        json_path_extraction: str,
        discard_empty: bool,

    Returns:
    """
    def validate(v):
        if v is not None:
            if isinstance(v, str):
                if v.strip() != "" or keep_empty:
                    return True
                else:
                    return False
            else:
                return True
        return False

    self.validate_field(field_name)
    if field_name not in self._kg:
        self._kg[field_name] = []

    if json_path:
        self._add_doc_value(field_name, json_path)

    if validate(value):
        if not isinstance(value, list):
            value = [value]
        all_valid = True
        invalid = []
        for a_value in value:
            if isinstance(a_value, Extraction):
                valid = self._add_single_value(field_name, a_value.value,
                                               provenance_path=str(json_path_extraction),
                                               keep_empty=keep_empty)
            elif isinstance(a_value, Segment):
                valid = self._add_single_value(field_name, a_value.value,
                                               provenance_path=a_value.json_path,
                                               keep_empty=keep_empty)
            else:
                valid = self._add_single_value(field_name, a_value,
                                               provenance_path=json_path_extraction,
                                               reference_type="constant",
                                               keep_empty=keep_empty)
            all_valid = all_valid and valid
            if not valid:
                invalid.append(field_name + ":" + str(a_value))
        if not all_valid:
            print("Some kg value type invalid according to schema:" + json.dumps(invalid))
            # raise KgValueError("Some kg value type invalid according to schema")

    # IF we did not add any value, remove the empty field we just added to kg
    if len(self._kg[field_name]) == 0:
        self._kg.pop(field_name)
python
Add a value to knowledge graph. Input can either be a value or a json_path. If the input is json_path, the helper function _add_doc_value is called. If the input is a value, then it is handled Args: field_name: str, the field name in the knowledge graph value: the value to be added to the knowledge graph json_path: str, if json_path is provided, then get the value at this path in the doc json_path_extraction: str, discard_empty: bool, Returns:
train
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph.py#L134-L194
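A hedged usage sketch against etk's document API; the import paths follow etk's README-era examples, and master_config.json (not shown) is assumed to define the two fields used below:

import json

from etk.etk import ETK
from etk.knowledge_graph import KGSchema

kg_schema = KGSchema(json.load(open("master_config.json")))
etk = ETK(kg_schema=kg_schema)
doc = etk.create_document({"projects": [{"name": "etk"}]})

doc.kg.add_value("developer", value="ISI")                        # constant value
doc.kg.add_value("project_name", json_path="$.projects[0].name")  # pulled from the doc

print(doc.kg.value)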
8,218
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.meff_SO
def meff_SO(self, **kwargs):
    '''
    Returns the split-off hole effective mass calculated from
    Eg_Gamma(T), Delta_SO, Ep and F.

    Interpolation of Eg_Gamma(T), Delta_SO, Ep and luttinger1,
    and then calculation of meff_SO is recommended for alloys.
    '''
    Eg = self.Eg_Gamma(**kwargs)
    Delta_SO = self.Delta_SO(**kwargs)
    Ep = self.Ep(**kwargs)
    luttinger1 = self.luttinger1(**kwargs)
    return 1./(luttinger1 - (Ep*Delta_SO)/(3*Eg*(Eg+Delta_SO)))
python
Returns the split-off hole effective mass calculated from Eg_Gamma(T), Delta_SO, Ep and F. Interpolation of Eg_Gamma(T), Delta_SO, Ep and luttinger1, and then calculation of meff_SO is recommended for alloys.
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L168-L180
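The return line implements the k.p relation meff_SO = 1 / (gamma1 - Ep*Delta_SO / (3*Eg*(Eg + Delta_SO))), in units of the free-electron mass. A numeric check with textbook GaAs parameters (Eg = 1.519 eV, Delta_SO = 0.341 eV, Ep = 28.8 eV, gamma1 = 6.98; values from the Vurgaftman 2001 compilation):

Eg, Delta_SO, Ep, luttinger1 = 1.519, 0.341, 28.8, 6.98

meff_SO = 1. / (luttinger1 - (Ep * Delta_SO) / (3 * Eg * (Eg + Delta_SO)))
print(round(meff_SO, 3))  # 0.172, close to the accepted GaAs split-off hole mass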
8,219
NiklasRosenstein-Python/nr-deprecated
nr/tools/versionupgrade.py
parse_config
def parse_config(filename):
    """
    Parses a versionupgrade configuration file. Example:

        tag v{VERSION}
        branch v{VERSION}
        message Prepare {VERSION} release
        upgrade setup.py:  version = '{VERSION}'
        upgrade __init__.py:__version__ = '{VERSION}'
        sub docs/changelog/v{VERSION}.md:# v{VERSION} (unreleased):# v{VERSION} ({DATE})

    Available commands:

    - tag: Create a Git tag with the specified name.
    - branch: Create a Git branch with the specified name.
    - message: The commit message for upgraded version numbers.
    - upgrade: Upgrade the version number in the file matching the pattern.
      The same file may be listed multiple times. The pattern may actually be
      a regular expression and will be searched in every line of the file.
    - sub: Specify a file where the part of the file matching the first string
      will be replaced by the second string.

    Returns a #Config object.
    """
    tag = None
    branch = None
    message = 'Prepare {VERSION} release.'
    upgrades = {}
    subs = {}
    with open(filename) as fp:
        for i, line in enumerate(fp):
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, sep, value = line.partition(' ')
            if not key or not value:
                raise ValueError('invalid configuration file at line {}'.format(i+1))
            if key == 'tag':
                tag = value.strip()
            elif key == 'branch':
                branch = value.strip()
            elif key == 'message':
                message = value.strip()
            elif key == 'upgrade':
                filename, sep, pattern = value.partition(':')
                if not filename or not sep or not pattern or '{VERSION}' not in pattern:
                    raise ValueError('invalid upgrade argument at line {}'.format(i+1))
                upgrade = upgrades.setdefault(filename, [])
                upgrade.append(pattern)
            elif key == 'sub':
                filename, sep, pattern = value.partition(':')
                pattern = pattern.partition(':')[::2]
                if not pattern[0] or not pattern[1]:
                    raise ValueError('invalid sub argument at line {}'.format(i+1))
                subs.setdefault(filename, []).append(pattern)
            else:
                raise ValueError('invalid command {!r} at line {}'.format(key, i+1))
    return Config(tag, branch, message, upgrades, subs)
python
def parse_config(filename): """ Parses a versionupgrade configuration file. Example: tag v{VERSION} branch v{VERSION} message Prepare {VERSION} release upgrade setup.py: version = '{VERSION}' upgrade __init__.py:__version__ = '{VERSION}' sub docs/changelog/v{VERSION}.md:# v{VERSION} (unreleased):# v{VERSION} ({DATE}) Available commands: - tag: Create a Git tag with the specified name. - branch: Create a Git branch with the specified name. - message: The commit message for upgraded version numbers. - upgrade: Upgrade the version number in the file matching the pattern. The same file may be listed multiple times. The pattern may actually be a regular expression and will be searched in every line of the file. - sub: Specify a file where the part of the file matching the first string will be replaced by the second string. Returns a #Config object. """ tag = None branch = None message = 'Prepare {VERSION} release.' upgrades = {} subs = {} with open(filename) as fp: for i, line in enumerate(fp): line = line.strip() if not line or line.startswith('#'): continue key, sep, value = line.partition(' ') if not key or not value: raise ValueError('invalid configuration file at line {}'.format(i+1)) if key == 'tag': tag = value.strip() elif key == 'branch': branch = value.strip() elif key == 'message': message = value.strip() elif key == 'upgrade': filename, sep, pattern = value.partition(':') if not filename or not sep or not pattern or '{VERSION}' not in pattern: raise ValueError('invalid upgrade argument at line {}'.format(i+1)) upgrade = upgrades.setdefault(filename, []) upgrade.append(pattern) elif key == 'sub': filename, sep, pattern = value.partition(':') pattern = pattern.partition(':')[::2] if not pattern[0] or not pattern[1]: raise ValueError('invalid sub argument at line {}'.format(i+1)) subs.setdefault(filename, []).append(pattern) else: raise ValueError('invalid command {!r} at line {}'.format(key, i+1)) return Config(tag, branch, message, upgrades, subs)
['def', 'parse_config', '(', 'filename', ')', ':', 'tag', '=', 'None', 'branch', '=', 'None', 'message', '=', "'Prepare {VERSION} release.'", 'upgrades', '=', '{', '}', 'subs', '=', '{', '}', 'with', 'open', '(', 'filename', ')', 'as', 'fp', ':', 'for', 'i', ',', 'line', 'in', 'enumerate', '(', 'fp', ')', ':', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', 'not', 'line', 'or', 'line', '.', 'startswith', '(', "'#'", ')', ':', 'continue', 'key', ',', 'sep', ',', 'value', '=', 'line', '.', 'partition', '(', "' '", ')', 'if', 'not', 'key', 'or', 'not', 'value', ':', 'raise', 'ValueError', '(', "'invalid configuration file at line {}'", '.', 'format', '(', 'i', '+', '1', ')', ')', 'if', 'key', '==', "'tag'", ':', 'tag', '=', 'value', '.', 'strip', '(', ')', 'elif', 'key', '==', "'branch'", ':', 'branch', '=', 'value', '.', 'strip', '(', ')', 'elif', 'key', '==', "'message'", ':', 'message', '=', 'value', '.', 'strip', '(', ')', 'elif', 'key', '==', "'upgrade'", ':', 'filename', ',', 'sep', ',', 'pattern', '=', 'value', '.', 'partition', '(', "':'", ')', 'if', 'not', 'filename', 'or', 'not', 'sep', 'or', 'not', 'pattern', 'or', "'{VERSION}'", 'not', 'in', 'pattern', ':', 'raise', 'ValueError', '(', "'invalid upgrade argument at line {}'", '.', 'format', '(', 'i', '+', '1', ')', ')', 'upgrade', '=', 'upgrades', '.', 'setdefault', '(', 'filename', ',', '[', ']', ')', 'upgrade', '.', 'append', '(', 'pattern', ')', 'elif', 'key', '==', "'sub'", ':', 'filename', ',', 'sep', ',', 'pattern', '=', 'value', '.', 'partition', '(', "':'", ')', 'pattern', '=', 'pattern', '.', 'partition', '(', "':'", ')', '[', ':', ':', '2', ']', 'if', 'not', 'pattern', '[', '0', ']', 'or', 'not', 'pattern', '[', '1', ']', ':', 'raise', 'ValueError', '(', "'invalid sub argument at line {}'", '.', 'format', '(', 'i', '+', '1', ')', ')', 'subs', '.', 'setdefault', '(', 'filename', ',', '[', ']', ')', '.', 'append', '(', 'pattern', ')', 'else', ':', 'raise', 'ValueError', '(', "'invalid command {!r} at line {}'", '.', 'format', '(', 'key', ',', 'i', '+', '1', ')', ')', 'return', 'Config', '(', 'tag', ',', 'branch', ',', 'message', ',', 'upgrades', ',', 'subs', ')']
Parses a versionupgrade configuration file. Example: tag v{VERSION} branch v{VERSION} message Prepare {VERSION} release upgrade setup.py: version = '{VERSION}' upgrade __init__.py:__version__ = '{VERSION}' sub docs/changelog/v{VERSION}.md:# v{VERSION} (unreleased):# v{VERSION} ({DATE}) Available commands: - tag: Create a Git tag with the specified name. - branch: Create a Git branch with the specified name. - message: The commit message for upgraded version numbers. - upgrade: Upgrade the version number in the file matching the pattern. The same file may be listed multiple times. The pattern may actually be a regular expression and will be searched in every line of the file. - sub: Specify a file where the part of the file matching the first string will be replaced by the second string. Returns a #Config object.
['Parses', 'a', 'versionupgrade', 'configuration', 'file', '.', 'Example', ':']
train
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/tools/versionupgrade.py#L40-L102
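To make the command grammar concrete, here is a hedged usage sketch: it writes a small configuration to a temporary file and parses it with the function above. The import path follows the repository layout shown in this record, and Config is assumed to be a namedtuple-like object exposing the five attributes the return statement suggests.

import tempfile, textwrap
from nr.tools.versionupgrade import parse_config  # path per this record; assumed importable

cfg = textwrap.dedent('''\
    tag v{VERSION}
    message Release {VERSION}
    upgrade setup.py:version = '{VERSION}'
    sub CHANGELOG.md:unreleased:{VERSION}
''')
with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as fp:
    fp.write(cfg)
config = parse_config(fp.name)
print(config.tag)       # 'v{VERSION}'
print(config.upgrades)  # {'setup.py': ["version = '{VERSION}'"]}
print(config.subs)      # {'CHANGELOG.md': [('unreleased', '{VERSION}')]}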
8,220
ampl/amplpy
amplpy/ampl.py
AMPL.getObjectives
def getObjectives(self): """ Get all the objectives declared. """ objectives = lock_and_call( lambda: self._impl.getObjectives(), self._lock ) return EntityMap(objectives, Objective)
python
def getObjectives(self): """ Get all the objectives declared. """ objectives = lock_and_call( lambda: self._impl.getObjectives(), self._lock ) return EntityMap(objectives, Objective)
['def', 'getObjectives', '(', 'self', ')', ':', 'objectives', '=', 'lock_and_call', '(', 'lambda', ':', 'self', '.', '_impl', '.', 'getObjectives', '(', ')', ',', 'self', '.', '_lock', ')', 'return', 'EntityMap', '(', 'objectives', ',', 'Objective', ')']
Get all the objectives declared.
['Get', 'all', 'the', 'objectives', 'declared', '.']
train
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/ampl.py#L819-L827
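The returned EntityMap iterates as (name, entity) pairs, so typical use looks like the hedged sketch below; it assumes a local AMPL installation plus a configured solver, and the tiny model is made up for illustration.

from amplpy import AMPL  # requires AMPL itself to be installed

ampl = AMPL()
ampl.eval('var x >= 0; maximize profit: 3 * x; subject to cap: x <= 10;')
ampl.solve()                                   # needs a solver on the PATH
for name, objective in ampl.getObjectives():   # EntityMap yields (name, entity)
    print(name, objective.value())             # expected: profit 30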
8,221
PmagPy/PmagPy
programs/demag_gui.py
Demag_GUI.convert_ages_to_calendar_year
def convert_ages_to_calendar_year(self, er_ages_rec): """ convert all age units to calendar year Parameters ---------- er_ages_rec : Dict type object containing preferbly at least keys 'age', 'age_unit', and either 'age_range_high', 'age_range_low' or 'age_sigma' Returns ------- er_ages_rec : Same dict object input but altered to have new records 'age_cal_year_range_low' and 'age_cal_year_range_high' """ if "age" not in list(er_ages_rec.keys()): return(er_ages_rec) if "age_unit" not in list(er_ages_rec.keys()): return(er_ages_rec) if er_ages_rec["age_unit"] == "": return(er_ages_rec) if er_ages_rec["age"] == "": if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": er_ages_rec["age"] = scipy.mean( [float(er_ages_rec["age_range_high"]), float(er_ages_rec["age_range_low"])]) if er_ages_rec["age"] == "": return(er_ages_rec) age_unit = er_ages_rec["age_unit"] # Fix 'age': mutliplier = 1 if age_unit == "Ga": mutliplier = -1e9 if age_unit == "Ma": mutliplier = -1e6 if age_unit == "Ka": mutliplier = -1e3 if age_unit == "Years AD (+/-)" or age_unit == "Years Cal AD (+/-)": mutliplier = 1 if age_unit == "Years BP" or age_unit == "Years Cal BP": mutliplier = 1 age = float(er_ages_rec["age"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age = 1950-age er_ages_rec['age_cal_year'] = age # Fix 'age_range_low': age_range_low = age age_range_high = age age_sigma = 0 if "age_sigma" in list(er_ages_rec.keys()) and er_ages_rec["age_sigma"] != "": age_sigma = float(er_ages_rec["age_sigma"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_sigma = 1950-age_sigma age_range_low = age-age_sigma age_range_high = age+age_sigma if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": age_range_high = float( er_ages_rec["age_range_high"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_high = 1950-age_range_high age_range_low = float(er_ages_rec["age_range_low"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_low = 1950-age_range_low er_ages_rec['age_cal_year_range_low'] = age_range_low er_ages_rec['age_cal_year_range_high'] = age_range_high return(er_ages_rec)
python
def convert_ages_to_calendar_year(self, er_ages_rec): """ convert all age units to calendar year Parameters ---------- er_ages_rec : Dict type object containing preferbly at least keys 'age', 'age_unit', and either 'age_range_high', 'age_range_low' or 'age_sigma' Returns ------- er_ages_rec : Same dict object input but altered to have new records 'age_cal_year_range_low' and 'age_cal_year_range_high' """ if "age" not in list(er_ages_rec.keys()): return(er_ages_rec) if "age_unit" not in list(er_ages_rec.keys()): return(er_ages_rec) if er_ages_rec["age_unit"] == "": return(er_ages_rec) if er_ages_rec["age"] == "": if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": er_ages_rec["age"] = scipy.mean( [float(er_ages_rec["age_range_high"]), float(er_ages_rec["age_range_low"])]) if er_ages_rec["age"] == "": return(er_ages_rec) age_unit = er_ages_rec["age_unit"] # Fix 'age': mutliplier = 1 if age_unit == "Ga": mutliplier = -1e9 if age_unit == "Ma": mutliplier = -1e6 if age_unit == "Ka": mutliplier = -1e3 if age_unit == "Years AD (+/-)" or age_unit == "Years Cal AD (+/-)": mutliplier = 1 if age_unit == "Years BP" or age_unit == "Years Cal BP": mutliplier = 1 age = float(er_ages_rec["age"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age = 1950-age er_ages_rec['age_cal_year'] = age # Fix 'age_range_low': age_range_low = age age_range_high = age age_sigma = 0 if "age_sigma" in list(er_ages_rec.keys()) and er_ages_rec["age_sigma"] != "": age_sigma = float(er_ages_rec["age_sigma"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_sigma = 1950-age_sigma age_range_low = age-age_sigma age_range_high = age+age_sigma if "age_range_high" in list(er_ages_rec.keys()) and "age_range_low" in list(er_ages_rec.keys()): if er_ages_rec["age_range_high"] != "" and er_ages_rec["age_range_low"] != "": age_range_high = float( er_ages_rec["age_range_high"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_high = 1950-age_range_high age_range_low = float(er_ages_rec["age_range_low"])*mutliplier if age_unit == "Years BP" or age_unit == "Years Cal BP": age_range_low = 1950-age_range_low er_ages_rec['age_cal_year_range_low'] = age_range_low er_ages_rec['age_cal_year_range_high'] = age_range_high return(er_ages_rec)
['def', 'convert_ages_to_calendar_year', '(', 'self', ',', 'er_ages_rec', ')', ':', 'if', '"age"', 'not', 'in', 'list', '(', 'er_ages_rec', '.', 'keys', '(', ')', ')', ':', 'return', '(', 'er_ages_rec', ')', 'if', '"age_unit"', 'not', 'in', 'list', '(', 'er_ages_rec', '.', 'keys', '(', ')', ')', ':', 'return', '(', 'er_ages_rec', ')', 'if', 'er_ages_rec', '[', '"age_unit"', ']', '==', '""', ':', 'return', '(', 'er_ages_rec', ')', 'if', 'er_ages_rec', '[', '"age"', ']', '==', '""', ':', 'if', '"age_range_high"', 'in', 'list', '(', 'er_ages_rec', '.', 'keys', '(', ')', ')', 'and', '"age_range_low"', 'in', 'list', '(', 'er_ages_rec', '.', 'keys', '(', ')', ')', ':', 'if', 'er_ages_rec', '[', '"age_range_high"', ']', '!=', '""', 'and', 'er_ages_rec', '[', '"age_range_low"', ']', '!=', '""', ':', 'er_ages_rec', '[', '"age"', ']', '=', 'scipy', '.', 'mean', '(', '[', 'float', '(', 'er_ages_rec', '[', '"age_range_high"', ']', ')', ',', 'float', '(', 'er_ages_rec', '[', '"age_range_low"', ']', ')', ']', ')', 'if', 'er_ages_rec', '[', '"age"', ']', '==', '""', ':', 'return', '(', 'er_ages_rec', ')', 'age_unit', '=', 'er_ages_rec', '[', '"age_unit"', ']', "# Fix 'age':", 'mutliplier', '=', '1', 'if', 'age_unit', '==', '"Ga"', ':', 'mutliplier', '=', '-', '1e9', 'if', 'age_unit', '==', '"Ma"', ':', 'mutliplier', '=', '-', '1e6', 'if', 'age_unit', '==', '"Ka"', ':', 'mutliplier', '=', '-', '1e3', 'if', 'age_unit', '==', '"Years AD (+/-)"', 'or', 'age_unit', '==', '"Years Cal AD (+/-)"', ':', 'mutliplier', '=', '1', 'if', 'age_unit', '==', '"Years BP"', 'or', 'age_unit', '==', '"Years Cal BP"', ':', 'mutliplier', '=', '1', 'age', '=', 'float', '(', 'er_ages_rec', '[', '"age"', ']', ')', '*', 'mutliplier', 'if', 'age_unit', '==', '"Years BP"', 'or', 'age_unit', '==', '"Years Cal BP"', ':', 'age', '=', '1950', '-', 'age', 'er_ages_rec', '[', "'age_cal_year'", ']', '=', 'age', "# Fix 'age_range_low':", 'age_range_low', '=', 'age', 'age_range_high', '=', 'age', 'age_sigma', '=', '0', 'if', '"age_sigma"', 'in', 'list', '(', 'er_ages_rec', '.', 'keys', '(', ')', ')', 'and', 'er_ages_rec', '[', '"age_sigma"', ']', '!=', '""', ':', 'age_sigma', '=', 'float', '(', 'er_ages_rec', '[', '"age_sigma"', ']', ')', '*', 'mutliplier', 'if', 'age_unit', '==', '"Years BP"', 'or', 'age_unit', '==', '"Years Cal BP"', ':', 'age_sigma', '=', '1950', '-', 'age_sigma', 'age_range_low', '=', 'age', '-', 'age_sigma', 'age_range_high', '=', 'age', '+', 'age_sigma', 'if', '"age_range_high"', 'in', 'list', '(', 'er_ages_rec', '.', 'keys', '(', ')', ')', 'and', '"age_range_low"', 'in', 'list', '(', 'er_ages_rec', '.', 'keys', '(', ')', ')', ':', 'if', 'er_ages_rec', '[', '"age_range_high"', ']', '!=', '""', 'and', 'er_ages_rec', '[', '"age_range_low"', ']', '!=', '""', ':', 'age_range_high', '=', 'float', '(', 'er_ages_rec', '[', '"age_range_high"', ']', ')', '*', 'mutliplier', 'if', 'age_unit', '==', '"Years BP"', 'or', 'age_unit', '==', '"Years Cal BP"', ':', 'age_range_high', '=', '1950', '-', 'age_range_high', 'age_range_low', '=', 'float', '(', 'er_ages_rec', '[', '"age_range_low"', ']', ')', '*', 'mutliplier', 'if', 'age_unit', '==', '"Years BP"', 'or', 'age_unit', '==', '"Years Cal BP"', ':', 'age_range_low', '=', '1950', '-', 'age_range_low', 'er_ages_rec', '[', "'age_cal_year_range_low'", ']', '=', 'age_range_low', 'er_ages_rec', '[', "'age_cal_year_range_high'", ']', '=', 'age_range_high', 'return', '(', 'er_ages_rec', ')']
convert all age units to calendar year Parameters ---------- er_ages_rec : Dict-type object containing, preferably, at least the keys 'age', 'age_unit', and either 'age_range_high', 'age_range_low' or 'age_sigma' Returns ------- er_ages_rec : The same dict object, altered to carry the new keys 'age_cal_year_range_low' and 'age_cal_year_range_high'
['convert', 'all', 'age', 'units', 'to', 'calendar', 'year']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L2773-L2846
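The unit handling above boils down to a small mapping: the geological units Ga/Ma/Ka become (negative) calendar years, while the BP units are anchored to 1950 CE. A distilled, self-contained sketch of just that conversion (the helper name is ours, not PmagPy's):

def age_to_calendar_year(age, age_unit):
    # Ga/Ma/Ka map to negative calendar years; BP counts back from 1950 CE.
    multiplier = {"Ga": -1e9, "Ma": -1e6, "Ka": -1e3}.get(age_unit, 1)
    year = float(age) * multiplier
    if age_unit in ("Years BP", "Years Cal BP"):
        year = 1950 - year
    return year

assert age_to_calendar_year("2.5", "Ka") == -2500.0
assert age_to_calendar_year("100", "Years BP") == 1850.0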
8,222
kennknowles/python-jsonpath-rw
jsonpath_rw/jsonpath.py
DatumInContext.id_pseudopath
def id_pseudopath(self): """ Looks like a path, but with ids stuck in when available """ try: pseudopath = Fields(str(self.value[auto_id_field])) except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions pseudopath = self.path if self.context: return self.context.id_pseudopath.child(pseudopath) else: return pseudopath
python
def id_pseudopath(self): """ Looks like a path, but with ids stuck in when available """ try: pseudopath = Fields(str(self.value[auto_id_field])) except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions pseudopath = self.path if self.context: return self.context.id_pseudopath.child(pseudopath) else: return pseudopath
['def', 'id_pseudopath', '(', 'self', ')', ':', 'try', ':', 'pseudopath', '=', 'Fields', '(', 'str', '(', 'self', '.', 'value', '[', 'auto_id_field', ']', ')', ')', 'except', '(', 'TypeError', ',', 'AttributeError', ',', 'KeyError', ')', ':', '# This may not be all the interesting exceptions', 'pseudopath', '=', 'self', '.', 'path', 'if', 'self', '.', 'context', ':', 'return', 'self', '.', 'context', '.', 'id_pseudopath', '.', 'child', '(', 'pseudopath', ')', 'else', ':', 'return', 'pseudopath']
Looks like a path, but with ids stuck in when available
['Looks', 'like', 'a', 'path', 'but', 'with', 'ids', 'stuck', 'in', 'when', 'available']
train
https://github.com/kennknowles/python-jsonpath-rw/blob/f615451d7b405e23e0f80b15cad03b1427b0256d/jsonpath_rw/jsonpath.py#L97-L109
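In practice the property is reached through the match objects returned by find(); the sketch below assumes the library's auto-id feature is switched on by setting jsonpath.auto_id_field, and the exact string rendering of the paths may differ between versions.

from jsonpath_rw import jsonpath, parse

jsonpath.auto_id_field = 'id'   # opt in; the feature is off by default
data = {'items': [{'id': 'widget-7', 'size': 3}]}
match = parse('items[0].size').find(data)[0]
print(match.full_path)          # positional path, e.g. items.[0].size
print(match.id_pseudopath)      # ids substituted, e.g. items.widget-7.size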
8,223
ejeschke/ginga
ginga/rv/plugins/PlotTable.py
PlotTable.plot_two_columns
def plot_two_columns(self, reset_xlimits=False, reset_ylimits=False): """Simple line plot for two selected columns.""" self.clear_plot() if self.tab is None: # No table data to plot return plt_kw = { 'lw': self.settings.get('linewidth', 1), 'ls': self.settings.get('linestyle', '-'), 'color': self.settings.get('linecolor', 'blue'), 'ms': self.settings.get('markersize', 6), 'mew': self.settings.get('markerwidth', 0.5), 'mfc': self.settings.get('markercolor', 'red')} plt_kw['mec'] = plt_kw['mfc'] try: x_data, y_data, marker = self._get_plot_data() self.tab_plot.plot( x_data, y_data, xtitle=self._get_label('x'), ytitle=self._get_label('y'), marker=marker, **plt_kw) if reset_xlimits: self.set_ylim_cb() self.set_xlimits_widgets() if reset_ylimits: self.set_xlim_cb() self.set_ylimits_widgets() if not (reset_xlimits or reset_ylimits): self.set_xlim_cb(redraw=False) self.set_ylim_cb() except Exception as e: self.logger.error(str(e)) else: self.save_plot.set_enabled(True)
python
def plot_two_columns(self, reset_xlimits=False, reset_ylimits=False): """Simple line plot for two selected columns.""" self.clear_plot() if self.tab is None: # No table data to plot return plt_kw = { 'lw': self.settings.get('linewidth', 1), 'ls': self.settings.get('linestyle', '-'), 'color': self.settings.get('linecolor', 'blue'), 'ms': self.settings.get('markersize', 6), 'mew': self.settings.get('markerwidth', 0.5), 'mfc': self.settings.get('markercolor', 'red')} plt_kw['mec'] = plt_kw['mfc'] try: x_data, y_data, marker = self._get_plot_data() self.tab_plot.plot( x_data, y_data, xtitle=self._get_label('x'), ytitle=self._get_label('y'), marker=marker, **plt_kw) if reset_xlimits: self.set_ylim_cb() self.set_xlimits_widgets() if reset_ylimits: self.set_xlim_cb() self.set_ylimits_widgets() if not (reset_xlimits or reset_ylimits): self.set_xlim_cb(redraw=False) self.set_ylim_cb() except Exception as e: self.logger.error(str(e)) else: self.save_plot.set_enabled(True)
['def', 'plot_two_columns', '(', 'self', ',', 'reset_xlimits', '=', 'False', ',', 'reset_ylimits', '=', 'False', ')', ':', 'self', '.', 'clear_plot', '(', ')', 'if', 'self', '.', 'tab', 'is', 'None', ':', '# No table data to plot', 'return', 'plt_kw', '=', '{', "'lw'", ':', 'self', '.', 'settings', '.', 'get', '(', "'linewidth'", ',', '1', ')', ',', "'ls'", ':', 'self', '.', 'settings', '.', 'get', '(', "'linestyle'", ',', "'-'", ')', ',', "'color'", ':', 'self', '.', 'settings', '.', 'get', '(', "'linecolor'", ',', "'blue'", ')', ',', "'ms'", ':', 'self', '.', 'settings', '.', 'get', '(', "'markersize'", ',', '6', ')', ',', "'mew'", ':', 'self', '.', 'settings', '.', 'get', '(', "'markerwidth'", ',', '0.5', ')', ',', "'mfc'", ':', 'self', '.', 'settings', '.', 'get', '(', "'markercolor'", ',', "'red'", ')', '}', 'plt_kw', '[', "'mec'", ']', '=', 'plt_kw', '[', "'mfc'", ']', 'try', ':', 'x_data', ',', 'y_data', ',', 'marker', '=', 'self', '.', '_get_plot_data', '(', ')', 'self', '.', 'tab_plot', '.', 'plot', '(', 'x_data', ',', 'y_data', ',', 'xtitle', '=', 'self', '.', '_get_label', '(', "'x'", ')', ',', 'ytitle', '=', 'self', '.', '_get_label', '(', "'y'", ')', ',', 'marker', '=', 'marker', ',', '*', '*', 'plt_kw', ')', 'if', 'reset_xlimits', ':', 'self', '.', 'set_ylim_cb', '(', ')', 'self', '.', 'set_xlimits_widgets', '(', ')', 'if', 'reset_ylimits', ':', 'self', '.', 'set_xlim_cb', '(', ')', 'self', '.', 'set_ylimits_widgets', '(', ')', 'if', 'not', '(', 'reset_xlimits', 'or', 'reset_ylimits', ')', ':', 'self', '.', 'set_xlim_cb', '(', 'redraw', '=', 'False', ')', 'self', '.', 'set_ylim_cb', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'logger', '.', 'error', '(', 'str', '(', 'e', ')', ')', 'else', ':', 'self', '.', 'save_plot', '.', 'set_enabled', '(', 'True', ')']
Simple line plot for two selected columns.
['Simple', 'line', 'plot', 'for', 'two', 'selected', 'columns', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/PlotTable.py#L237-L274
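Most of the body just maps a Ginga settings store onto matplotlib's abbreviated kwargs; the same pattern in isolation, with a hypothetical settings dict and plain matplotlib, looks like this:

import matplotlib.pyplot as plt

settings = {'linewidth': 2, 'linecolor': 'green'}   # hypothetical user preferences
plt_kw = {'lw': settings.get('linewidth', 1),
          'ls': settings.get('linestyle', '-'),
          'color': settings.get('linecolor', 'blue'),
          'ms': settings.get('markersize', 6),
          'mew': settings.get('markerwidth', 0.5),
          'mfc': settings.get('markercolor', 'red')}
plt_kw['mec'] = plt_kw['mfc']                       # marker edge follows face color
plt.plot([0, 1, 2], [0, 1, 4], marker='o', **plt_kw)
plt.show()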
8,224
deepmind/sonnet
sonnet/examples/dataset_shakespeare.py
TinyShakespeareDataset._build
def _build(self): """Returns a tuple containing observation and target one-hot tensors.""" q = tf.FIFOQueue( self._queue_capacity, [self._dtype, self._dtype], shapes=[[self._num_steps, self._batch_size, self._vocab_size]]*2) obs, target = tf.py_func(self._get_batch, [], [tf.int32, tf.int32]) obs = self._one_hot(obs) target = self._one_hot(target) enqueue_op = q.enqueue([obs, target]) obs, target = q.dequeue() tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op])) return SequenceDataOpsNoMask(obs, target)
python
def _build(self): """Returns a tuple containing observation and target one-hot tensors.""" q = tf.FIFOQueue( self._queue_capacity, [self._dtype, self._dtype], shapes=[[self._num_steps, self._batch_size, self._vocab_size]]*2) obs, target = tf.py_func(self._get_batch, [], [tf.int32, tf.int32]) obs = self._one_hot(obs) target = self._one_hot(target) enqueue_op = q.enqueue([obs, target]) obs, target = q.dequeue() tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op])) return SequenceDataOpsNoMask(obs, target)
['def', '_build', '(', 'self', ')', ':', 'q', '=', 'tf', '.', 'FIFOQueue', '(', 'self', '.', '_queue_capacity', ',', '[', 'self', '.', '_dtype', ',', 'self', '.', '_dtype', ']', ',', 'shapes', '=', '[', '[', 'self', '.', '_num_steps', ',', 'self', '.', '_batch_size', ',', 'self', '.', '_vocab_size', ']', ']', '*', '2', ')', 'obs', ',', 'target', '=', 'tf', '.', 'py_func', '(', 'self', '.', '_get_batch', ',', '[', ']', ',', '[', 'tf', '.', 'int32', ',', 'tf', '.', 'int32', ']', ')', 'obs', '=', 'self', '.', '_one_hot', '(', 'obs', ')', 'target', '=', 'self', '.', '_one_hot', '(', 'target', ')', 'enqueue_op', '=', 'q', '.', 'enqueue', '(', '[', 'obs', ',', 'target', ']', ')', 'obs', ',', 'target', '=', 'q', '.', 'dequeue', '(', ')', 'tf', '.', 'train', '.', 'add_queue_runner', '(', 'tf', '.', 'train', '.', 'QueueRunner', '(', 'q', ',', '[', 'enqueue_op', ']', ')', ')', 'return', 'SequenceDataOpsNoMask', '(', 'obs', ',', 'target', ')']
Returns a tuple containing observation and target one-hot tensors.
['Returns', 'a', 'tuple', 'containing', 'observation', 'and', 'target', 'one', '-', 'hot', 'tensors', '.']
train
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/examples/dataset_shakespeare.py#L183-L194
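The enqueue/dequeue round-trip is the classic TensorFlow 1.x input-pipeline idiom: a QueueRunner feeds the queue on background threads while the training loop dequeues. A minimal graph-mode sketch, assuming TF 1.x (these queue APIs were removed in 2.x):

import tensorflow as tf  # TensorFlow 1.x graph-mode API assumed

q = tf.FIFOQueue(capacity=4, dtypes=[tf.float32], shapes=[[]])
enqueue_op = q.enqueue([tf.random_uniform([])])
value = q.dequeue()
tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op]))

with tf.train.MonitoredSession() as sess:  # starts the registered queue runners
    print(sess.run(value))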
8,225
ArchiveTeam/wpull
wpull/warc/recorder.py
WARCRecorder.set_length_and_maybe_checksums
def set_length_and_maybe_checksums(self, record, payload_offset=None): '''Set the content length and possibly the checksums.''' if self._params.digests: record.compute_checksum(payload_offset) else: record.set_content_length()
python
def set_length_and_maybe_checksums(self, record, payload_offset=None): '''Set the content length and possibly the checksums.''' if self._params.digests: record.compute_checksum(payload_offset) else: record.set_content_length()
['def', 'set_length_and_maybe_checksums', '(', 'self', ',', 'record', ',', 'payload_offset', '=', 'None', ')', ':', 'if', 'self', '.', '_params', '.', 'digests', ':', 'record', '.', 'compute_checksum', '(', 'payload_offset', ')', 'else', ':', 'record', '.', 'set_content_length', '(', ')']
Set the content length and possibly the checksums.
['Set', 'the', 'content', 'length', 'and', 'possibly', 'the', 'checksums', '.']
train
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/warc/recorder.py#L304-L309
8,226
camptocamp/Studio
studio/controllers/datastores.py
DatastoresController.create
def create(self): """POST /datastores: Create a new item.""" # url('datastores') content = request.environ['wsgi.input'].read(int(request.environ['CONTENT_LENGTH'])) content = content.decode('utf8') content = simplejson.loads(content) new = DataStore(content['name'], content['type'], content['ogrstring']) results = meta.Session.add(new) meta.Session.commit() response.status = 201
python
def create(self): """POST /datastores: Create a new item.""" # url('datastores') content = request.environ['wsgi.input'].read(int(request.environ['CONTENT_LENGTH'])) content = content.decode('utf8') content = simplejson.loads(content) new = DataStore(content['name'], content['type'], content['ogrstring']) results = meta.Session.add(new) meta.Session.commit() response.status = 201
['def', 'create', '(', 'self', ')', ':', "# url('datastores')", 'content', '=', 'request', '.', 'environ', '[', "'wsgi.input'", ']', '.', 'read', '(', 'int', '(', 'request', '.', 'environ', '[', "'CONTENT_LENGTH'", ']', ')', ')', 'content', '=', 'content', '.', 'decode', '(', "'utf8'", ')', 'content', '=', 'simplejson', '.', 'loads', '(', 'content', ')', 'new', '=', 'DataStore', '(', 'content', '[', "'name'", ']', ',', 'content', '[', "'type'", ']', ',', 'content', '[', "'ogrstring'", ']', ')', 'results', '=', 'meta', '.', 'Session', '.', 'add', '(', 'new', ')', 'meta', '.', 'Session', '.', 'commit', '(', ')', 'response', '.', 'status', '=', '201']
POST /datastores: Create a new item.
['POST', '/', 'datastores', ':', 'Create', 'a', 'new', 'item', '.']
train
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/controllers/datastores.py#L53-L64
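Seen from the client side, the endpoint expects a UTF-8 JSON body with name, type and ogrstring, and answers 201 Created; a hedged sketch with a placeholder host and made-up field values:

import json
from urllib import request as urlrequest

payload = json.dumps({'name': 'parcels',           # illustrative values only
                      'type': 'postgis',
                      'ogrstring': 'PG:dbname=gis'}).encode('utf8')
req = urlrequest.Request('http://localhost:5000/datastores',
                         data=payload, method='POST')
with urlrequest.urlopen(req) as resp:
    assert resp.status == 201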
8,227
PhracturedBlue/asterisk_mbox
asterisk_mbox/__init__.py
Client.get_cdr
def get_cdr(self, start=0, count=-1, **kwargs): """Request range of CDR messages""" sha = encode_to_sha("{:d},{:d}".format(start, count)) return self._queue_msg({'cmd': cmd.CMD_MESSAGE_CDR, 'sha': sha}, **kwargs)
python
def get_cdr(self, start=0, count=-1, **kwargs): """Request range of CDR messages""" sha = encode_to_sha("{:d},{:d}".format(start, count)) return self._queue_msg({'cmd': cmd.CMD_MESSAGE_CDR, 'sha': sha}, **kwargs)
['def', 'get_cdr', '(', 'self', ',', 'start', '=', '0', ',', 'count', '=', '-', '1', ',', '*', '*', 'kwargs', ')', ':', 'sha', '=', 'encode_to_sha', '(', '"{:d},{:d}"', '.', 'format', '(', 'start', ',', 'count', ')', ')', 'return', 'self', '.', '_queue_msg', '(', '{', "'cmd'", ':', 'cmd', '.', 'CMD_MESSAGE_CDR', ',', "'sha'", ':', 'sha', '}', ',', '*', '*', 'kwargs', ')']
Request range of CDR messages
['Request', 'range', 'of', 'CDR', 'messages']
train
https://github.com/PhracturedBlue/asterisk_mbox/blob/275de1e71ed05c6acff1a5fa87f754f4d385a372/asterisk_mbox/__init__.py#L231-L235
8,228
Stranger6667/postmarker
postmarker/models/emails.py
EmailBatch.send
def send(self, **extra): """ Sends email batch. :return: Information about sent emails. :rtype: `list` """ emails = self.as_dict(**extra) responses = [self._manager._send_batch(*batch) for batch in chunks(emails, self.MAX_SIZE)] return sum(responses, [])
python
def send(self, **extra): """ Sends email batch. :return: Information about sent emails. :rtype: `list` """ emails = self.as_dict(**extra) responses = [self._manager._send_batch(*batch) for batch in chunks(emails, self.MAX_SIZE)] return sum(responses, [])
['def', 'send', '(', 'self', ',', '*', '*', 'extra', ')', ':', 'emails', '=', 'self', '.', 'as_dict', '(', '*', '*', 'extra', ')', 'responses', '=', '[', 'self', '.', '_manager', '.', '_send_batch', '(', '*', 'batch', ')', 'for', 'batch', 'in', 'chunks', '(', 'emails', ',', 'self', '.', 'MAX_SIZE', ')', ']', 'return', 'sum', '(', 'responses', ',', '[', ']', ')']
Sends email batch. :return: Information about sent emails. :rtype: `list`
['Sends', 'email', 'batch', '.']
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/emails.py#L256-L265
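Two small idioms carry the batching: slicing the payload into fixed-size chunks (Postmark caps batch size, hence MAX_SIZE) and flattening the per-chunk responses with sum(responses, []). In isolation, with chunks reimplemented here as a plausible stand-in for the package's helper:

def chunks(seq, size):
    # Yield consecutive slices of at most `size` items.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

batches = [list(b) for b in chunks(list(range(7)), 3)]  # [[0, 1, 2], [3, 4, 5], [6]]
flat = sum(batches, [])                                 # one flat response list
assert flat == list(range(7))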
8,229
BernardFW/bernard
src/bernard/cli/_live_reload.py
start_child
async def start_child(): """ Start the child process that will look for changes in modules. """ logger.info('Started to watch for code changes') loop = asyncio.get_event_loop() watcher = aionotify.Watcher() flags = ( aionotify.Flags.MODIFY | aionotify.Flags.DELETE | aionotify.Flags.ATTRIB | aionotify.Flags.MOVED_TO | aionotify.Flags.MOVED_FROM | aionotify.Flags.CREATE | aionotify.Flags.DELETE_SELF | aionotify.Flags.MOVE_SELF ) watched_dirs = list_dirs() for dir_name in watched_dirs: watcher.watch(path=dir_name, flags=flags) await watcher.setup(loop) while True: evt = await watcher.get_event() file_path = path.join(evt.alias, evt.name) if file_path in watched_dirs or file_path.endswith('.py'): await asyncio.sleep(settings.CODE_RELOAD_DEBOUNCE) break watcher.close() exit_for_reload()
python
async def start_child(): """ Start the child process that will look for changes in modules. """ logger.info('Started to watch for code changes') loop = asyncio.get_event_loop() watcher = aionotify.Watcher() flags = ( aionotify.Flags.MODIFY | aionotify.Flags.DELETE | aionotify.Flags.ATTRIB | aionotify.Flags.MOVED_TO | aionotify.Flags.MOVED_FROM | aionotify.Flags.CREATE | aionotify.Flags.DELETE_SELF | aionotify.Flags.MOVE_SELF ) watched_dirs = list_dirs() for dir_name in watched_dirs: watcher.watch(path=dir_name, flags=flags) await watcher.setup(loop) while True: evt = await watcher.get_event() file_path = path.join(evt.alias, evt.name) if file_path in watched_dirs or file_path.endswith('.py'): await asyncio.sleep(settings.CODE_RELOAD_DEBOUNCE) break watcher.close() exit_for_reload()
['async', 'def', 'start_child', '(', ')', ':', 'logger', '.', 'info', '(', "'Started to watch for code changes'", ')', 'loop', '=', 'asyncio', '.', 'get_event_loop', '(', ')', 'watcher', '=', 'aionotify', '.', 'Watcher', '(', ')', 'flags', '=', '(', 'aionotify', '.', 'Flags', '.', 'MODIFY', '|', 'aionotify', '.', 'Flags', '.', 'DELETE', '|', 'aionotify', '.', 'Flags', '.', 'ATTRIB', '|', 'aionotify', '.', 'Flags', '.', 'MOVED_TO', '|', 'aionotify', '.', 'Flags', '.', 'MOVED_FROM', '|', 'aionotify', '.', 'Flags', '.', 'CREATE', '|', 'aionotify', '.', 'Flags', '.', 'DELETE_SELF', '|', 'aionotify', '.', 'Flags', '.', 'MOVE_SELF', ')', 'watched_dirs', '=', 'list_dirs', '(', ')', 'for', 'dir_name', 'in', 'watched_dirs', ':', 'watcher', '.', 'watch', '(', 'path', '=', 'dir_name', ',', 'flags', '=', 'flags', ')', 'await', 'watcher', '.', 'setup', '(', 'loop', ')', 'while', 'True', ':', 'evt', '=', 'await', 'watcher', '.', 'get_event', '(', ')', 'file_path', '=', 'path', '.', 'join', '(', 'evt', '.', 'alias', ',', 'evt', '.', 'name', ')', 'if', 'file_path', 'in', 'watched_dirs', 'or', 'file_path', '.', 'endswith', '(', "'.py'", ')', ':', 'await', 'asyncio', '.', 'sleep', '(', 'settings', '.', 'CODE_RELOAD_DEBOUNCE', ')', 'break', 'watcher', '.', 'close', '(', ')', 'exit_for_reload', '(', ')']
Start the child process that will look for changes in modules.
['Start', 'the', 'child', 'process', 'that', 'will', 'look', 'for', 'changes', 'in', 'modules', '.']
train
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/cli/_live_reload.py#L83-L120
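The same aionotify pattern works standalone; the sketch below waits for a single event and exits. It is Linux-only (aionotify wraps inotify), and the watched directory is just an example.

import asyncio
import aionotify

async def watch_once(dirname):
    watcher = aionotify.Watcher()
    watcher.watch(path=dirname,
                  flags=aionotify.Flags.MODIFY | aionotify.Flags.CREATE)
    await watcher.setup(asyncio.get_event_loop())
    evt = await watcher.get_event()      # blocks until something changes
    print(evt.alias, evt.name)
    watcher.close()

asyncio.get_event_loop().run_until_complete(watch_once('/tmp'))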
8,230
CamDavidsonPilon/lifetimes
lifetimes/plotting.py
plot_expected_repeat_purchases
def plot_expected_repeat_purchases( model, title="Expected Number of Repeat Purchases per Customer", xlabel="Time Since First Purchase", ax=None, label=None, **kwargs ): """ Plot expected repeat purchases on calibration period . Parameters ---------- model: lifetimes model A fitted lifetimes model. max_frequency: int, optional The maximum frequency to plot. title: str, optional Figure title xlabel: str, optional Figure xlabel ax: matplotlib.AxesSubplot, optional Using user axes label: str, optional Label for plot. kwargs Passed into the matplotlib.pyplot.plot command. Returns ------- axes: matplotlib.AxesSubplot """ from matplotlib import pyplot as plt if ax is None: ax = plt.subplot(111) if plt.matplotlib.__version__ >= "1.5": color_cycle = ax._get_lines.prop_cycler color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle)["color"]) else: color_cycle = ax._get_lines.color_cycle color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle)) max_T = model.data["T"].max() times = np.linspace(0, max_T, 100) ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, label=label, **kwargs) times = np.linspace(max_T, 1.5 * max_T, 100) ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, ls="--", **kwargs) plt.title(title) plt.xlabel(xlabel) plt.legend(loc="lower right") return ax
python
def plot_expected_repeat_purchases( model, title="Expected Number of Repeat Purchases per Customer", xlabel="Time Since First Purchase", ax=None, label=None, **kwargs ): """ Plot expected repeat purchases on calibration period . Parameters ---------- model: lifetimes model A fitted lifetimes model. max_frequency: int, optional The maximum frequency to plot. title: str, optional Figure title xlabel: str, optional Figure xlabel ax: matplotlib.AxesSubplot, optional Using user axes label: str, optional Label for plot. kwargs Passed into the matplotlib.pyplot.plot command. Returns ------- axes: matplotlib.AxesSubplot """ from matplotlib import pyplot as plt if ax is None: ax = plt.subplot(111) if plt.matplotlib.__version__ >= "1.5": color_cycle = ax._get_lines.prop_cycler color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle)["color"]) else: color_cycle = ax._get_lines.color_cycle color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle)) max_T = model.data["T"].max() times = np.linspace(0, max_T, 100) ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, label=label, **kwargs) times = np.linspace(max_T, 1.5 * max_T, 100) ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, ls="--", **kwargs) plt.title(title) plt.xlabel(xlabel) plt.legend(loc="lower right") return ax
['def', 'plot_expected_repeat_purchases', '(', 'model', ',', 'title', '=', '"Expected Number of Repeat Purchases per Customer"', ',', 'xlabel', '=', '"Time Since First Purchase"', ',', 'ax', '=', 'None', ',', 'label', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'from', 'matplotlib', 'import', 'pyplot', 'as', 'plt', 'if', 'ax', 'is', 'None', ':', 'ax', '=', 'plt', '.', 'subplot', '(', '111', ')', 'if', 'plt', '.', 'matplotlib', '.', '__version__', '>=', '"1.5"', ':', 'color_cycle', '=', 'ax', '.', '_get_lines', '.', 'prop_cycler', 'color', '=', 'coalesce', '(', 'kwargs', '.', 'pop', '(', '"c"', ',', 'None', ')', ',', 'kwargs', '.', 'pop', '(', '"color"', ',', 'None', ')', ',', 'next', '(', 'color_cycle', ')', '[', '"color"', ']', ')', 'else', ':', 'color_cycle', '=', 'ax', '.', '_get_lines', '.', 'color_cycle', 'color', '=', 'coalesce', '(', 'kwargs', '.', 'pop', '(', '"c"', ',', 'None', ')', ',', 'kwargs', '.', 'pop', '(', '"color"', ',', 'None', ')', ',', 'next', '(', 'color_cycle', ')', ')', 'max_T', '=', 'model', '.', 'data', '[', '"T"', ']', '.', 'max', '(', ')', 'times', '=', 'np', '.', 'linspace', '(', '0', ',', 'max_T', ',', '100', ')', 'ax', '.', 'plot', '(', 'times', ',', 'model', '.', 'expected_number_of_purchases_up_to_time', '(', 'times', ')', ',', 'color', '=', 'color', ',', 'label', '=', 'label', ',', '*', '*', 'kwargs', ')', 'times', '=', 'np', '.', 'linspace', '(', 'max_T', ',', '1.5', '*', 'max_T', ',', '100', ')', 'ax', '.', 'plot', '(', 'times', ',', 'model', '.', 'expected_number_of_purchases_up_to_time', '(', 'times', ')', ',', 'color', '=', 'color', ',', 'ls', '=', '"--"', ',', '*', '*', 'kwargs', ')', 'plt', '.', 'title', '(', 'title', ')', 'plt', '.', 'xlabel', '(', 'xlabel', ')', 'plt', '.', 'legend', '(', 'loc', '=', '"lower right"', ')', 'return', 'ax']
Plot expected repeat purchases on the calibration period. Parameters ---------- model: lifetimes model A fitted lifetimes model. title: str, optional Figure title xlabel: str, optional Figure xlabel ax: matplotlib.AxesSubplot, optional Plot into these user-supplied axes label: str, optional Label for plot. kwargs Passed into the matplotlib.pyplot.plot command. Returns ------- axes: matplotlib.AxesSubplot
['Plot', 'expected', 'repeat', 'purchases', 'on', 'calibration', 'period', '.']
train
https://github.com/CamDavidsonPilon/lifetimes/blob/f926308bc03c17c1d12fead729de43885cf13321/lifetimes/plotting.py#L270-L326
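End to end, the function expects an already-fitted model; a hedged sketch using the CDNOW sample data bundled with lifetimes (solid curve over the observed horizon, dashed extrapolation to 1.5x beyond it):

from lifetimes import BetaGeoFitter
from lifetimes.datasets import load_cdnow_summary
from lifetimes.plotting import plot_expected_repeat_purchases

data = load_cdnow_summary()                 # frequency / recency / T columns
model = BetaGeoFitter(penalizer_coef=0.0)
model.fit(data['frequency'], data['recency'], data['T'])
ax = plot_expected_repeat_purchases(model, label='BG/NBD fit')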
8,231
LonamiWebs/Telethon
telethon/tl/custom/button.py
Button.request_location
def request_location(cls, text, *, resize=None, single_use=None, selective=None): """ Creates a new button that will request the user's location upon being clicked. ``resize``, ``single_use`` and ``selective`` are documented in `text`. """ return cls(types.KeyboardButtonRequestGeoLocation(text), resize=resize, single_use=single_use, selective=selective)
python
def request_location(cls, text, *, resize=None, single_use=None, selective=None): """ Creates a new button that will request the user's location upon being clicked. ``resize``, ``single_use`` and ``selective`` are documented in `text`. """ return cls(types.KeyboardButtonRequestGeoLocation(text), resize=resize, single_use=single_use, selective=selective)
['def', 'request_location', '(', 'cls', ',', 'text', ',', '*', ',', 'resize', '=', 'None', ',', 'single_use', '=', 'None', ',', 'selective', '=', 'None', ')', ':', 'return', 'cls', '(', 'types', '.', 'KeyboardButtonRequestGeoLocation', '(', 'text', ')', ',', 'resize', '=', 'resize', ',', 'single_use', '=', 'single_use', ',', 'selective', '=', 'selective', ')']
Creates a new button that will request the user's location upon being clicked. ``resize``, ``single_use`` and ``selective`` are documented in `text`.
['Creates', 'a', 'new', 'button', 'that', 'will', 'request', 'the', 'user', 's', 'location', 'upon', 'being', 'clicked', '.']
train
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/button.py#L122-L131
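In context the button goes into send_message's buttons= argument. Only bot accounts may attach reply keyboards, and every credential below is a placeholder, not a working value.

import asyncio
from telethon import TelegramClient
from telethon.tl.custom import Button

async def main():
    # api_id / api_hash / bot_token / chat are placeholders.
    bot = await TelegramClient('bot', 12345, 'abcdef').start(bot_token='TOKEN')
    await bot.send_message(
        'some_user', 'Please share your location',
        buttons=Button.request_location('Send my location', single_use=True))

asyncio.get_event_loop().run_until_complete(main())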
8,232
djgagne/hagelslag
hagelslag/processing/ObjectMatcher.py
centroid_distance
def centroid_distance(item_a, time_a, item_b, time_b, max_value): """ Euclidean distance between the centroids of item_a and item_b. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """ ax, ay = item_a.center_of_mass(time_a) bx, by = item_b.center_of_mass(time_b) return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
python
def centroid_distance(item_a, time_a, item_b, time_b, max_value): """ Euclidean distance between the centroids of item_a and item_b. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """ ax, ay = item_a.center_of_mass(time_a) bx, by = item_b.center_of_mass(time_b) return np.minimum(np.sqrt((ax - bx) ** 2 + (ay - by) ** 2), max_value) / float(max_value)
['def', 'centroid_distance', '(', 'item_a', ',', 'time_a', ',', 'item_b', ',', 'time_b', ',', 'max_value', ')', ':', 'ax', ',', 'ay', '=', 'item_a', '.', 'center_of_mass', '(', 'time_a', ')', 'bx', ',', 'by', '=', 'item_b', '.', 'center_of_mass', '(', 'time_b', ')', 'return', 'np', '.', 'minimum', '(', 'np', '.', 'sqrt', '(', '(', 'ax', '-', 'bx', ')', '**', '2', '+', '(', 'ay', '-', 'by', ')', '**', '2', ')', ',', 'max_value', ')', '/', 'float', '(', 'max_value', ')']
Euclidean distance between the centroids of item_a and item_b. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
['Euclidean', 'distance', 'between', 'the', 'centroids', 'of', 'item_a', 'and', 'item_b', '.']
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/ObjectMatcher.py#L246-L262
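A worked check with stand-in objects (a real STObject needs gridded data, so a two-method stub is enough here): centroids 50 grid units apart give 0.5 against a 100-unit cap and saturate at 1.0 once the cap is exceeded. Assumes centroid_distance from above is in scope.

class FakeObject:
    # Stub exposing only what centroid_distance calls.
    def __init__(self, x, y):
        self._com = (x, y)
    def center_of_mass(self, time):
        return self._com

a, b = FakeObject(0, 0), FakeObject(30, 40)          # 3-4-5 triangle: distance 50
print(centroid_distance(a, 0, b, 0, max_value=100))  # 0.5
print(centroid_distance(a, 0, b, 0, max_value=25))   # 1.0 (capped)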
8,233
quasipedia/swaggery
swaggery/checker.py
Checker.checks
def checks(self): '''Return the list of all check methods.''' condition = lambda a: a.startswith('check_') return (getattr(self, a) for a in dir(self) if condition(a))
python
def checks(self): '''Return the list of all check methods.''' condition = lambda a: a.startswith('check_') return (getattr(self, a) for a in dir(self) if condition(a))
['def', 'checks', '(', 'self', ')', ':', 'condition', '=', 'lambda', 'a', ':', 'a', '.', 'startswith', '(', "'check_'", ')', 'return', '(', 'getattr', '(', 'self', ',', 'a', ')', 'for', 'a', 'in', 'dir', '(', 'self', ')', 'if', 'condition', '(', 'a', ')', ')']
Return the list of all check methods.
['Return', 'the', 'list', 'of', 'all', 'check', 'methods', '.']
train
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/checker.py#L43-L46
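The same naming-convention reflection works on any class, since dir() lists own and inherited attributes alphabetically; a self-contained illustration:

class Demo:
    def check_syntax(self):
        return 'syntax ok'
    def check_types(self):
        return 'types ok'
    def helper(self):
        return 'never collected'
    def checks(self):
        return (getattr(self, a) for a in dir(self) if a.startswith('check_'))

print([fn() for fn in Demo().checks()])   # ['syntax ok', 'types ok']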
8,234
Julius2342/pyvlx
pyvlx/nodes.py
Nodes.add
def add(self, node): """Add Node, replace existing node if node with node_id is present.""" if not isinstance(node, Node): raise TypeError() for i, j in enumerate(self.__nodes): if j.node_id == node.node_id: self.__nodes[i] = node return self.__nodes.append(node)
python
def add(self, node): """Add Node, replace existing node if node with node_id is present.""" if not isinstance(node, Node): raise TypeError() for i, j in enumerate(self.__nodes): if j.node_id == node.node_id: self.__nodes[i] = node return self.__nodes.append(node)
['def', 'add', '(', 'self', ',', 'node', ')', ':', 'if', 'not', 'isinstance', '(', 'node', ',', 'Node', ')', ':', 'raise', 'TypeError', '(', ')', 'for', 'i', ',', 'j', 'in', 'enumerate', '(', 'self', '.', '__nodes', ')', ':', 'if', 'j', '.', 'node_id', '==', 'node', '.', 'node_id', ':', 'self', '.', '__nodes', '[', 'i', ']', '=', 'node', 'return', 'self', '.', '__nodes', '.', 'append', '(', 'node', ')']
Add Node, replace existing node if node with node_id is present.
['Add', 'Node', 'replace', 'existing', 'node', 'if', 'node', 'with', 'node_id', 'is', 'present', '.']
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L51-L59
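The replace-or-append scan in isolation, with a minimal stand-in for Node (the real class carries much more state):

class Node:
    def __init__(self, node_id, name):
        self.node_id, self.name = node_id, name

nodes = []

def add(node):
    # Replace in place when the id is already known, append otherwise.
    for i, existing in enumerate(nodes):
        if existing.node_id == node.node_id:
            nodes[i] = node
            return
    nodes.append(node)

add(Node(23, 'Window left'))
add(Node(23, 'Window left, renamed'))   # same id: replaced, not appended
assert len(nodes) == 1 and nodes[0].name == 'Window left, renamed'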
8,235
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/publisher/plos.py
PLoS.move_back_boxed_texts
def move_back_boxed_texts(self): """ The only intended use for this function is to patch a problem seen in at least one PLoS article (journal.pgen.0020002). This will move any <boxed-text> elements over to the receiving element, which is probably the main body. """ body = self.main.getroot().find('body') back = self.article.root.find('back') if back is None: return boxed_texts = back.xpath('.//boxed-text') for boxed_text in boxed_texts: body.append(deepcopy(boxed_text))
python
def move_back_boxed_texts(self): """ The only intended use for this function is to patch a problem seen in at least one PLoS article (journal.pgen.0020002). This will move any <boxed-text> elements over to the receiving element, which is probably the main body. """ body = self.main.getroot().find('body') back = self.article.root.find('back') if back is None: return boxed_texts = back.xpath('.//boxed-text') for boxed_text in boxed_texts: body.append(deepcopy(boxed_text))
['def', 'move_back_boxed_texts', '(', 'self', ')', ':', 'body', '=', 'self', '.', 'main', '.', 'getroot', '(', ')', '.', 'find', '(', "'body'", ')', 'back', '=', 'self', '.', 'article', '.', 'root', '.', 'find', '(', "'back'", ')', 'if', 'back', 'is', 'None', ':', 'return', 'boxed_texts', '=', 'back', '.', 'xpath', '(', "'.//boxed-text'", ')', 'for', 'boxed_text', 'in', 'boxed_texts', ':', 'body', '.', 'append', '(', 'deepcopy', '(', 'boxed_text', ')', ')']
The only intended use for this function is to patch a problem seen in at least one PLoS article (journal.pgen.0020002). This will move any <boxed-text> elements over to the receiving element, which is probably the main body.
['The', 'only', 'intended', 'use', 'for', 'this', 'function', 'is', 'to', 'patch', 'a', 'problem', 'seen', 'in', 'at', 'least', 'one', 'PLoS', 'article', '(', 'journal', '.', 'pgen', '.', '0020002', ')', '.', 'This', 'will', 'move', 'any', '<boxed', '-', 'text', '>', 'elements', 'over', 'to', 'the', 'receiving', 'element', 'which', 'is', 'probably', 'the', 'main', 'body', '.']
train
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L682-L695
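Note that despite the "move" in the name, deepcopy leaves the originals in <back> untouched; appending the element itself would move it, because an lxml element has exactly one parent. A standalone sketch of the copy variant:

from copy import deepcopy
from lxml import etree

back = etree.fromstring(
    '<back><boxed-text id="b1"/><sec><boxed-text id="b2"/></sec></back>')
body = etree.Element('body')
for boxed_text in back.xpath('.//boxed-text'):
    body.append(deepcopy(boxed_text))        # copies, so <back> keeps its elements

print([el.get('id') for el in body])         # ['b1', 'b2']
print(len(back.xpath('.//boxed-text')))      # still 2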
8,236
mozilla/mozilla-django-oidc
mozilla_django_oidc/middleware.py
SessionRefresh.is_refreshable_url
def is_refreshable_url(self, request): """Takes a request and returns whether it triggers a refresh examination :arg HttpRequest request: :returns: boolean """ # Do not attempt to refresh the session if the OIDC backend is not used backend_session = request.session.get(BACKEND_SESSION_KEY) is_oidc_enabled = True if backend_session: auth_backend = import_string(backend_session) is_oidc_enabled = issubclass(auth_backend, OIDCAuthenticationBackend) return ( request.method == 'GET' and is_authenticated(request.user) and is_oidc_enabled and request.path not in self.exempt_urls )
python
def is_refreshable_url(self, request): """Takes a request and returns whether it triggers a refresh examination :arg HttpRequest request: :returns: boolean """ # Do not attempt to refresh the session if the OIDC backend is not used backend_session = request.session.get(BACKEND_SESSION_KEY) is_oidc_enabled = True if backend_session: auth_backend = import_string(backend_session) is_oidc_enabled = issubclass(auth_backend, OIDCAuthenticationBackend) return ( request.method == 'GET' and is_authenticated(request.user) and is_oidc_enabled and request.path not in self.exempt_urls )
['def', 'is_refreshable_url', '(', 'self', ',', 'request', ')', ':', '# Do not attempt to refresh the session if the OIDC backend is not used', 'backend_session', '=', 'request', '.', 'session', '.', 'get', '(', 'BACKEND_SESSION_KEY', ')', 'is_oidc_enabled', '=', 'True', 'if', 'backend_session', ':', 'auth_backend', '=', 'import_string', '(', 'backend_session', ')', 'is_oidc_enabled', '=', 'issubclass', '(', 'auth_backend', ',', 'OIDCAuthenticationBackend', ')', 'return', '(', 'request', '.', 'method', '==', "'GET'", 'and', 'is_authenticated', '(', 'request', '.', 'user', ')', 'and', 'is_oidc_enabled', 'and', 'request', '.', 'path', 'not', 'in', 'self', '.', 'exempt_urls', ')']
Takes a request and returns whether it triggers a refresh examination :arg HttpRequest request: :returns: boolean
['Takes', 'a', 'request', 'and', 'returns', 'whether', 'it', 'triggers', 'a', 'refresh', 'examination']
train
https://github.com/mozilla/mozilla-django-oidc/blob/e780130deacccbafc85a92f48d1407e042f5f955/mozilla_django_oidc/middleware.py#L67-L87
8,237
bukun/TorCMS
torcms/handlers/collect_handler.py
CollectHandler.add_or_update
def add_or_update(self, app_id): ''' Add or update the category. ''' logger.info('Collect info: user-{0}, uid-{1}'.format(self.userinfo.uid, app_id)) MCollect.add_or_update(self.userinfo.uid, app_id) out_dic = {'success': True} return json.dump(out_dic, self)
python
def add_or_update(self, app_id): ''' Add or update the category. ''' logger.info('Collect info: user-{0}, uid-{1}'.format(self.userinfo.uid, app_id)) MCollect.add_or_update(self.userinfo.uid, app_id) out_dic = {'success': True} return json.dump(out_dic, self)
['def', 'add_or_update', '(', 'self', ',', 'app_id', ')', ':', 'logger', '.', 'info', '(', "'Collect info: user-{0}, uid-{1}'", '.', 'format', '(', 'self', '.', 'userinfo', '.', 'uid', ',', 'app_id', ')', ')', 'MCollect', '.', 'add_or_update', '(', 'self', '.', 'userinfo', '.', 'uid', ',', 'app_id', ')', 'out_dic', '=', '{', "'success'", ':', 'True', '}', 'return', 'json', '.', 'dump', '(', 'out_dic', ',', 'self', ')']
Add or update the collect record for the current user and item.
['Add', 'or', 'update', 'the', 'category', '.']
train
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/collect_handler.py#L44-L51
8,238
lsbardel/python-stdnet
stdnet/odm/related.py
makeMany2ManyRelatedManager
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel): '''formodel is the model which the manager operates on.''' class _Many2ManyRelatedManager(Many2ManyRelatedManager): pass _Many2ManyRelatedManager.formodel = formodel _Many2ManyRelatedManager.name_relmodel = name_relmodel _Many2ManyRelatedManager.name_formodel = name_formodel return _Many2ManyRelatedManager
python
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel): '''formodel is the model which the manager operates on.''' class _Many2ManyRelatedManager(Many2ManyRelatedManager): pass _Many2ManyRelatedManager.formodel = formodel _Many2ManyRelatedManager.name_relmodel = name_relmodel _Many2ManyRelatedManager.name_formodel = name_formodel return _Many2ManyRelatedManager
['def', 'makeMany2ManyRelatedManager', '(', 'formodel', ',', 'name_relmodel', ',', 'name_formodel', ')', ':', 'class', '_Many2ManyRelatedManager', '(', 'Many2ManyRelatedManager', ')', ':', 'pass', '_Many2ManyRelatedManager', '.', 'formodel', '=', 'formodel', '_Many2ManyRelatedManager', '.', 'name_relmodel', '=', 'name_relmodel', '_Many2ManyRelatedManager', '.', 'name_formodel', '=', 'name_formodel', 'return', '_Many2ManyRelatedManager']
formodel is the model which the manager operates on.
['formodel', 'is', 'the', 'model', 'which', 'the', 'manager', '.']
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/related.py#L265-L274
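Defining a class inside a factory is one way to stamp per-call attributes onto a fresh subclass; the built-in type() constructor expresses the same thing more compactly. A sketch with a stand-in base class and made-up model names:

class Many2ManyRelatedManager:          # stand-in for the real base class
    pass

def make_manager(formodel, name_relmodel, name_formodel):
    return type('_Many2ManyRelatedManager', (Many2ManyRelatedManager,), {
        'formodel': formodel,
        'name_relmodel': name_relmodel,
        'name_formodel': name_formodel,
    })

Mgr = make_manager('Role', 'role', 'profile')   # illustrative names
assert issubclass(Mgr, Many2ManyRelatedManager) and Mgr.formodel == 'Role'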
8,239
mabuchilab/QNET
src/qnet/algebra/core/state_algebra.py
BasisKet.next
def next(self, n=1): """Move up by `n` steps in the Hilbert space:: >>> hs = LocalSpace('tls', basis=('g', 'e')) >>> ascii(BasisKet('g', hs=hs).next()) '|e>^(tls)' >>> ascii(BasisKet(0, hs=hs).next()) '|e>^(tls)' We can also go multiple steps: >>> hs = LocalSpace('ten', dimension=10) >>> ascii(BasisKet(0, hs=hs).next(2)) '|2>^(ten)' An increment that leads out of the Hilbert space returns zero:: >>> BasisKet(0, hs=hs).next(10) ZeroKet """ if isinstance(self.label, SymbolicLabelBase): next_label = self.space.next_basis_label_or_index( self.label, n) return BasisKet(next_label, hs=self.space) else: try: next_index = self.space.next_basis_label_or_index( self.index, n) return BasisKet(next_index, hs=self.space) except IndexError: return ZeroKet
python
def next(self, n=1): """Move up by `n` steps in the Hilbert space:: >>> hs = LocalSpace('tls', basis=('g', 'e')) >>> ascii(BasisKet('g', hs=hs).next()) '|e>^(tls)' >>> ascii(BasisKet(0, hs=hs).next()) '|e>^(tls)' We can also go multiple steps: >>> hs = LocalSpace('ten', dimension=10) >>> ascii(BasisKet(0, hs=hs).next(2)) '|2>^(ten)' An increment that leads out of the Hilbert space returns zero:: >>> BasisKet(0, hs=hs).next(10) ZeroKet """ if isinstance(self.label, SymbolicLabelBase): next_label = self.space.next_basis_label_or_index( self.label, n) return BasisKet(next_label, hs=self.space) else: try: next_index = self.space.next_basis_label_or_index( self.index, n) return BasisKet(next_index, hs=self.space) except IndexError: return ZeroKet
['def', 'next', '(', 'self', ',', 'n', '=', '1', ')', ':', 'if', 'isinstance', '(', 'self', '.', 'label', ',', 'SymbolicLabelBase', ')', ':', 'next_label', '=', 'self', '.', 'space', '.', 'next_basis_label_or_index', '(', 'self', '.', 'label', ',', 'n', ')', 'return', 'BasisKet', '(', 'next_label', ',', 'hs', '=', 'self', '.', 'space', ')', 'else', ':', 'try', ':', 'next_index', '=', 'self', '.', 'space', '.', 'next_basis_label_or_index', '(', 'self', '.', 'index', ',', 'n', ')', 'return', 'BasisKet', '(', 'next_index', ',', 'hs', '=', 'self', '.', 'space', ')', 'except', 'IndexError', ':', 'return', 'ZeroKet']
Move up by `n` steps in the Hilbert space:: >>> hs = LocalSpace('tls', basis=('g', 'e')) >>> ascii(BasisKet('g', hs=hs).next()) '|e>^(tls)' >>> ascii(BasisKet(0, hs=hs).next()) '|e>^(tls)' We can also go multiple steps: >>> hs = LocalSpace('ten', dimension=10) >>> ascii(BasisKet(0, hs=hs).next(2)) '|2>^(ten)' An increment that leads out of the Hilbert space returns zero:: >>> BasisKet(0, hs=hs).next(10) ZeroKet
['Move', 'up', 'by', 'n', 'steps', 'in', 'the', 'Hilbert', 'space', '::']
train
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/state_algebra.py#L322-L353
8,240
spyder-ide/spyder-kernels
spyder_kernels/console/kernel.py
SpyderKernel.load_data
def load_data(self, filename, ext): """Load data from filename""" from spyder_kernels.utils.iofuncs import iofunctions from spyder_kernels.utils.misc import fix_reference_name glbs = self._mglobals() load_func = iofunctions.load_funcs[ext] data, error_message = load_func(filename) if error_message: return error_message for key in list(data.keys()): new_key = fix_reference_name(key, blacklist=list(glbs.keys())) if new_key != key: data[new_key] = data.pop(key) try: glbs.update(data) except Exception as error: return str(error) return None
python
def load_data(self, filename, ext): """Load data from filename""" from spyder_kernels.utils.iofuncs import iofunctions from spyder_kernels.utils.misc import fix_reference_name glbs = self._mglobals() load_func = iofunctions.load_funcs[ext] data, error_message = load_func(filename) if error_message: return error_message for key in list(data.keys()): new_key = fix_reference_name(key, blacklist=list(glbs.keys())) if new_key != key: data[new_key] = data.pop(key) try: glbs.update(data) except Exception as error: return str(error) return None
['def', 'load_data', '(', 'self', ',', 'filename', ',', 'ext', ')', ':', 'from', 'spyder_kernels', '.', 'utils', '.', 'iofuncs', 'import', 'iofunctions', 'from', 'spyder_kernels', '.', 'utils', '.', 'misc', 'import', 'fix_reference_name', 'glbs', '=', 'self', '.', '_mglobals', '(', ')', 'load_func', '=', 'iofunctions', '.', 'load_funcs', '[', 'ext', ']', 'data', ',', 'error_message', '=', 'load_func', '(', 'filename', ')', 'if', 'error_message', ':', 'return', 'error_message', 'for', 'key', 'in', 'list', '(', 'data', '.', 'keys', '(', ')', ')', ':', 'new_key', '=', 'fix_reference_name', '(', 'key', ',', 'blacklist', '=', 'list', '(', 'glbs', '.', 'keys', '(', ')', ')', ')', 'if', 'new_key', '!=', 'key', ':', 'data', '[', 'new_key', ']', '=', 'data', '.', 'pop', '(', 'key', ')', 'try', ':', 'glbs', '.', 'update', '(', 'data', ')', 'except', 'Exception', 'as', 'error', ':', 'return', 'str', '(', 'error', ')', 'return', 'None']
Load data from filename
['Load', 'data', 'from', 'filename']
train
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L194-L217
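One detail worth copying: the loop snapshots the keys with list(data.keys()) because it renames entries while looping, and mutating a dict during direct iteration raises RuntimeError. The pattern in isolation (the rename scheme below is illustrative; the real fix_reference_name lives in spyder_kernels.utils.misc):

data = {'lambda': 1, 'ok': 2}        # 'lambda' would clash as a variable name
for key in list(data.keys()):        # snapshot, so mutation below is safe
    if key == 'lambda':
        data['lambda_'] = data.pop(key)
print(data)                          # {'ok': 2, 'lambda_': 1}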
8,241
jlmadurga/permabots
permabots/views/hooks/telegram_hook.py
TelegramHookView.post
def post(self, request, hook_id): """ Process Telegram webhook. 1. Serialize Telegram message 2. Get an enabled Telegram bot 3. Create :class:`Update <permabots.models.telegram_api.Update>` 5. Delay processing to a task 6. Response provider """ serializer = UpdateSerializer(data=request.data) if serializer.is_valid(): try: bot = caching.get_or_set(TelegramBot, hook_id) except TelegramBot.DoesNotExist: logger.warning("Hook id %s not associated to an bot" % hook_id) return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND) try: update = self.create_update(serializer, bot) if bot.enabled: logger.debug("Telegram Bot %s attending request %s" % (bot.token, request.data)) handle_update.delay(update.id, bot.id) else: logger.error("Update %s ignored by disabled bot %s" % (update, bot.token)) except OnlyTextMessages: logger.warning("Not text message %s for bot %s" % (request.data, hook_id)) return Response(status=status.HTTP_200_OK) except: exc_info = sys.exc_info() traceback.print_exception(*exc_info) logger.error("Error processing %s for bot %s" % (request.data, hook_id)) return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR) else: return Response(serializer.data, status=status.HTTP_200_OK) logger.error("Validation error: %s from message %s" % (serializer.errors, request.data)) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
python
def post(self, request, hook_id):
    """
    Process Telegram webhook.
        1. Serialize Telegram message
        2. Get an enabled Telegram bot
        3. Create :class:`Update <permabots.models.telegram_api.Update>`
        5. Delay processing to a task
        6. Response provider
    """
    serializer = UpdateSerializer(data=request.data)
    if serializer.is_valid():
        try:
            bot = caching.get_or_set(TelegramBot, hook_id)
        except TelegramBot.DoesNotExist:
            logger.warning("Hook id %s not associated to an bot" % hook_id)
            return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)
        try:
            update = self.create_update(serializer, bot)
            if bot.enabled:
                logger.debug("Telegram Bot %s attending request %s" % (bot.token, request.data))
                handle_update.delay(update.id, bot.id)
            else:
                logger.error("Update %s ignored by disabled bot %s" % (update, bot.token))
        except OnlyTextMessages:
            logger.warning("Not text message %s for bot %s" % (request.data, hook_id))
            return Response(status=status.HTTP_200_OK)
        except:
            exc_info = sys.exc_info()
            traceback.print_exception(*exc_info)
            logger.error("Error processing %s for bot %s" % (request.data, hook_id))
            return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        else:
            return Response(serializer.data, status=status.HTTP_200_OK)
    logger.error("Validation error: %s from message %s" % (serializer.errors, request.data))
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
['def', 'post', '(', 'self', ',', 'request', ',', 'hook_id', ')', ':', 'serializer', '=', 'UpdateSerializer', '(', 'data', '=', 'request', '.', 'data', ')', 'if', 'serializer', '.', 'is_valid', '(', ')', ':', 'try', ':', 'bot', '=', 'caching', '.', 'get_or_set', '(', 'TelegramBot', ',', 'hook_id', ')', 'except', 'TelegramBot', '.', 'DoesNotExist', ':', 'logger', '.', 'warning', '(', '"Hook id %s not associated to an bot"', '%', 'hook_id', ')', 'return', 'Response', '(', 'serializer', '.', 'errors', ',', 'status', '=', 'status', '.', 'HTTP_404_NOT_FOUND', ')', 'try', ':', 'update', '=', 'self', '.', 'create_update', '(', 'serializer', ',', 'bot', ')', 'if', 'bot', '.', 'enabled', ':', 'logger', '.', 'debug', '(', '"Telegram Bot %s attending request %s"', '%', '(', 'bot', '.', 'token', ',', 'request', '.', 'data', ')', ')', 'handle_update', '.', 'delay', '(', 'update', '.', 'id', ',', 'bot', '.', 'id', ')', 'else', ':', 'logger', '.', 'error', '(', '"Update %s ignored by disabled bot %s"', '%', '(', 'update', ',', 'bot', '.', 'token', ')', ')', 'except', 'OnlyTextMessages', ':', 'logger', '.', 'warning', '(', '"Not text message %s for bot %s"', '%', '(', 'request', '.', 'data', ',', 'hook_id', ')', ')', 'return', 'Response', '(', 'status', '=', 'status', '.', 'HTTP_200_OK', ')', 'except', ':', 'exc_info', '=', 'sys', '.', 'exc_info', '(', ')', 'traceback', '.', 'print_exception', '(', '*', 'exc_info', ')', 'logger', '.', 'error', '(', '"Error processing %s for bot %s"', '%', '(', 'request', '.', 'data', ',', 'hook_id', ')', ')', 'return', 'Response', '(', 'serializer', '.', 'errors', ',', 'status', '=', 'status', '.', 'HTTP_500_INTERNAL_SERVER_ERROR', ')', 'else', ':', 'return', 'Response', '(', 'serializer', '.', 'data', ',', 'status', '=', 'status', '.', 'HTTP_200_OK', ')', 'logger', '.', 'error', '(', '"Validation error: %s from message %s"', '%', '(', 'serializer', '.', 'errors', ',', 'request', '.', 'data', ')', ')', 'return', 'Response', '(', 'serializer', '.', 'errors', ',', 'status', '=', 'status', '.', 'HTTP_400_BAD_REQUEST', ')']
Process Telegram webhook.
    1. Serialize Telegram message
    2. Get an enabled Telegram bot
    3. Create :class:`Update <permabots.models.telegram_api.Update>`
    5. Delay processing to a task
    6. Response provider
['Process', 'Telegram', 'webhook', '.', '1', '.', 'Serialize', 'Telegram', 'message', '2', '.', 'Get', 'an', 'enabled', 'Telegram', 'bot', '3', '.', 'Create', ':', 'class', ':', 'Update', '<permabots', '.', 'models', '.', 'telegram_api', '.', 'Update', '>', '5', '.', 'Delay', 'processing', 'to', 'a', 'task', '6', '.', 'Response', 'provider']
train
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/hooks/telegram_hook.py#L87-L121
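The handler above follows a common webhook shape: validate the payload, look up the receiver, defer heavy work to a task queue, and answer the provider quickly. A framework-free sketch of that flow (all names here are hypothetical, not the permabots API):

import logging

logger = logging.getLogger(__name__)

def handle_webhook(payload, bot_registry, task_queue):
    # 1. Validate the incoming payload (stands in for UpdateSerializer).
    if 'update_id' not in payload:
        return 400, {'error': 'invalid update'}
    # 2. Look up the receiving bot (stands in for caching.get_or_set).
    bot = bot_registry.get(payload.get('bot_id'))
    if bot is None:
        return 404, {'error': 'unknown bot'}
    # 3./5. Record the update and defer processing to a worker so the
    # provider gets an answer within its timeout.
    if bot.get('enabled'):
        task_queue.append(('process_update', payload['update_id']))
    else:
        logger.error('update ignored by disabled bot')
    # 6. Acknowledge receipt regardless of processing outcome.
    return 200, {'ok': True}

queue = []
status_code, body = handle_webhook({'update_id': 7, 'bot_id': 'b1'},
                                   {'b1': {'enabled': True}}, queue)
print(status_code, body, queue)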
8,242
saltstack/salt
salt/modules/elasticsearch.py
index_template_exists
def index_template_exists(name, hosts=None, profile=None):
    '''
    Return a boolean indicating whether given index template exists

    name
        Index template name

    CLI example::

        salt myminion elasticsearch.index_template_exists testindex_templ
    '''
    es = _get_instance(hosts, profile)

    try:
        return es.indices.exists_template(name=name)
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot retrieve template {0}, server returned code {1} with message {2}".format(name, e.status_code, e.error))
python
def index_template_exists(name, hosts=None, profile=None):
    '''
    Return a boolean indicating whether given index template exists

    name
        Index template name

    CLI example::

        salt myminion elasticsearch.index_template_exists testindex_templ
    '''
    es = _get_instance(hosts, profile)

    try:
        return es.indices.exists_template(name=name)
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot retrieve template {0}, server returned code {1} with message {2}".format(name, e.status_code, e.error))
['def', 'index_template_exists', '(', 'name', ',', 'hosts', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'es', '=', '_get_instance', '(', 'hosts', ',', 'profile', ')', 'try', ':', 'return', 'es', '.', 'indices', '.', 'exists_template', '(', 'name', '=', 'name', ')', 'except', 'elasticsearch', '.', 'TransportError', 'as', 'e', ':', 'raise', 'CommandExecutionError', '(', '"Cannot retrieve template {0}, server returned code {1} with message {2}"', '.', 'format', '(', 'name', ',', 'e', '.', 'status_code', ',', 'e', '.', 'error', ')', ')']
Return a boolean indicating whether given index template exists

name
    Index template name

CLI example::

    salt myminion elasticsearch.index_template_exists testindex_templ
['Return', 'a', 'boolean', 'indicating', 'whether', 'given', 'index', 'template', 'exists']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/elasticsearch.py#L776-L791
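For comparison, the underlying elasticsearch-py call can be wrapped directly; this sketch assumes a reachable cluster and the legacy `exists_template` client method the module uses:

import elasticsearch

def template_exists(es, name):
    # Mirror the module's error-wrapping pattern: turn transport
    # failures into a single, descriptive exception.
    try:
        return es.indices.exists_template(name=name)
    except elasticsearch.TransportError as e:
        raise RuntimeError(
            "Cannot retrieve template {0}, server returned code {1} "
            "with message {2}".format(name, e.status_code, e.error))

# Requires a running cluster:
# es = elasticsearch.Elasticsearch(["http://localhost:9200"])
# print(template_exists(es, "testindex_templ"))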
8,243
sckott/pygbif
pygbif/occurrences/get.py
get_verbatim
def get_verbatim(key, **kwargs):
    '''
    Gets a verbatim occurrence record without any interpretation

    :param key: [int] A GBIF occurrence key

    :return: A dictionary, of results

    Usage::

        from pygbif import occurrences
        occurrences.get_verbatim(key = 1258202889)
        occurrences.get_verbatim(key = 1227768771)
        occurrences.get_verbatim(key = 1227769518)
    '''
    url = gbif_baseurl + 'occurrence/' + str(key) + '/verbatim'
    out = gbif_GET(url, {}, **kwargs)
    return out
python
def get_verbatim(key, **kwargs):
    '''
    Gets a verbatim occurrence record without any interpretation

    :param key: [int] A GBIF occurrence key

    :return: A dictionary, of results

    Usage::

        from pygbif import occurrences
        occurrences.get_verbatim(key = 1258202889)
        occurrences.get_verbatim(key = 1227768771)
        occurrences.get_verbatim(key = 1227769518)
    '''
    url = gbif_baseurl + 'occurrence/' + str(key) + '/verbatim'
    out = gbif_GET(url, {}, **kwargs)
    return out
['def', 'get_verbatim', '(', 'key', ',', '*', '*', 'kwargs', ')', ':', 'url', '=', 'gbif_baseurl', '+', "'occurrence/'", '+', 'str', '(', 'key', ')', '+', "'/verbatim'", 'out', '=', 'gbif_GET', '(', 'url', ',', '{', '}', ',', '*', '*', 'kwargs', ')', 'return', 'out']
Gets a verbatim occurrence record without any interpretation

:param key: [int] A GBIF occurrence key

:return: A dictionary, of results

Usage::

    from pygbif import occurrences
    occurrences.get_verbatim(key = 1258202889)
    occurrences.get_verbatim(key = 1227768771)
    occurrences.get_verbatim(key = 1227769518)
['Gets', 'a', 'verbatim', 'occurrence', 'record', 'without', 'any', 'interpretation']
train
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/get.py#L22-L39
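The helper is a thin wrapper over one GBIF endpoint. An equivalent hedged sketch with requests (the base URL is assumed here; pygbif keeps it in gbif_baseurl, and its gbif_GET also handles headers and error codes):

import requests

GBIF_BASEURL = "https://api.gbif.org/v1/"  # assumed value of gbif_baseurl

def get_verbatim_sketch(key, **kwargs):
    # GET /occurrence/{key}/verbatim returns the record as published,
    # without GBIF's interpretation layer.
    url = GBIF_BASEURL + "occurrence/" + str(key) + "/verbatim"
    resp = requests.get(url, **kwargs)
    resp.raise_for_status()
    return resp.json()

# Requires network access:
# print(get_verbatim_sketch(1258202889).get("datasetKey"))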
8,244
google/grr
grr/server/grr_response_server/aff4.py
AttributeExpression.Compile
def Compile(self, filter_implemention):
    """Returns the data_store filter implementation from the attribute."""
    return self.operator_method(self.attribute_obj, filter_implemention,
                                *self.args)
python
def Compile(self, filter_implemention):
    """Returns the data_store filter implementation from the attribute."""
    return self.operator_method(self.attribute_obj, filter_implemention,
                                *self.args)
['def', 'Compile', '(', 'self', ',', 'filter_implemention', ')', ':', 'return', 'self', '.', 'operator_method', '(', 'self', '.', 'attribute_obj', ',', 'filter_implemention', ',', '*', 'self', '.', 'args', ')']
Returns the data_store filter implementation from the attribute.
['Returns', 'the', 'data_store', 'filter', 'implementation', 'from', 'the', 'attribute', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L2373-L2376
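Compile is a small double-dispatch: the expression holds an operator callable and applies it to a backend-specific filter implementation. A toy version of the pattern (all names hypothetical, not the GRR API):

class EqualsOperator:
    def __call__(self, attribute, filter_impl, *args):
        # Ask the backend to build its own "equals" filter node.
        return filter_impl.equals(attribute, *args)

class SqlFilters:
    @staticmethod
    def equals(attribute, value):
        return "%s = %r" % (attribute, value)

class AttributeExpressionSketch:
    def __init__(self, attribute_obj, operator_method, args):
        self.attribute_obj = attribute_obj
        self.operator_method = operator_method
        self.args = args

    def Compile(self, filter_implementation):
        # Same shape as the GRR method above: delegate to the operator,
        # passing the attribute, the backend, and any stored arguments.
        return self.operator_method(self.attribute_obj,
                                    filter_implementation, *self.args)

expr = AttributeExpressionSketch("aff4:type", EqualsOperator(), ("VFSFile",))
print(expr.Compile(SqlFilters))  # aff4:type = 'VFSFile'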
8,245
kennethreitz/records
records.py
Record.dataset
def dataset(self):
    """A Tablib Dataset containing the row."""
    data = tablib.Dataset()
    data.headers = self.keys()

    row = _reduce_datetimes(self.values())
    data.append(row)

    return data
python
def dataset(self):
    """A Tablib Dataset containing the row."""
    data = tablib.Dataset()
    data.headers = self.keys()

    row = _reduce_datetimes(self.values())
    data.append(row)

    return data
['def', 'dataset', '(', 'self', ')', ':', 'data', '=', 'tablib', '.', 'Dataset', '(', ')', 'data', '.', 'headers', '=', 'self', '.', 'keys', '(', ')', 'row', '=', '_reduce_datetimes', '(', 'self', '.', 'values', '(', ')', ')', 'data', '.', 'append', '(', 'row', ')', 'return', 'data']
A Tablib Dataset containing the row.
['A', 'Tablib', 'Dataset', 'containing', 'the', 'row', '.']
train
https://github.com/kennethreitz/records/blob/ecd857266c5e7830d657cbe0196816314790563b/records.py#L88-L96
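tablib's Dataset takes headers plus appended rows, so any mapping can be exported the same way; a small self-contained sketch (the record's datetime reduction step is omitted):

import tablib

def row_to_dataset(row):
    # Build a one-row Dataset from a mapping, mirroring Record.dataset.
    data = tablib.Dataset()
    data.headers = list(row.keys())
    data.append(tuple(row.values()))
    return data

ds = row_to_dataset({"id": 1, "name": "ada"})
print(ds.export("csv"))  # id,name / 1,ada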
8,246
DeV1doR/aioethereum
aioethereum/utils.py
add_0x
def add_0x(string):
    """Add 0x to string at start.
    """
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    return '0x' + str(string)
python
def add_0x(string):
    """Add 0x to string at start.
    """
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    return '0x' + str(string)
['def', 'add_0x', '(', 'string', ')', ':', 'if', 'isinstance', '(', 'string', ',', 'bytes', ')', ':', 'string', '=', 'string', '.', 'decode', '(', "'utf-8'", ')', 'return', "'0x'", '+', 'str', '(', 'string', ')']
Add 0x to string at start.
['Add', '0x', 'to', 'string', 'at', 'start', '.']
train
https://github.com/DeV1doR/aioethereum/blob/85eb46550d862b3ccc309914ea871ca1c7b42157/aioethereum/utils.py#L4-L9
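A quick behavior check of the helper above (self-contained copy plus assertions):

def add_0x(string):
    """Add 0x to string at start."""
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    return '0x' + str(string)

assert add_0x('abc123') == '0xabc123'
assert add_0x(b'deadbeef') == '0xdeadbeef'   # bytes are decoded first
assert add_0x(42) == '0x42'                  # non-strings are coerced with str()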
8,247
rochacbruno/python-pagseguro
examples/flask/flask_seguro/controllers/main/__init__.py
add_to_cart
def add_to_cart(item_id):
    """ Cart with Product """
    cart = Cart(session['cart'])
    if cart.change_item(item_id, 'add'):
        session['cart'] = cart.to_dict()
    return list_products()
python
def add_to_cart(item_id):
    """ Cart with Product """
    cart = Cart(session['cart'])
    if cart.change_item(item_id, 'add'):
        session['cart'] = cart.to_dict()
    return list_products()
['def', 'add_to_cart', '(', 'item_id', ')', ':', 'cart', '=', 'Cart', '(', 'session', '[', "'cart'", ']', ')', 'if', 'cart', '.', 'change_item', '(', 'item_id', ',', "'add'", ')', ':', 'session', '[', "'cart'", ']', '=', 'cart', '.', 'to_dict', '(', ')', 'return', 'list_products', '(', ')']
Cart with Product
['Cart', 'with', 'Product']
train
https://github.com/rochacbruno/python-pagseguro/blob/18a9ca3301783cb323e838574b59f9ddffa9a593/examples/flask/flask_seguro/controllers/main/__init__.py#L44-L49
8,248
funkybob/antfarm
antfarm/response.py
Response.build_headers
def build_headers(self):
    '''
    Return the list of headers as two-tuples
    '''
    if not 'Content-Type' in self.headers:
        content_type = self.content_type
        if self.encoding != DEFAULT_ENCODING:
            content_type += '; charset=%s' % self.encoding
        self.headers['Content-Type'] = content_type
    headers = list(self.headers.items())
    # Append cookies
    headers += [
        ('Set-Cookie', cookie.OutputString())
        for cookie in self.cookies.values()
    ]
    return headers
python
def build_headers(self):
    '''
    Return the list of headers as two-tuples
    '''
    if not 'Content-Type' in self.headers:
        content_type = self.content_type
        if self.encoding != DEFAULT_ENCODING:
            content_type += '; charset=%s' % self.encoding
        self.headers['Content-Type'] = content_type
    headers = list(self.headers.items())
    # Append cookies
    headers += [
        ('Set-Cookie', cookie.OutputString())
        for cookie in self.cookies.values()
    ]
    return headers
['def', 'build_headers', '(', 'self', ')', ':', 'if', 'not', "'Content-Type'", 'in', 'self', '.', 'headers', ':', 'content_type', '=', 'self', '.', 'content_type', 'if', 'self', '.', 'encoding', '!=', 'DEFAULT_ENCODING', ':', 'content_type', '+=', "'; charset=%s'", '%', 'self', '.', 'encoding', 'self', '.', 'headers', '[', "'Content-Type'", ']', '=', 'content_type', 'headers', '=', 'list', '(', 'self', '.', 'headers', '.', 'items', '(', ')', ')', '# Append cookies', 'headers', '+=', '[', '(', "'Set-Cookie'", ',', 'cookie', '.', 'OutputString', '(', ')', ')', 'for', 'cookie', 'in', 'self', '.', 'cookies', '.', 'values', '(', ')', ']', 'return', 'headers']
Return the list of headers as two-tuples
['Return', 'the', 'list', 'of', 'headers', 'as', 'two', '-', 'tuples']
train
https://github.com/funkybob/antfarm/blob/40a7cc450eba09a280b7bc8f7c68a807b0177c62/antfarm/response.py#L90-L106
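The cookie half of build_headers leans on the stdlib Morsel API; each stored cookie renders its own Set-Cookie value. A standalone sketch with http.cookies (the helper name and defaults are illustrative):

from http.cookies import SimpleCookie

def build_headers_sketch(headers, cookies, content_type="text/html",
                         encoding="utf-8", default_encoding="utf-8"):
    # Fill in Content-Type (adding a charset when it differs from the
    # default), then append one Set-Cookie header per stored cookie.
    if 'Content-Type' not in headers:
        if encoding != default_encoding:
            content_type += '; charset=%s' % encoding
        headers['Content-Type'] = content_type
    out = list(headers.items())
    out += [('Set-Cookie', morsel.OutputString())
            for morsel in cookies.values()]
    return out

cookies = SimpleCookie()
cookies['session'] = 'abc123'
cookies['session']['httponly'] = True
print(build_headers_sketch({}, cookies, encoding='latin-1'))
# [('Content-Type', 'text/html; charset=latin-1'),
#  ('Set-Cookie', 'session=abc123; HttpOnly')]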
8,249
rix0rrr/gcl
gcl/doc.py
sort_members
def sort_members(tup, names):
    """Return two pairs of members, scalar and tuple members.

    The scalars will be sorted s.t. the unbound members are at the top.
    """
    scalars, tuples = partition(lambda x: not is_tuple_node(tup.member[x].value), names)
    unbound, bound = partition(lambda x: tup.member[x].value.is_unbound(), scalars)
    return usorted(unbound) + usorted(bound), usorted(tuples)
python
def sort_members(tup, names):
    """Return two pairs of members, scalar and tuple members.

    The scalars will be sorted s.t. the unbound members are at the top.
    """
    scalars, tuples = partition(lambda x: not is_tuple_node(tup.member[x].value), names)
    unbound, bound = partition(lambda x: tup.member[x].value.is_unbound(), scalars)
    return usorted(unbound) + usorted(bound), usorted(tuples)
['def', 'sort_members', '(', 'tup', ',', 'names', ')', ':', 'scalars', ',', 'tuples', '=', 'partition', '(', 'lambda', 'x', ':', 'not', 'is_tuple_node', '(', 'tup', '.', 'member', '[', 'x', ']', '.', 'value', ')', ',', 'names', ')', 'unbound', ',', 'bound', '=', 'partition', '(', 'lambda', 'x', ':', 'tup', '.', 'member', '[', 'x', ']', '.', 'value', '.', 'is_unbound', '(', ')', ',', 'scalars', ')', 'return', 'usorted', '(', 'unbound', ')', '+', 'usorted', '(', 'bound', ')', ',', 'usorted', '(', 'tuples', ')']
Return two pairs of members, scalar and tuple members. The scalars will be sorted s.t. the unbound members are at the top.
['Return', 'two', 'pairs', 'of', 'members', 'scalar', 'and', 'tuple', 'members', '.']
train
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/doc.py#L229-L236
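The two-level split relies on a partition helper that divides a sequence by a predicate while keeping order; a self-contained sketch of that building block (gcl's own partition/usorted may differ in detail):

def partition(pred, items):
    # Split items into (matching, non-matching), preserving order.
    yes, no = [], []
    for item in items:
        (yes if pred(item) else no).append(item)
    return yes, no

names = ['port', 'host', 'retries', 'timeout']
unbound = {'host', 'timeout'}          # hypothetical "unbound" members
top, rest = partition(lambda n: n in unbound, names)
print(sorted(top) + sorted(rest))      # unbound names sort to the front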
8,250
saltstack/salt
salt/modules/virt.py
pool_build
def pool_build(name, **kwargs):
    '''
    Build a defined libvirt storage pool.

    :param name: libvirt storage pool name
    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.pool_build default
    '''
    conn = __get_conn(**kwargs)
    try:
        pool = conn.storagePoolLookupByName(name)
        return not bool(pool.build())
    finally:
        conn.close()
python
def pool_build(name, **kwargs):
    '''
    Build a defined libvirt storage pool.

    :param name: libvirt storage pool name
    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.pool_build default
    '''
    conn = __get_conn(**kwargs)
    try:
        pool = conn.storagePoolLookupByName(name)
        return not bool(pool.build())
    finally:
        conn.close()
['def', 'pool_build', '(', 'name', ',', '*', '*', 'kwargs', ')', ':', 'conn', '=', '__get_conn', '(', '*', '*', 'kwargs', ')', 'try', ':', 'pool', '=', 'conn', '.', 'storagePoolLookupByName', '(', 'name', ')', 'return', 'not', 'bool', '(', 'pool', '.', 'build', '(', ')', ')', 'finally', ':', 'conn', '.', 'close', '(', ')']
Build a defined libvirt storage pool.

:param name: libvirt storage pool name
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults

.. versionadded:: 2019.2.0

CLI Example:

.. code-block:: bash

    salt '*' virt.pool_build default
['Build', 'a', 'defined', 'libvirt', 'storage', 'pool', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L5085-L5107
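The `not bool(pool.build())` idiom converts libvirt's C-style return code (0 on success) into a Python boolean. A hedged sketch of the same flow against libvirt-python directly (assumes a reachable hypervisor and an existing pool):

import libvirt  # libvirt-python

def pool_build_sketch(uri, name):
    conn = libvirt.open(uri)
    try:
        pool = conn.storagePoolLookupByName(name)
        # libvirt returns 0 on success, so compare to get True/False.
        return pool.build() == 0
    finally:
        conn.close()

# Requires a hypervisor:
# print(pool_build_sketch('qemu:///system', 'default'))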
8,251
wavycloud/pyboto3
pyboto3/cloudformation.py
create_stack
def create_stack(StackName=None, TemplateBody=None, TemplateURL=None, Parameters=None, DisableRollback=None, TimeoutInMinutes=None, NotificationARNs=None, Capabilities=None, ResourceTypes=None, RoleARN=None, OnFailure=None, StackPolicyBody=None, StackPolicyURL=None, Tags=None, ClientRequestToken=None):
    """
    Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.
    See also: AWS API Documentation

    :example: response = client.create_stack(
        StackName='string',
        TemplateBody='string',
        TemplateURL='string',
        Parameters=[
            {
                'ParameterKey': 'string',
                'ParameterValue': 'string',
                'UsePreviousValue': True|False
            },
        ],
        DisableRollback=True|False,
        TimeoutInMinutes=123,
        NotificationARNs=[
            'string',
        ],
        Capabilities=[
            'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM',
        ],
        ResourceTypes=[
            'string',
        ],
        RoleARN='string',
        OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',
        StackPolicyBody='string',
        StackPolicyURL='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ],
        ClientRequestToken='string'
    )

    :type StackName: string
    :param StackName: [REQUIRED]
        The name that is associated with the stack. The name must be unique in the region in which you are creating the stack.
        Note
        A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.

    :type TemplateBody: string
    :param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.
        Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    :type TemplateURL: string
    :param TemplateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.
        Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    :type Parameters: list
    :param Parameters: A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.
        (dict) --The Parameter data type.
        ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.
        ParameterValue (string) --The value associated with the parameter.
        UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.

    :type DisableRollback: boolean
    :param DisableRollback: Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure , but not both.
        Default: false

    :type TimeoutInMinutes: integer
    :param TimeoutInMinutes: The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false , the stack will be rolled back.

    :type NotificationARNs: list
    :param NotificationARNs: The Simple Notification Service (SNS) topic ARNs to publish stack related events. You can find your SNS topic ARNs using the SNS console or your Command Line Interface (CLI).
        (string) --

    :type Capabilities: list
    :param Capabilities: A list of values that you must specify before AWS CloudFormation can create certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.
        The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM . The following resources require you to specify this parameter: AWS::IAM::AccessKey , AWS::IAM::Group , AWS::IAM::InstanceProfile , AWS::IAM::Policy , AWS::IAM::Role , AWS::IAM::User , and AWS::IAM::UserToGroupAddition . If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
        If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM . If you don't specify this parameter, this action returns an InsufficientCapabilities error.
        For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .
        (string) --

    :type ResourceTypes: list
    :param ResourceTypes: The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance . Use the following syntax to describe template resource types: AWS::* (for all AWS resource), Custom::* (for all custom resources), Custom::*logical_ID* `` (for a specific custom resource), ``AWS::*service_name* ::* (for all resources of a particular AWS service), and ``AWS::service_name ::resource_logical_ID `` (for a specific AWS resource).
        If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management .
        (string) --

    :type RoleARN: string
    :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to create the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.
        If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

    :type OnFailure: string
    :param OnFailure: Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback , but not both.
        Default: ROLLBACK

    :type StackPolicyBody: string
    :param StackPolicyBody: Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide . You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    :type StackPolicyURL: string
    :param StackPolicyURL: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    :type Tags: list
    :param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 10 tags can be specified.
        (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.
        Key (string) -- Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .
        Value (string) -- Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.

    :type ClientRequestToken: string
    :param ClientRequestToken: A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

    :rtype: dict
    :return: {
        'StackId': 'string'
    }

    """
    pass
python
def create_stack(StackName=None, TemplateBody=None, TemplateURL=None, Parameters=None, DisableRollback=None, TimeoutInMinutes=None, NotificationARNs=None, Capabilities=None, ResourceTypes=None, RoleARN=None, OnFailure=None, StackPolicyBody=None, StackPolicyURL=None, Tags=None, ClientRequestToken=None):
    """
    Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.
    See also: AWS API Documentation

    :example: response = client.create_stack(
        StackName='string',
        TemplateBody='string',
        TemplateURL='string',
        Parameters=[
            {
                'ParameterKey': 'string',
                'ParameterValue': 'string',
                'UsePreviousValue': True|False
            },
        ],
        DisableRollback=True|False,
        TimeoutInMinutes=123,
        NotificationARNs=[
            'string',
        ],
        Capabilities=[
            'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM',
        ],
        ResourceTypes=[
            'string',
        ],
        RoleARN='string',
        OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',
        StackPolicyBody='string',
        StackPolicyURL='string',
        Tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ],
        ClientRequestToken='string'
    )

    :type StackName: string
    :param StackName: [REQUIRED]
        The name that is associated with the stack. The name must be unique in the region in which you are creating the stack.
        Note
        A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.

    :type TemplateBody: string
    :param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.
        Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    :type TemplateURL: string
    :param TemplateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.
        Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    :type Parameters: list
    :param Parameters: A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.
        (dict) --The Parameter data type.
        ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.
        ParameterValue (string) --The value associated with the parameter.
        UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.

    :type DisableRollback: boolean
    :param DisableRollback: Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure , but not both.
        Default: false

    :type TimeoutInMinutes: integer
    :param TimeoutInMinutes: The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false , the stack will be rolled back.

    :type NotificationARNs: list
    :param NotificationARNs: The Simple Notification Service (SNS) topic ARNs to publish stack related events. You can find your SNS topic ARNs using the SNS console or your Command Line Interface (CLI).
        (string) --

    :type Capabilities: list
    :param Capabilities: A list of values that you must specify before AWS CloudFormation can create certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.
        The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM . The following resources require you to specify this parameter: AWS::IAM::AccessKey , AWS::IAM::Group , AWS::IAM::InstanceProfile , AWS::IAM::Policy , AWS::IAM::Role , AWS::IAM::User , and AWS::IAM::UserToGroupAddition . If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
        If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM . If you don't specify this parameter, this action returns an InsufficientCapabilities error.
        For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .
        (string) --

    :type ResourceTypes: list
    :param ResourceTypes: The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance . Use the following syntax to describe template resource types: AWS::* (for all AWS resource), Custom::* (for all custom resources), Custom::*logical_ID* `` (for a specific custom resource), ``AWS::*service_name* ::* (for all resources of a particular AWS service), and ``AWS::service_name ::resource_logical_ID `` (for a specific AWS resource).
        If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management .
        (string) --

    :type RoleARN: string
    :param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to create the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.
        If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

    :type OnFailure: string
    :param OnFailure: Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback , but not both.
        Default: ROLLBACK

    :type StackPolicyBody: string
    :param StackPolicyBody: Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide . You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    :type StackPolicyURL: string
    :param StackPolicyURL: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    :type Tags: list
    :param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 10 tags can be specified.
        (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.
        Key (string) -- Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .
        Value (string) -- Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.

    :type ClientRequestToken: string
    :param ClientRequestToken: A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

    :rtype: dict
    :return: {
        'StackId': 'string'
    }

    """
    pass
['def', 'create_stack', '(', 'StackName', '=', 'None', ',', 'TemplateBody', '=', 'None', ',', 'TemplateURL', '=', 'None', ',', 'Parameters', '=', 'None', ',', 'DisableRollback', '=', 'None', ',', 'TimeoutInMinutes', '=', 'None', ',', 'NotificationARNs', '=', 'None', ',', 'Capabilities', '=', 'None', ',', 'ResourceTypes', '=', 'None', ',', 'RoleARN', '=', 'None', ',', 'OnFailure', '=', 'None', ',', 'StackPolicyBody', '=', 'None', ',', 'StackPolicyURL', '=', 'None', ',', 'Tags', '=', 'None', ',', 'ClientRequestToken', '=', 'None', ')', ':', 'pass']
Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack via the DescribeStacks API.
See also: AWS API Documentation

:example: response = client.create_stack(
    StackName='string',
    TemplateBody='string',
    TemplateURL='string',
    Parameters=[
        {
            'ParameterKey': 'string',
            'ParameterValue': 'string',
            'UsePreviousValue': True|False
        },
    ],
    DisableRollback=True|False,
    TimeoutInMinutes=123,
    NotificationARNs=[
        'string',
    ],
    Capabilities=[
        'CAPABILITY_IAM'|'CAPABILITY_NAMED_IAM',
    ],
    ResourceTypes=[
        'string',
    ],
    RoleARN='string',
    OnFailure='DO_NOTHING'|'ROLLBACK'|'DELETE',
    StackPolicyBody='string',
    StackPolicyURL='string',
    Tags=[
        {
            'Key': 'string',
            'Value': 'string'
        },
    ],
    ClientRequestToken='string'
)

:type StackName: string
:param StackName: [REQUIRED]
    The name that is associated with the stack. The name must be unique in the region in which you are creating the stack.
    Note
    A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetic character and cannot be longer than 128 characters.

:type TemplateBody: string
:param TemplateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the AWS CloudFormation User Guide.
    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

:type TemplateURL: string
:param TemplateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket. For more information, go to the Template Anatomy in the AWS CloudFormation User Guide.
    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

:type Parameters: list
:param Parameters: A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.
    (dict) --The Parameter data type.
    ParameterKey (string) --The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation uses the default value that is specified in your template.
    ParameterValue (string) --The value associated with the parameter.
    UsePreviousValue (boolean) --During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true , do not specify a parameter value.

:type DisableRollback: boolean
:param DisableRollback: Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure , but not both.
    Default: false

:type TimeoutInMinutes: integer
:param TimeoutInMinutes: The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false , the stack will be rolled back.

:type NotificationARNs: list
:param NotificationARNs: The Simple Notification Service (SNS) topic ARNs to publish stack related events. You can find your SNS topic ARNs using the SNS console or your Command Line Interface (CLI).
    (string) --

:type Capabilities: list
:param Capabilities: A list of values that you must specify before AWS CloudFormation can create certain stacks. Some stack templates might include resources that can affect permissions in your AWS account, for example, by creating new AWS Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge their capabilities by specifying this parameter.
    The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM . The following resources require you to specify this parameter: AWS::IAM::AccessKey , AWS::IAM::Group , AWS::IAM::InstanceProfile , AWS::IAM::Policy , AWS::IAM::Role , AWS::IAM::User , and AWS::IAM::UserToGroupAddition . If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
    If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM . If you don't specify this parameter, this action returns an InsufficientCapabilities error.
    For more information, see Acknowledging IAM Resources in AWS CloudFormation Templates .
    (string) --

:type ResourceTypes: list
:param ResourceTypes: The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance , AWS::EC2::* , or Custom::MyCustomInstance . Use the following syntax to describe template resource types: AWS::* (for all AWS resource), Custom::* (for all custom resources), Custom::*logical_ID* `` (for a specific custom resource), ``AWS::*service_name* ::* (for all resources of a particular AWS service), and ``AWS::service_name ::resource_logical_ID `` (for a specific AWS resource).
    If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, AWS CloudFormation grants permissions to all resource types. AWS Identity and Access Management (IAM) uses this parameter for AWS CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with AWS Identity and Access Management .
    (string) --

:type RoleARN: string
:param RoleARN: The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to create the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. As long as users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.
    If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.

:type OnFailure: string
:param OnFailure: Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback , but not both.
    Default: ROLLBACK

:type StackPolicyBody: string
:param StackPolicyBody: Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the AWS CloudFormation User Guide . You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

:type StackPolicyURL: string
:param StackPolicyURL: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

:type Tags: list
:param Tags: Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 10 tags can be specified.
    (dict) --The Tag type enables you to specify a key-value pair that can be used to store information about an AWS CloudFormation stack.
    Key (string) -- Required . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (AWS) have the reserved prefix: aws: .
    Value (string) -- Required . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.

:type ClientRequestToken: string
:param ClientRequestToken: A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that AWS CloudFormation successfully received them.

:rtype: dict
:return: {
    'StackId': 'string'
}
['Creates', 'a', 'stack', 'as', 'specified', 'in', 'the', 'template', '.', 'After', 'the', 'call', 'completes', 'successfully', 'the', 'stack', 'creation', 'starts', '.', 'You', 'can', 'check', 'the', 'status', 'of', 'the', 'stack', 'via', 'the', 'DescribeStacks', 'API', '.', 'See', 'also', ':', 'AWS', 'API', 'Documentation', ':', 'example', ':', 'response', '=', 'client', '.', 'create_stack', '(', 'StackName', '=', 'string', 'TemplateBody', '=', 'string', 'TemplateURL', '=', 'string', 'Parameters', '=', '[', '{', 'ParameterKey', ':', 'string', 'ParameterValue', ':', 'string', 'UsePreviousValue', ':', 'True|False', '}', ']', 'DisableRollback', '=', 'True|False', 'TimeoutInMinutes', '=', '123', 'NotificationARNs', '=', '[', 'string', ']', 'Capabilities', '=', '[', 'CAPABILITY_IAM', '|', 'CAPABILITY_NAMED_IAM', ']', 'ResourceTypes', '=', '[', 'string', ']', 'RoleARN', '=', 'string', 'OnFailure', '=', 'DO_NOTHING', '|', 'ROLLBACK', '|', 'DELETE', 'StackPolicyBody', '=', 'string', 'StackPolicyURL', '=', 'string', 'Tags', '=', '[', '{', 'Key', ':', 'string', 'Value', ':', 'string', '}', ']', 'ClientRequestToken', '=', 'string', ')', ':', 'type', 'StackName', ':', 'string', ':', 'param', 'StackName', ':', '[', 'REQUIRED', ']', 'The', 'name', 'that', 'is', 'associated', 'with', 'the', 'stack', '.', 'The', 'name', 'must', 'be', 'unique', 'in', 'the', 'region', 'in', 'which', 'you', 'are', 'creating', 'the', 'stack', '.', 'Note', 'A', 'stack', 'name', 'can', 'contain', 'only', 'alphanumeric', 'characters', '(', 'case', 'sensitive', ')', 'and', 'hyphens', '.', 'It', 'must', 'start', 'with', 'an', 'alphabetic', 'character', 'and', 'cannot', 'be', 'longer', 'than', '128', 'characters', '.']
train
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/cloudformation.py#L255-L386
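pyboto3 stubs carry signatures and documentation only (the body is `pass`); the real call goes through boto3. A minimal sketch (requires AWS credentials; the template body is a made-up example):

import boto3

cloudformation = boto3.client('cloudformation')

template = """{
  "Resources": {
    "MyBucket": {"Type": "AWS::S3::Bucket"}
  }
}"""

# Only StackName plus one of TemplateBody/TemplateURL is required;
# OnFailure and DisableRollback are mutually exclusive.
response = cloudformation.create_stack(
    StackName='demo-stack',
    TemplateBody=template,
    OnFailure='DELETE',
    Tags=[{'Key': 'owner', 'Value': 'docs-example'}],
)
print(response['StackId'])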
8,252
pandas-dev/pandas
pandas/core/dtypes/cast.py
maybe_convert_platform
def maybe_convert_platform(values):
    """ try to do platform conversion, allow ndarray or list here """
    if isinstance(values, (list, tuple)):
        values = construct_1d_object_array_from_listlike(list(values))
    if getattr(values, 'dtype', None) == np.object_:
        if hasattr(values, '_values'):
            values = values._values
        values = lib.maybe_convert_objects(values)

    return values
python
def maybe_convert_platform(values):
    """ try to do platform conversion, allow ndarray or list here """
    if isinstance(values, (list, tuple)):
        values = construct_1d_object_array_from_listlike(list(values))
    if getattr(values, 'dtype', None) == np.object_:
        if hasattr(values, '_values'):
            values = values._values
        values = lib.maybe_convert_objects(values)

    return values
['def', 'maybe_convert_platform', '(', 'values', ')', ':', 'if', 'isinstance', '(', 'values', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'values', '=', 'construct_1d_object_array_from_listlike', '(', 'list', '(', 'values', ')', ')', 'if', 'getattr', '(', 'values', ',', "'dtype'", ',', 'None', ')', '==', 'np', '.', 'object_', ':', 'if', 'hasattr', '(', 'values', ',', "'_values'", ')', ':', 'values', '=', 'values', '.', '_values', 'values', '=', 'lib', '.', 'maybe_convert_objects', '(', 'values', ')', 'return', 'values']
try to do platform conversion, allow ndarray or list here
['try', 'to', 'do', 'platform', 'conversion', 'allow', 'ndarray', 'or', 'list', 'here']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L35-L45
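The effect is easiest to see from the outside: listlikes first become 1-D object arrays, and pandas' internal maybe_convert_objects then tightens homogeneous object arrays to a concrete dtype. A sketch of the observable behavior (this imports a private pandas API, so the path may move between versions):

import numpy as np
from pandas.core.dtypes.cast import maybe_convert_platform  # internal API

print(maybe_convert_platform([1, 2, 3]).dtype)   # int64: homogeneous ints tighten
print(maybe_convert_platform([1, 'a']).dtype)    # object: mixed values stay object
print(maybe_convert_platform(np.array([1.0, 2.0])).dtype)  # float64: passes through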
8,253
softlayer/softlayer-python
SoftLayer/CLI/block/replication/failover.py
cli
def cli(env, volume_id, replicant_id, immediate):
    """Failover a block volume to the given replicant volume."""
    block_storage_manager = SoftLayer.BlockStorageManager(env.client)

    success = block_storage_manager.failover_to_replicant(
        volume_id,
        replicant_id,
        immediate
    )

    if success:
        click.echo("Failover to replicant is now in progress.")
    else:
        click.echo("Failover operation could not be initiated.")
python
def cli(env, volume_id, replicant_id, immediate):
    """Failover a block volume to the given replicant volume."""
    block_storage_manager = SoftLayer.BlockStorageManager(env.client)

    success = block_storage_manager.failover_to_replicant(
        volume_id,
        replicant_id,
        immediate
    )

    if success:
        click.echo("Failover to replicant is now in progress.")
    else:
        click.echo("Failover operation could not be initiated.")
['def', 'cli', '(', 'env', ',', 'volume_id', ',', 'replicant_id', ',', 'immediate', ')', ':', 'block_storage_manager', '=', 'SoftLayer', '.', 'BlockStorageManager', '(', 'env', '.', 'client', ')', 'success', '=', 'block_storage_manager', '.', 'failover_to_replicant', '(', 'volume_id', ',', 'replicant_id', ',', 'immediate', ')', 'if', 'success', ':', 'click', '.', 'echo', '(', '"Failover to replicant is now in progress."', ')', 'else', ':', 'click', '.', 'echo', '(', '"Failover operation could not be initiated."', ')']
Failover a block volume to the given replicant volume.
['Failover', 'a', 'block', 'volume', 'to', 'the', 'given', 'replicant', 'volume', '.']
train
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/block/replication/failover.py#L17-L30
8,254
dschep/lambda-decorators
lambda_decorators.py
on_exception
def on_exception(func):
    """
    Run a function when a handler thows an exception.
    It's return value is returned to AWS.

    Usage::

        >>> # to create a reusable decorator
        >>> @on_exception
        ... def handle_errors(exception):
        ...     print(exception)
        ...     return {'statusCode': 500, 'body': 'uh oh'}
        >>> @handle_errors
        ... def handler(event, context):
        ...     raise Exception('it broke!')
        >>> handler({}, object())
        it broke!
        {'statusCode': 500, 'body': 'uh oh'}

        >>> # or a one off
        >>> @on_exception(lambda e: {'statusCode': 500})
        ... def handler(body, context):
        ...     raise Exception
        >>> handler({}, object())
        {'statusCode': 500}
    """
    class OnExceptionDecorator(LambdaDecorator):
        def on_exception(self, exception):
            return func(exception)
    return OnExceptionDecorator
python
def on_exception(func):
    """
    Run a function when a handler thows an exception.
    It's return value is returned to AWS.

    Usage::

        >>> # to create a reusable decorator
        >>> @on_exception
        ... def handle_errors(exception):
        ...     print(exception)
        ...     return {'statusCode': 500, 'body': 'uh oh'}
        >>> @handle_errors
        ... def handler(event, context):
        ...     raise Exception('it broke!')
        >>> handler({}, object())
        it broke!
        {'statusCode': 500, 'body': 'uh oh'}

        >>> # or a one off
        >>> @on_exception(lambda e: {'statusCode': 500})
        ... def handler(body, context):
        ...     raise Exception
        >>> handler({}, object())
        {'statusCode': 500}
    """
    class OnExceptionDecorator(LambdaDecorator):
        def on_exception(self, exception):
            return func(exception)
    return OnExceptionDecorator
['def', 'on_exception', '(', 'func', ')', ':', 'class', 'OnExceptionDecorator', '(', 'LambdaDecorator', ')', ':', 'def', 'on_exception', '(', 'self', ',', 'exception', ')', ':', 'return', 'func', '(', 'exception', ')', 'return', 'OnExceptionDecorator']
Run a function when a handler thows an exception.
It's return value is returned to AWS.

Usage::

    >>> # to create a reusable decorator
    >>> @on_exception
    ... def handle_errors(exception):
    ...     print(exception)
    ...     return {'statusCode': 500, 'body': 'uh oh'}
    >>> @handle_errors
    ... def handler(event, context):
    ...     raise Exception('it broke!')
    >>> handler({}, object())
    it broke!
    {'statusCode': 500, 'body': 'uh oh'}

    >>> # or a one off
    >>> @on_exception(lambda e: {'statusCode': 500})
    ... def handler(body, context):
    ...     raise Exception
    >>> handler({}, object())
    {'statusCode': 500}
['Run', 'a', 'function', 'when', 'a', 'handler', 'thows', 'an', 'exception', '.', 'It', 's', 'return', 'value', 'is', 'returned', 'to', 'AWS', '.']
train
https://github.com/dschep/lambda-decorators/blob/9195914c8afe26843de9968d96dae6a89f061e8a/lambda_decorators.py#L292-L321
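The decorator works because LambdaDecorator is a class whose subclasses wrap a handler and route exceptions to a hook. A self-contained sketch of that base-class mechanics (an illustration, not the library's exact implementation):

import functools

class LambdaDecoratorSketch:
    # Wrap a handler; subclasses override on_exception to shape the
    # response AWS receives when the handler raises.
    def __init__(self, handler):
        functools.update_wrapper(self, handler)
        self.handler = handler

    def __call__(self, event, context):
        try:
            return self.handler(event, context)
        except Exception as exception:
            return self.on_exception(exception)

    def on_exception(self, exception):
        raise exception

def on_exception_sketch(func):
    class OnExceptionDecorator(LambdaDecoratorSketch):
        def on_exception(self, exception):
            return func(exception)
    return OnExceptionDecorator

@on_exception_sketch(lambda e: {'statusCode': 500})
def handler(event, context):
    raise RuntimeError('boom')

print(handler({}, object()))  # {'statusCode': 500}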
8,255
RI-imaging/qpformat
qpformat/file_formats/dataset.py
SeriesData.saveh5
def saveh5(self, h5file, qpi_slice=None, series_slice=None,
           time_interval=None, count=None, max_count=None):
    """Save the data set as an hdf5 file (qpimage.QPSeries format)

    Parameters
    ----------
    h5file: str, pathlib.Path, or h5py.Group
        Where to store the series data
    qpi_slice: tuple of (slice, slice)
        If not None, only store a slice of each QPImage in `h5file`.
        A value of None is equivalent to
        ``(slice(0, -1), slice(0, -1))``.
    series_slice: slice
        If None, save the entire series, otherwise only save the
        images specified by this slice.
    time_interval: tuple of (float, float)
        If not None, only stores QPImages that were recorded within
        the given time interval.
    count, max_count: multiprocessing.Value
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value of
        `count.value` is incremented.

    Notes
    -----
    The series "identifier" meta data is only set when all of
    `qpi_slice`, `series_slice`, and `time_interval` are None.
    """
    # set up slice to export
    if series_slice is None:
        sl = range(len(self))
    else:
        sl = range(series_slice.start, series_slice.stop)
    # set up time interval
    if time_interval is None:
        ta = -np.inf
        tb = np.inf
    else:
        ta, tb = time_interval
    # set max_count according to slice
    if max_count is not None:
        max_count.value += len(sl)

    qpskw = {"h5file": h5file,
             "h5mode": "w",
             }

    if (qpi_slice is None and
            series_slice is None and
            time_interval is None):
        # Only add series identifier if series complete.
        # (We assume that if any of the above kwargs is set,
        # the series data is somehow modified)
        qpskw["identifier"] = self.identifier

    with qpimage.QPSeries(**qpskw) as qps:
        increment = 0
        for ii in sl:
            ti = self.get_time(ii)
            if ti < ta or ti > tb:
                # Not part of the series
                pass
            else:
                increment += 1
                if increment == 1 or len(self._bgdata) != 1:
                    # initial image or series data where each image
                    # has a unique background image
                    qpi = self.get_qpimage(ii)
                    if qpi_slice is not None:
                        qpi = qpi[qpi_slice]
                    qps.add_qpimage(qpi)
                else:
                    # hard-link the background data
                    qpiraw = self.get_qpimage_raw(ii)
                    if qpi_slice is not None:
                        qpiraw = qpiraw[qpi_slice]
                    qps.add_qpimage(qpiraw, bg_from_idx=0)
            if count is not None:
                count.value += 1
python
def saveh5(self, h5file, qpi_slice=None, series_slice=None,
           time_interval=None, count=None, max_count=None):
    """Save the data set as an hdf5 file (qpimage.QPSeries format)

    Parameters
    ----------
    h5file: str, pathlib.Path, or h5py.Group
        Where to store the series data
    qpi_slice: tuple of (slice, slice)
        If not None, only store a slice of each QPImage in `h5file`.
        A value of None is equivalent to
        ``(slice(0, -1), slice(0, -1))``.
    series_slice: slice
        If None, save the entire series, otherwise only save the
        images specified by this slice.
    time_interval: tuple of (float, float)
        If not None, only stores QPImages that were recorded within
        the given time interval.
    count, max_count: multiprocessing.Value
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value of
        `count.value` is incremented.

    Notes
    -----
    The series "identifier" meta data is only set when all of
    `qpi_slice`, `series_slice`, and `time_interval` are None.
    """
    # set up slice to export
    if series_slice is None:
        sl = range(len(self))
    else:
        sl = range(series_slice.start, series_slice.stop)
    # set up time interval
    if time_interval is None:
        ta = -np.inf
        tb = np.inf
    else:
        ta, tb = time_interval
    # set max_count according to slice
    if max_count is not None:
        max_count.value += len(sl)

    qpskw = {"h5file": h5file,
             "h5mode": "w",
             }

    if (qpi_slice is None and
            series_slice is None and
            time_interval is None):
        # Only add series identifier if series complete.
        # (We assume that if any of the above kwargs is set,
        # the series data is somehow modified)
        qpskw["identifier"] = self.identifier

    with qpimage.QPSeries(**qpskw) as qps:
        increment = 0
        for ii in sl:
            ti = self.get_time(ii)
            if ti < ta or ti > tb:
                # Not part of the series
                pass
            else:
                increment += 1
                if increment == 1 or len(self._bgdata) != 1:
                    # initial image or series data where each image
                    # has a unique background image
                    qpi = self.get_qpimage(ii)
                    if qpi_slice is not None:
                        qpi = qpi[qpi_slice]
                    qps.add_qpimage(qpi)
                else:
                    # hard-link the background data
                    qpiraw = self.get_qpimage_raw(ii)
                    if qpi_slice is not None:
                        qpiraw = qpiraw[qpi_slice]
                    qps.add_qpimage(qpiraw, bg_from_idx=0)
            if count is not None:
                count.value += 1
['def', 'saveh5', '(', 'self', ',', 'h5file', ',', 'qpi_slice', '=', 'None', ',', 'series_slice', '=', 'None', ',', 'time_interval', '=', 'None', ',', 'count', '=', 'None', ',', 'max_count', '=', 'None', ')', ':', '# set up slice to export', 'if', 'series_slice', 'is', 'None', ':', 'sl', '=', 'range', '(', 'len', '(', 'self', ')', ')', 'else', ':', 'sl', '=', 'range', '(', 'series_slice', '.', 'start', ',', 'series_slice', '.', 'stop', ')', '# set up time interval', 'if', 'time_interval', 'is', 'None', ':', 'ta', '=', '-', 'np', '.', 'inf', 'tb', '=', 'np', '.', 'inf', 'else', ':', 'ta', ',', 'tb', '=', 'time_interval', '# set max_count according to slice', 'if', 'max_count', 'is', 'not', 'None', ':', 'max_count', '.', 'value', '+=', 'len', '(', 'sl', ')', 'qpskw', '=', '{', '"h5file"', ':', 'h5file', ',', '"h5mode"', ':', '"w"', ',', '}', 'if', '(', 'qpi_slice', 'is', 'None', 'and', 'series_slice', 'is', 'None', 'and', 'time_interval', 'is', 'None', ')', ':', '# Only add series identifier if series complete.', '# (We assume that if any of the above kwargs is set,', '# the series data is somehow modified)', 'qpskw', '[', '"identifier"', ']', '=', 'self', '.', 'identifier', 'with', 'qpimage', '.', 'QPSeries', '(', '*', '*', 'qpskw', ')', 'as', 'qps', ':', 'increment', '=', '0', 'for', 'ii', 'in', 'sl', ':', 'ti', '=', 'self', '.', 'get_time', '(', 'ii', ')', 'if', 'ti', '<', 'ta', 'or', 'ti', '>', 'tb', ':', '# Not part of the series', 'pass', 'else', ':', 'increment', '+=', '1', 'if', 'increment', '==', '1', 'or', 'len', '(', 'self', '.', '_bgdata', ')', '!=', '1', ':', '# initial image or series data where each image', '# has a unique background image', 'qpi', '=', 'self', '.', 'get_qpimage', '(', 'ii', ')', 'if', 'qpi_slice', 'is', 'not', 'None', ':', 'qpi', '=', 'qpi', '[', 'qpi_slice', ']', 'qps', '.', 'add_qpimage', '(', 'qpi', ')', 'else', ':', '# hard-link the background data', 'qpiraw', '=', 'self', '.', 'get_qpimage_raw', '(', 'ii', ')', 'if', 'qpi_slice', 'is', 'not', 'None', ':', 'qpiraw', '=', 'qpiraw', '[', 'qpi_slice', ']', 'qps', '.', 'add_qpimage', '(', 'qpiraw', ',', 'bg_from_idx', '=', '0', ')', 'if', 'count', 'is', 'not', 'None', ':', 'count', '.', 'value', '+=', '1']
Save the data set as an hdf5 file (qpimage.QPSeries format)

Parameters
----------
h5file: str, pathlib.Path, or h5py.Group
    Where to store the series data
qpi_slice: tuple of (slice, slice)
    If not None, only store a slice of each QPImage in `h5file`. A value of None is equivalent to ``(slice(0, -1), slice(0, -1))``.
series_slice: slice
    If None, save the entire series, otherwise only save the images specified by this slice.
time_interval: tuple of (float, float)
    If not None, only stores QPImages that were recorded within the given time interval.
count, max_count: multiprocessing.Value
    Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented.

Notes
-----
The series "identifier" meta data is only set when all of `qpi_slice`, `series_slice`, and `time_interval` are None.
['Save', 'the', 'data', 'set', 'as', 'an', 'hdf5', 'file', '(', 'qpimage', '.', 'QPSeries', 'format', ')']
train
https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/dataset.py#L211-L291
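Usage sketch for saveh5, illustrating the slicing and progress-monitoring parameters documented above. The qpformat.load_data call and file names are illustrative assumptions, not taken from this record.

import multiprocessing as mp

import qpformat

# Hypothetical input file; any series format supported by qpformat would do.
ds = qpformat.load_data("series_data.h5")

# Shared counters let another process monitor export progress.
count = mp.Value("i", 0)
max_count = mp.Value("i", 0)

# Export the first ten images, cropping each QPImage to 128x128 pixels.
ds.saveh5("subset.h5",
          qpi_slice=(slice(0, 128), slice(0, 128)),
          series_slice=slice(0, 10),
          count=count,
          max_count=max_count)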
8,256
lsbardel/python-stdnet
stdnet/utils/jsontools.py
addmul_number_dicts
def addmul_number_dicts(series): '''Multiply dictionaries by a numeric values and add them together. :parameter series: a tuple of two elements tuples. Each serie is of the form:: (weight,dictionary) where ``weight`` is a number and ``dictionary`` is a dictionary with numeric values. :parameter skip: optional list of field names to skip. Only common fields are aggregated. If a field has a non-numeric value it is not included either.''' if not series: return vtype = value_type((s[1] for s in series)) if vtype == 1: return sum((weight*float(d) for weight, d in series)) elif vtype == 3: keys = set(series[0][1]) for serie in series[1:]: keys.intersection_update(serie[1]) results = {} for key in keys: key_series = tuple((weight, d[key]) for weight, d in series) result = addmul_number_dicts(key_series) if result is not None: results[key] = result return results
python
def addmul_number_dicts(series): '''Multiply dictionaries by a numeric values and add them together. :parameter series: a tuple of two elements tuples. Each serie is of the form:: (weight,dictionary) where ``weight`` is a number and ``dictionary`` is a dictionary with numeric values. :parameter skip: optional list of field names to skip. Only common fields are aggregated. If a field has a non-numeric value it is not included either.''' if not series: return vtype = value_type((s[1] for s in series)) if vtype == 1: return sum((weight*float(d) for weight, d in series)) elif vtype == 3: keys = set(series[0][1]) for serie in series[1:]: keys.intersection_update(serie[1]) results = {} for key in keys: key_series = tuple((weight, d[key]) for weight, d in series) result = addmul_number_dicts(key_series) if result is not None: results[key] = result return results
['def', 'addmul_number_dicts', '(', 'series', ')', ':', 'if', 'not', 'series', ':', 'return', 'vtype', '=', 'value_type', '(', '(', 's', '[', '1', ']', 'for', 's', 'in', 'series', ')', ')', 'if', 'vtype', '==', '1', ':', 'return', 'sum', '(', '(', 'weight', '*', 'float', '(', 'd', ')', 'for', 'weight', ',', 'd', 'in', 'series', ')', ')', 'elif', 'vtype', '==', '3', ':', 'keys', '=', 'set', '(', 'series', '[', '0', ']', '[', '1', ']', ')', 'for', 'serie', 'in', 'series', '[', '1', ':', ']', ':', 'keys', '.', 'intersection_update', '(', 'serie', '[', '1', ']', ')', 'results', '=', '{', '}', 'for', 'key', 'in', 'keys', ':', 'key_series', '=', 'tuple', '(', '(', 'weight', ',', 'd', '[', 'key', ']', ')', 'for', 'weight', ',', 'd', 'in', 'series', ')', 'result', '=', 'addmul_number_dicts', '(', 'key_series', ')', 'if', 'result', 'is', 'not', 'None', ':', 'results', '[', 'key', ']', '=', 'result', 'return', 'results']
Multiply dictionaries by numeric values and add them together. :parameter series: a tuple of two-element tuples. Each series is of the form:: (weight, dictionary) where ``weight`` is a number and ``dictionary`` is a dictionary with numeric values. :parameter skip: optional list of field names to skip. Only common fields are aggregated. If a field has a non-numeric value it is not included either.
['Multiply', 'dictionaries', 'by', 'numeric', 'values', 'and', 'add', 'them', 'together', '.']
train
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/jsontools.py#L201-L229
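Worked example for addmul_number_dicts with invented values. Only the key common to both dictionaries survives the intersection, and its numeric leaves are combined as weight-scaled sums.

from stdnet.utils.jsontools import addmul_number_dicts

series = ((0.5, {'a': 10.0, 'b': 4.0}),
          (2.0, {'a': 1.0, 'c': 7.0}))

# 'a' is the only common field: 0.5*10.0 + 2.0*1.0 == 7.0
result = addmul_number_dicts(series)
assert result == {'a': 7.0}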
8,257
apache/incubator-mxnet
example/fcn-xs/init_fcnxs.py
init_from_fcnxs
def init_from_fcnxs(ctx, fcnxs_symbol, fcnxs_args_from, fcnxs_auxs_from): """ use zero initialization for better convergence, because it tends to oputut 0, and the label 0 stands for background, which may occupy most size of one image. """ fcnxs_args = fcnxs_args_from.copy() fcnxs_auxs = fcnxs_auxs_from.copy() for k,v in fcnxs_args.items(): if(v.context != ctx): fcnxs_args[k] = mx.nd.zeros(v.shape, ctx) v.copyto(fcnxs_args[k]) for k,v in fcnxs_auxs.items(): if(v.context != ctx): fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx) v.copyto(fcnxs_auxs[k]) data_shape=(1,3,500,500) arg_names = fcnxs_symbol.list_arguments() arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape) rest_params = {} deconv_params = {} # this is fcn8s init from fcn16s if 'score_pool3_weight' in arg_names: rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes) if x[0] in ['score_pool3_bias', 'score_pool3_weight']]) deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \ in ["bigscore_weight", 'score4_weight']]) # this is fcn16s init from fcn32s elif 'score_pool4_weight' in arg_names: rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes) if x[0] in ['score_pool4_weight', 'score_pool4_bias']]) deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \ in ["bigscore_weight", 'score2_weight']]) # this is fcn32s init else: logging.error("you are init the fcn32s model, so you should use init_from_vgg16()") sys.exit() fcnxs_args.update(rest_params) for k, v in deconv_params.items(): filt = upsample_filt(v[3]) initw = np.zeros(v) initw[range(v[0]), range(v[1]), :, :] = filt # becareful here is the slice assing fcnxs_args[k] = mx.nd.array(initw, ctx) return fcnxs_args, fcnxs_auxs
python
def init_from_fcnxs(ctx, fcnxs_symbol, fcnxs_args_from, fcnxs_auxs_from): """ use zero initialization for better convergence, because it tends to oputut 0, and the label 0 stands for background, which may occupy most size of one image. """ fcnxs_args = fcnxs_args_from.copy() fcnxs_auxs = fcnxs_auxs_from.copy() for k,v in fcnxs_args.items(): if(v.context != ctx): fcnxs_args[k] = mx.nd.zeros(v.shape, ctx) v.copyto(fcnxs_args[k]) for k,v in fcnxs_auxs.items(): if(v.context != ctx): fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx) v.copyto(fcnxs_auxs[k]) data_shape=(1,3,500,500) arg_names = fcnxs_symbol.list_arguments() arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape) rest_params = {} deconv_params = {} # this is fcn8s init from fcn16s if 'score_pool3_weight' in arg_names: rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes) if x[0] in ['score_pool3_bias', 'score_pool3_weight']]) deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \ in ["bigscore_weight", 'score4_weight']]) # this is fcn16s init from fcn32s elif 'score_pool4_weight' in arg_names: rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes) if x[0] in ['score_pool4_weight', 'score_pool4_bias']]) deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes) if x[0] \ in ["bigscore_weight", 'score2_weight']]) # this is fcn32s init else: logging.error("you are init the fcn32s model, so you should use init_from_vgg16()") sys.exit() fcnxs_args.update(rest_params) for k, v in deconv_params.items(): filt = upsample_filt(v[3]) initw = np.zeros(v) initw[range(v[0]), range(v[1]), :, :] = filt # becareful here is the slice assing fcnxs_args[k] = mx.nd.array(initw, ctx) return fcnxs_args, fcnxs_auxs
['def', 'init_from_fcnxs', '(', 'ctx', ',', 'fcnxs_symbol', ',', 'fcnxs_args_from', ',', 'fcnxs_auxs_from', ')', ':', 'fcnxs_args', '=', 'fcnxs_args_from', '.', 'copy', '(', ')', 'fcnxs_auxs', '=', 'fcnxs_auxs_from', '.', 'copy', '(', ')', 'for', 'k', ',', 'v', 'in', 'fcnxs_args', '.', 'items', '(', ')', ':', 'if', '(', 'v', '.', 'context', '!=', 'ctx', ')', ':', 'fcnxs_args', '[', 'k', ']', '=', 'mx', '.', 'nd', '.', 'zeros', '(', 'v', '.', 'shape', ',', 'ctx', ')', 'v', '.', 'copyto', '(', 'fcnxs_args', '[', 'k', ']', ')', 'for', 'k', ',', 'v', 'in', 'fcnxs_auxs', '.', 'items', '(', ')', ':', 'if', '(', 'v', '.', 'context', '!=', 'ctx', ')', ':', 'fcnxs_auxs', '[', 'k', ']', '=', 'mx', '.', 'nd', '.', 'zeros', '(', 'v', '.', 'shape', ',', 'ctx', ')', 'v', '.', 'copyto', '(', 'fcnxs_auxs', '[', 'k', ']', ')', 'data_shape', '=', '(', '1', ',', '3', ',', '500', ',', '500', ')', 'arg_names', '=', 'fcnxs_symbol', '.', 'list_arguments', '(', ')', 'arg_shapes', ',', '_', ',', '_', '=', 'fcnxs_symbol', '.', 'infer_shape', '(', 'data', '=', 'data_shape', ')', 'rest_params', '=', '{', '}', 'deconv_params', '=', '{', '}', '# this is fcn8s init from fcn16s', 'if', "'score_pool3_weight'", 'in', 'arg_names', ':', 'rest_params', '=', 'dict', '(', '[', '(', 'x', '[', '0', ']', ',', 'mx', '.', 'nd', '.', 'zeros', '(', 'x', '[', '1', ']', ',', 'ctx', ')', ')', 'for', 'x', 'in', 'zip', '(', 'arg_names', ',', 'arg_shapes', ')', 'if', 'x', '[', '0', ']', 'in', '[', "'score_pool3_bias'", ',', "'score_pool3_weight'", ']', ']', ')', 'deconv_params', '=', 'dict', '(', '[', '(', 'x', '[', '0', ']', ',', 'x', '[', '1', ']', ')', 'for', 'x', 'in', 'zip', '(', 'arg_names', ',', 'arg_shapes', ')', 'if', 'x', '[', '0', ']', 'in', '[', '"bigscore_weight"', ',', "'score4_weight'", ']', ']', ')', '# this is fcn16s init from fcn32s', 'elif', "'score_pool4_weight'", 'in', 'arg_names', ':', 'rest_params', '=', 'dict', '(', '[', '(', 'x', '[', '0', ']', ',', 'mx', '.', 'nd', '.', 'zeros', '(', 'x', '[', '1', ']', ',', 'ctx', ')', ')', 'for', 'x', 'in', 'zip', '(', 'arg_names', ',', 'arg_shapes', ')', 'if', 'x', '[', '0', ']', 'in', '[', "'score_pool4_weight'", ',', "'score_pool4_bias'", ']', ']', ')', 'deconv_params', '=', 'dict', '(', '[', '(', 'x', '[', '0', ']', ',', 'x', '[', '1', ']', ')', 'for', 'x', 'in', 'zip', '(', 'arg_names', ',', 'arg_shapes', ')', 'if', 'x', '[', '0', ']', 'in', '[', '"bigscore_weight"', ',', "'score2_weight'", ']', ']', ')', '# this is fcn32s init', 'else', ':', 'logging', '.', 'error', '(', '"you are init the fcn32s model, so you should use init_from_vgg16()"', ')', 'sys', '.', 'exit', '(', ')', 'fcnxs_args', '.', 'update', '(', 'rest_params', ')', 'for', 'k', ',', 'v', 'in', 'deconv_params', '.', 'items', '(', ')', ':', 'filt', '=', 'upsample_filt', '(', 'v', '[', '3', ']', ')', 'initw', '=', 'np', '.', 'zeros', '(', 'v', ')', 'initw', '[', 'range', '(', 'v', '[', '0', ']', ')', ',', 'range', '(', 'v', '[', '1', ']', ')', ',', ':', ',', ':', ']', '=', 'filt', '# becareful here is the slice assing', 'fcnxs_args', '[', 'k', ']', '=', 'mx', '.', 'nd', '.', 'array', '(', 'initw', ',', 'ctx', ')', 'return', 'fcnxs_args', ',', 'fcnxs_auxs']
use zero initialization for better convergence, because it tends to output 0, and the label 0 stands for background, which may occupy most of the area of one image.
['use', 'zero', 'initialization', 'for', 'better', 'convergence', 'because', 'it', 'tends', 'to', 'output', '0', 'and', 'the', 'label', '0', 'stands', 'for', 'background', 'which', 'may', 'occupy', 'most', 'of', 'the', 'area', 'of', 'one', 'image', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/fcn-xs/init_fcnxs.py#L65-L106
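Hedged sketch of upgrading an FCN-32s checkpoint to FCN-16s with init_from_fcnxs, assuming it runs from the example/fcn-xs directory. The checkpoint prefix, the epoch, and the symbol_fcnxs.get_fcn16s_symbol call are assumptions modeled on the surrounding example scripts, not confirmed by this record.

import mxnet as mx

import symbol_fcnxs                      # sibling module in example/fcn-xs (assumed)
from init_fcnxs import init_from_fcnxs

# Hypothetical FCN-32s checkpoint saved earlier.
_, fcn32s_args, fcn32s_auxs = mx.model.load_checkpoint("FCN32s_VGG16", 31)

fcn16s_sym = symbol_fcnxs.get_fcn16s_symbol(numclass=21, workspace_default=1536)
fcnxs_args, fcnxs_auxs = init_from_fcnxs(mx.cpu(), fcn16s_sym,
                                         fcn32s_args, fcn32s_auxs)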
8,258
summa-tx/riemann
riemann/tx/sprout.py
SproutTx._sighash_prep
def _sighash_prep(self, index, script): ''' SproutTx, int, byte-like -> SproutTx Sighashes suck Performs the sighash setup described here: https://en.bitcoin.it/wiki/OP_CHECKSIG#How_it_works https://bitcoin.stackexchange.com/questions/3374/how-to-redeem-a-basic-tx We save on complexity by refusing to support OP_CODESEPARATOR ''' if len(self.tx_ins) == 0: return self.copy(joinsplit_sig=b'') # 0 out scripts in tx_ins copy_tx_ins = [tx_in.copy(stack_script=b'', redeem_script=b'') for tx_in in self.tx_ins] # NB: The script for the current transaction input in txCopy is set to # subScript (lead in by its length as a var-integer encoded!) copy_tx_ins[index] = \ copy_tx_ins[index].copy(stack_script=b'', redeem_script=script) return self.copy(tx_ins=copy_tx_ins, joinsplit_sig=b'')
python
def _sighash_prep(self, index, script): ''' SproutTx, int, byte-like -> SproutTx Sighashes suck Performs the sighash setup described here: https://en.bitcoin.it/wiki/OP_CHECKSIG#How_it_works https://bitcoin.stackexchange.com/questions/3374/how-to-redeem-a-basic-tx We save on complexity by refusing to support OP_CODESEPARATOR ''' if len(self.tx_ins) == 0: return self.copy(joinsplit_sig=b'') # 0 out scripts in tx_ins copy_tx_ins = [tx_in.copy(stack_script=b'', redeem_script=b'') for tx_in in self.tx_ins] # NB: The script for the current transaction input in txCopy is set to # subScript (lead in by its length as a var-integer encoded!) copy_tx_ins[index] = \ copy_tx_ins[index].copy(stack_script=b'', redeem_script=script) return self.copy(tx_ins=copy_tx_ins, joinsplit_sig=b'')
['def', '_sighash_prep', '(', 'self', ',', 'index', ',', 'script', ')', ':', 'if', 'len', '(', 'self', '.', 'tx_ins', ')', '==', '0', ':', 'return', 'self', '.', 'copy', '(', 'joinsplit_sig', '=', "b''", ')', '# 0 out scripts in tx_ins', 'copy_tx_ins', '=', '[', 'tx_in', '.', 'copy', '(', 'stack_script', '=', "b''", ',', 'redeem_script', '=', "b''", ')', 'for', 'tx_in', 'in', 'self', '.', 'tx_ins', ']', '# NB: The script for the current transaction input in txCopy is set to', '# subScript (lead in by its length as a var-integer encoded!)', 'copy_tx_ins', '[', 'index', ']', '=', 'copy_tx_ins', '[', 'index', ']', '.', 'copy', '(', 'stack_script', '=', "b''", ',', 'redeem_script', '=', 'script', ')', 'return', 'self', '.', 'copy', '(', 'tx_ins', '=', 'copy_tx_ins', ',', 'joinsplit_sig', '=', "b''", ')']
SproutTx, int, byte-like -> SproutTx Sighashes suck Performs the sighash setup described here: https://en.bitcoin.it/wiki/OP_CHECKSIG#How_it_works https://bitcoin.stackexchange.com/questions/3374/how-to-redeem-a-basic-tx We save on complexity by refusing to support OP_CODESEPARATOR
['SproutTx', 'int', 'byte', '-', 'like', '-', '>', 'SproutTx', 'Sighashes', 'suck', 'Performs', 'the', 'sighash', 'setup', 'described', 'here', ':', 'https', ':', '//', 'en', '.', 'bitcoin', '.', 'it', '/', 'wiki', '/', 'OP_CHECKSIG#How_it_works', 'https', ':', '//', 'bitcoin', '.', 'stackexchange', '.', 'com', '/', 'questions', '/', '3374', '/', 'how', '-', 'to', '-', 'redeem', '-', 'a', '-', 'basic', '-', 'tx', 'We', 'save', 'on', 'complexity', 'by', 'refusing', 'to', 'support', 'OP_CODESEPARATOR']
train
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/sprout.py#L219-L240
8,259
itamarst/eliot
eliot/journald.py
sd_journal_send
def sd_journal_send(**kwargs): """ Send a message to the journald log. @param kwargs: Mapping between field names to values, both as bytes. @raise IOError: If the operation failed. """ # The function uses printf formatting, so we need to quote # percentages. fields = [ _ffi.new( "char[]", key.encode("ascii") + b'=' + value.replace(b"%", b"%%")) for key, value in kwargs.items()] fields.append(_ffi.NULL) result = _journald.sd_journal_send(*fields) if result != 0: raise IOError(-result, strerror(-result))
python
def sd_journal_send(**kwargs): """ Send a message to the journald log. @param kwargs: Mapping between field names to values, both as bytes. @raise IOError: If the operation failed. """ # The function uses printf formatting, so we need to quote # percentages. fields = [ _ffi.new( "char[]", key.encode("ascii") + b'=' + value.replace(b"%", b"%%")) for key, value in kwargs.items()] fields.append(_ffi.NULL) result = _journald.sd_journal_send(*fields) if result != 0: raise IOError(-result, strerror(-result))
['def', 'sd_journal_send', '(', '*', '*', 'kwargs', ')', ':', '# The function uses printf formatting, so we need to quote', '# percentages.', 'fields', '=', '[', '_ffi', '.', 'new', '(', '"char[]"', ',', 'key', '.', 'encode', '(', '"ascii"', ')', '+', "b'='", '+', 'value', '.', 'replace', '(', 'b"%"', ',', 'b"%%"', ')', ')', 'for', 'key', ',', 'value', 'in', 'kwargs', '.', 'items', '(', ')', ']', 'fields', '.', 'append', '(', '_ffi', '.', 'NULL', ')', 'result', '=', '_journald', '.', 'sd_journal_send', '(', '*', 'fields', ')', 'if', 'result', '!=', '0', ':', 'raise', 'IOError', '(', '-', 'result', ',', 'strerror', '(', '-', 'result', ')', ')']
Send a message to the journald log. @param kwargs: Mapping between field names to values, both as bytes. @raise IOError: If the operation failed.
['Send', 'a', 'message', 'to', 'the', 'journald', 'log', '.']
train
https://github.com/itamarst/eliot/blob/c03c96520c5492fadfc438b4b0f6336e2785ba2d/eliot/journald.py#L28-L45
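Short usage sketch for sd_journal_send. Every field name and value must be bytes, and the call only works on Linux with libsystemd present; the DEPLOY_ID field is invented.

from eliot.journald import sd_journal_send

# Percent signs in values are escaped by the function itself, so raw
# bytes can be passed directly.
sd_journal_send(MESSAGE=b"deploy 100% complete",
                PRIORITY=b"6",
                DEPLOY_ID=b"42")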
8,260
facetoe/zenpy
zenpy/lib/api.py
VariantApi.show
def show(self, item, variant): """ Show a variant. :param item: Item object or id :param variant: Variant object or id :return: """ url = self._build_url(self.endpoint.show(item, variant)) return self._get(url)
python
def show(self, item, variant): """ Show a variant. :param item: Item object or id :param variant: Variant object or id :return: """ url = self._build_url(self.endpoint.show(item, variant)) return self._get(url)
['def', 'show', '(', 'self', ',', 'item', ',', 'variant', ')', ':', 'url', '=', 'self', '.', '_build_url', '(', 'self', '.', 'endpoint', '.', 'show', '(', 'item', ',', 'variant', ')', ')', 'return', 'self', '.', '_get', '(', 'url', ')']
Show a variant. :param item: Item object or id :param variant: Variant object or id :return:
['Show', 'a', 'variant', '.']
train
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1273-L1282
8,261
kejbaly2/metrique
metrique/result.py
Result.unfinished_objects
def unfinished_objects(self): ''' Leaves only versions of those objects that has some version with `_end == None` or with `_end > right cutoff`. ''' mask = self._end_isnull if self._rbound is not None: mask = mask | (self._end > self._rbound) oids = set(self[mask]._oid.tolist()) return self[self._oid.apply(lambda oid: oid in oids)]
python
def unfinished_objects(self): ''' Leaves only versions of those objects that has some version with `_end == None` or with `_end > right cutoff`. ''' mask = self._end_isnull if self._rbound is not None: mask = mask | (self._end > self._rbound) oids = set(self[mask]._oid.tolist()) return self[self._oid.apply(lambda oid: oid in oids)]
['def', 'unfinished_objects', '(', 'self', ')', ':', 'mask', '=', 'self', '.', '_end_isnull', 'if', 'self', '.', '_rbound', 'is', 'not', 'None', ':', 'mask', '=', 'mask', '|', '(', 'self', '.', '_end', '>', 'self', '.', '_rbound', ')', 'oids', '=', 'set', '(', 'self', '[', 'mask', ']', '.', '_oid', '.', 'tolist', '(', ')', ')', 'return', 'self', '[', 'self', '.', '_oid', '.', 'apply', '(', 'lambda', 'oid', ':', 'oid', 'in', 'oids', ')', ']']
Leaves only versions of those objects that have some version with `_end == None` or with `_end > right cutoff`.
['Leaves', 'only', 'versions', 'of', 'those', 'objects', 'that', 'have', 'some', 'version', 'with', '_end', '==', 'None', 'or', 'with', '_end', '>', 'right', 'cutoff', '.']
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L313-L322
8,262
fp12/achallonge
challonge/participant.py
Participant.get_next_opponent
async def get_next_opponent(self): """ Get the opponent of the potential next match. See :func:`get_next_match` |methcoro| Raises: APIException """ next_match = await self.get_next_match() if next_match is not None: opponent_id = next_match.player1_id if next_match.player2_id == self._id else next_match.player2_id return await self._tournament.get_participant(opponent_id) return None
python
async def get_next_opponent(self): """ Get the opponent of the potential next match. See :func:`get_next_match` |methcoro| Raises: APIException """ next_match = await self.get_next_match() if next_match is not None: opponent_id = next_match.player1_id if next_match.player2_id == self._id else next_match.player2_id return await self._tournament.get_participant(opponent_id) return None
['async', 'def', 'get_next_opponent', '(', 'self', ')', ':', 'next_match', '=', 'await', 'self', '.', 'get_next_match', '(', ')', 'if', 'next_match', 'is', 'not', 'None', ':', 'opponent_id', '=', 'next_match', '.', 'player1_id', 'if', 'next_match', '.', 'player2_id', '==', 'self', '.', '_id', 'else', 'next_match', '.', 'player2_id', 'return', 'await', 'self', '.', '_tournament', '.', 'get_participant', '(', 'opponent_id', ')', 'return', 'None']
Get the opponent of the potential next match. See :func:`get_next_match` |methcoro| Raises: APIException
['Get', 'the', 'opponent', 'of', 'the', 'potential', 'next', 'match', '.', 'See', ':', 'func', ':', 'get_next_match']
train
https://github.com/fp12/achallonge/blob/25780b3c48b66400a50ff9f884e4287afd4c89e4/challonge/participant.py#L194-L207
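Hedged asyncio sketch for get_next_opponent. The credentials and ids are invented, and the challonge.get_user / get_tournament calls are assumed from the rest of the achallonge API; get_participant is confirmed by the method body above.

import asyncio

import challonge

async def main():
    user = await challonge.get_user("my_login", "my_api_key")  # hypothetical
    tournament = await user.get_tournament(3141592)            # hypothetical id
    participant = await tournament.get_participant(2718281)    # hypothetical id
    opponent = await participant.get_next_opponent()
    if opponent is not None:
        print("next opponent:", opponent.name)

asyncio.run(main())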
8,263
spotify/snakebite
snakebite/client.py
Client.copyToLocal
def copyToLocal(self, paths, dst, check_crc=False): ''' Copy files that match the file source pattern to the local name. Source is kept. When copying multiple, files, the destination must be a directory. :param paths: Paths to copy :type paths: list of strings :param dst: Destination path :type dst: string :param check_crc: Check for checksum errors :type check_crc: boolean :returns: a generator that yields strings ''' if not isinstance(paths, list): raise InvalidInputException("Paths should be a list") if not paths: raise InvalidInputException("copyToLocal: no path given") if not dst: raise InvalidInputException("copyToLocal: no destination given") dst = self._normalize_path(dst) processor = lambda path, node, dst=dst, check_crc=check_crc: self._handle_copyToLocal(path, node, dst, check_crc) for path in paths: self.base_source = None for item in self._find_items([path], processor, include_toplevel=True, recurse=True, include_children=True): if item: yield item
python
def copyToLocal(self, paths, dst, check_crc=False): ''' Copy files that match the file source pattern to the local name. Source is kept. When copying multiple, files, the destination must be a directory. :param paths: Paths to copy :type paths: list of strings :param dst: Destination path :type dst: string :param check_crc: Check for checksum errors :type check_crc: boolean :returns: a generator that yields strings ''' if not isinstance(paths, list): raise InvalidInputException("Paths should be a list") if not paths: raise InvalidInputException("copyToLocal: no path given") if not dst: raise InvalidInputException("copyToLocal: no destination given") dst = self._normalize_path(dst) processor = lambda path, node, dst=dst, check_crc=check_crc: self._handle_copyToLocal(path, node, dst, check_crc) for path in paths: self.base_source = None for item in self._find_items([path], processor, include_toplevel=True, recurse=True, include_children=True): if item: yield item
['def', 'copyToLocal', '(', 'self', ',', 'paths', ',', 'dst', ',', 'check_crc', '=', 'False', ')', ':', 'if', 'not', 'isinstance', '(', 'paths', ',', 'list', ')', ':', 'raise', 'InvalidInputException', '(', '"Paths should be a list"', ')', 'if', 'not', 'paths', ':', 'raise', 'InvalidInputException', '(', '"copyToLocal: no path given"', ')', 'if', 'not', 'dst', ':', 'raise', 'InvalidInputException', '(', '"copyToLocal: no destination given"', ')', 'dst', '=', 'self', '.', '_normalize_path', '(', 'dst', ')', 'processor', '=', 'lambda', 'path', ',', 'node', ',', 'dst', '=', 'dst', ',', 'check_crc', '=', 'check_crc', ':', 'self', '.', '_handle_copyToLocal', '(', 'path', ',', 'node', ',', 'dst', ',', 'check_crc', ')', 'for', 'path', 'in', 'paths', ':', 'self', '.', 'base_source', '=', 'None', 'for', 'item', 'in', 'self', '.', '_find_items', '(', '[', 'path', ']', ',', 'processor', ',', 'include_toplevel', '=', 'True', ',', 'recurse', '=', 'True', ',', 'include_children', '=', 'True', ')', ':', 'if', 'item', ':', 'yield', 'item']
Copy files that match the file source pattern to the local name. Source is kept. When copying multiple files, the destination must be a directory. :param paths: Paths to copy :type paths: list of strings :param dst: Destination path :type dst: string :param check_crc: Check for checksum errors :type check_crc: boolean :returns: a generator that yields strings
['Copy', 'files', 'that', 'match', 'the', 'file', 'source', 'pattern', 'to', 'the', 'local', 'name', '.', 'Source', 'is', 'kept', '.', 'When', 'copying', 'multiple', 'files', 'the', 'destination', 'must', 'be', 'a', 'directory', '.']
train
https://github.com/spotify/snakebite/blob/6a456e6100b0c1be66cc1f7f9d7f50494f369da3/snakebite/client.py#L707-L734
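Minimal sketch of consuming the copyToLocal generator; the namenode host, port, and paths are invented.

from snakebite.client import Client

client = Client("namenode.example.com", 8020)

# copyToLocal is lazy: nothing is copied until the generator is consumed.
for result in client.copyToLocal(["/data/logs"], "/tmp/logs", check_crc=True):
    print(result)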
8,264
molmod/molmod
molmod/examples/003_internal_coordinates/c_ff_hessian.py
ForceField.hessian
def hessian(self, coordinates): """Compute the force-field Hessian for the given coordinates. Argument: | ``coordinates`` -- A numpy array with the Cartesian atom coordinates, with shape (N,3). Returns: | ``hessian`` -- A numpy array with the Hessian, with shape (3*N, 3*N). """ # N3 is 3 times the number of atoms. N3 = coordinates.size # Start with a zero hessian. hessian = numpy.zeros((N3,N3), float) # Add the contribution of each term. for term in self.terms: term.add_to_hessian(coordinates, hessian) return hessian
python
def hessian(self, coordinates): """Compute the force-field Hessian for the given coordinates. Argument: | ``coordinates`` -- A numpy array with the Cartesian atom coordinates, with shape (N,3). Returns: | ``hessian`` -- A numpy array with the Hessian, with shape (3*N, 3*N). """ # N3 is 3 times the number of atoms. N3 = coordinates.size # Start with a zero hessian. hessian = numpy.zeros((N3,N3), float) # Add the contribution of each term. for term in self.terms: term.add_to_hessian(coordinates, hessian) return hessian
['def', 'hessian', '(', 'self', ',', 'coordinates', ')', ':', '# N3 is 3 times the number of atoms.', 'N3', '=', 'coordinates', '.', 'size', '# Start with a zero hessian.', 'hessian', '=', 'numpy', '.', 'zeros', '(', '(', 'N3', ',', 'N3', ')', ',', 'float', ')', '# Add the contribution of each term.', 'for', 'term', 'in', 'self', '.', 'terms', ':', 'term', '.', 'add_to_hessian', '(', 'coordinates', ',', 'hessian', ')', 'return', 'hessian']
Compute the force-field Hessian for the given coordinates. Argument: | ``coordinates`` -- A numpy array with the Cartesian atom coordinates, with shape (N,3). Returns: | ``hessian`` -- A numpy array with the Hessian, with shape (3*N, 3*N).
['Compute', 'the', 'force', '-', 'field', 'Hessian', 'for', 'the', 'given', 'coordinates', '.']
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/examples/003_internal_coordinates/c_ff_hessian.py#L85-L103
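Hedged sketch of the shape contract stated in the hessian docstring. Here ff stands for a ForceField instance assembled elsewhere in this example script, and the coordinates are random placeholders.

import numpy

coordinates = numpy.random.normal(0.0, 1.0, (5, 3))  # five fake atoms

hessian = ff.hessian(coordinates)  # ff: ForceField built earlier in the example

# The result is a (3N, 3N) matrix; for a well-formed force field it is
# also symmetric.
assert hessian.shape == (coordinates.size, coordinates.size)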
8,265
pyviz/holoviews
holoviews/plotting/bokeh/element.py
OverlayPlot.update_frame
def update_frame(self, key, ranges=None, element=None): """ Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state. """ reused = isinstance(self.hmap, DynamicMap) and self.overlaid if not reused and element is None: element = self._get_frame(key) elif element is not None: self.current_frame = element self.current_key = key items = [] if element is None else list(element.data.items()) if isinstance(self.hmap, DynamicMap): range_obj = element else: range_obj = self.hmap if element is not None: ranges = self.compute_ranges(range_obj, key, ranges) # Update plot options plot_opts = self.lookup_options(element, 'plot').options inherited = self._traverse_options(element, 'plot', self._propagate_options, defaults=False) plot_opts.update(**{k: v[0] for k, v in inherited.items() if k not in plot_opts}) self.param.set_param(**plot_opts) if element and not self.overlaid and not self.tabs and not self.batched: self._update_ranges(element, ranges) # Determine which stream (if any) triggered the update triggering = [stream for stream in self.streams if stream._triggering] for k, subplot in self.subplots.items(): el = None # If in Dynamic mode propagate elements to subplots if isinstance(self.hmap, DynamicMap) and element: # In batched mode NdOverlay is passed to subplot directly if self.batched: el = element # If not batched get the Element matching the subplot elif element is not None: idx, spec, exact = dynamic_update(self, subplot, k, element, items) if idx is not None: _, el = items.pop(idx) if not exact: self._update_subplot(subplot, spec) # Skip updates to subplots when its streams is not one of # the streams that initiated the update if (triggering and all(s not in triggering for s in subplot.streams) and not subplot in self.dynamic_subplots): continue subplot.update_frame(key, ranges, element=el) if not self.batched and isinstance(self.hmap, DynamicMap) and items: init_kwargs = {'plots': self.handles['plots']} if not self.tabs: init_kwargs['plot'] = self.handles['plot'] self._create_dynamic_subplots(key, items, ranges, **init_kwargs) if not self.overlaid and not self.tabs: self._process_legend() if element and not self.overlaid and not self.tabs and not self.batched: plot = self.handles['plot'] self._update_plot(key, plot, element) self._set_active_tools(plot) self._process_legend() self._execute_hooks(element)
python
def update_frame(self, key, ranges=None, element=None): """ Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state. """ reused = isinstance(self.hmap, DynamicMap) and self.overlaid if not reused and element is None: element = self._get_frame(key) elif element is not None: self.current_frame = element self.current_key = key items = [] if element is None else list(element.data.items()) if isinstance(self.hmap, DynamicMap): range_obj = element else: range_obj = self.hmap if element is not None: ranges = self.compute_ranges(range_obj, key, ranges) # Update plot options plot_opts = self.lookup_options(element, 'plot').options inherited = self._traverse_options(element, 'plot', self._propagate_options, defaults=False) plot_opts.update(**{k: v[0] for k, v in inherited.items() if k not in plot_opts}) self.param.set_param(**plot_opts) if element and not self.overlaid and not self.tabs and not self.batched: self._update_ranges(element, ranges) # Determine which stream (if any) triggered the update triggering = [stream for stream in self.streams if stream._triggering] for k, subplot in self.subplots.items(): el = None # If in Dynamic mode propagate elements to subplots if isinstance(self.hmap, DynamicMap) and element: # In batched mode NdOverlay is passed to subplot directly if self.batched: el = element # If not batched get the Element matching the subplot elif element is not None: idx, spec, exact = dynamic_update(self, subplot, k, element, items) if idx is not None: _, el = items.pop(idx) if not exact: self._update_subplot(subplot, spec) # Skip updates to subplots when its streams is not one of # the streams that initiated the update if (triggering and all(s not in triggering for s in subplot.streams) and not subplot in self.dynamic_subplots): continue subplot.update_frame(key, ranges, element=el) if not self.batched and isinstance(self.hmap, DynamicMap) and items: init_kwargs = {'plots': self.handles['plots']} if not self.tabs: init_kwargs['plot'] = self.handles['plot'] self._create_dynamic_subplots(key, items, ranges, **init_kwargs) if not self.overlaid and not self.tabs: self._process_legend() if element and not self.overlaid and not self.tabs and not self.batched: plot = self.handles['plot'] self._update_plot(key, plot, element) self._set_active_tools(plot) self._process_legend() self._execute_hooks(element)
['def', 'update_frame', '(', 'self', ',', 'key', ',', 'ranges', '=', 'None', ',', 'element', '=', 'None', ')', ':', 'reused', '=', 'isinstance', '(', 'self', '.', 'hmap', ',', 'DynamicMap', ')', 'and', 'self', '.', 'overlaid', 'if', 'not', 'reused', 'and', 'element', 'is', 'None', ':', 'element', '=', 'self', '.', '_get_frame', '(', 'key', ')', 'elif', 'element', 'is', 'not', 'None', ':', 'self', '.', 'current_frame', '=', 'element', 'self', '.', 'current_key', '=', 'key', 'items', '=', '[', ']', 'if', 'element', 'is', 'None', 'else', 'list', '(', 'element', '.', 'data', '.', 'items', '(', ')', ')', 'if', 'isinstance', '(', 'self', '.', 'hmap', ',', 'DynamicMap', ')', ':', 'range_obj', '=', 'element', 'else', ':', 'range_obj', '=', 'self', '.', 'hmap', 'if', 'element', 'is', 'not', 'None', ':', 'ranges', '=', 'self', '.', 'compute_ranges', '(', 'range_obj', ',', 'key', ',', 'ranges', ')', '# Update plot options', 'plot_opts', '=', 'self', '.', 'lookup_options', '(', 'element', ',', "'plot'", ')', '.', 'options', 'inherited', '=', 'self', '.', '_traverse_options', '(', 'element', ',', "'plot'", ',', 'self', '.', '_propagate_options', ',', 'defaults', '=', 'False', ')', 'plot_opts', '.', 'update', '(', '*', '*', '{', 'k', ':', 'v', '[', '0', ']', 'for', 'k', ',', 'v', 'in', 'inherited', '.', 'items', '(', ')', 'if', 'k', 'not', 'in', 'plot_opts', '}', ')', 'self', '.', 'param', '.', 'set_param', '(', '*', '*', 'plot_opts', ')', 'if', 'element', 'and', 'not', 'self', '.', 'overlaid', 'and', 'not', 'self', '.', 'tabs', 'and', 'not', 'self', '.', 'batched', ':', 'self', '.', '_update_ranges', '(', 'element', ',', 'ranges', ')', '# Determine which stream (if any) triggered the update', 'triggering', '=', '[', 'stream', 'for', 'stream', 'in', 'self', '.', 'streams', 'if', 'stream', '.', '_triggering', ']', 'for', 'k', ',', 'subplot', 'in', 'self', '.', 'subplots', '.', 'items', '(', ')', ':', 'el', '=', 'None', '# If in Dynamic mode propagate elements to subplots', 'if', 'isinstance', '(', 'self', '.', 'hmap', ',', 'DynamicMap', ')', 'and', 'element', ':', '# In batched mode NdOverlay is passed to subplot directly', 'if', 'self', '.', 'batched', ':', 'el', '=', 'element', '# If not batched get the Element matching the subplot', 'elif', 'element', 'is', 'not', 'None', ':', 'idx', ',', 'spec', ',', 'exact', '=', 'dynamic_update', '(', 'self', ',', 'subplot', ',', 'k', ',', 'element', ',', 'items', ')', 'if', 'idx', 'is', 'not', 'None', ':', '_', ',', 'el', '=', 'items', '.', 'pop', '(', 'idx', ')', 'if', 'not', 'exact', ':', 'self', '.', '_update_subplot', '(', 'subplot', ',', 'spec', ')', '# Skip updates to subplots when its streams is not one of', '# the streams that initiated the update', 'if', '(', 'triggering', 'and', 'all', '(', 's', 'not', 'in', 'triggering', 'for', 's', 'in', 'subplot', '.', 'streams', ')', 'and', 'not', 'subplot', 'in', 'self', '.', 'dynamic_subplots', ')', ':', 'continue', 'subplot', '.', 'update_frame', '(', 'key', ',', 'ranges', ',', 'element', '=', 'el', ')', 'if', 'not', 'self', '.', 'batched', 'and', 'isinstance', '(', 'self', '.', 'hmap', ',', 'DynamicMap', ')', 'and', 'items', ':', 'init_kwargs', '=', '{', "'plots'", ':', 'self', '.', 'handles', '[', "'plots'", ']', '}', 'if', 'not', 'self', '.', 'tabs', ':', 'init_kwargs', '[', "'plot'", ']', '=', 'self', '.', 'handles', '[', "'plot'", ']', 'self', '.', '_create_dynamic_subplots', '(', 'key', ',', 'items', ',', 'ranges', ',', '*', '*', 'init_kwargs', ')', 'if', 'not', 'self', '.', 'overlaid', 'and', 'not', 'self', '.', 'tabs', ':', 'self', '.', '_process_legend', '(', ')', 'if', 'element', 'and', 'not', 'self', '.', 'overlaid', 'and', 'not', 'self', '.', 'tabs', 'and', 'not', 'self', '.', 'batched', ':', 'plot', '=', 'self', '.', 'handles', '[', "'plot'", ']', 'self', '.', '_update_plot', '(', 'key', ',', 'plot', ',', 'element', ')', 'self', '.', '_set_active_tools', '(', 'plot', ')', 'self', '.', '_process_legend', '(', ')', 'self', '.', '_execute_hooks', '(', 'element', ')']
Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state.
['Update', 'the', 'internal', 'state', 'of', 'the', 'Plot', 'to', 'represent', 'the', 'given', 'key', 'tuple', '(', 'where', 'integers', 'represent', 'frames', ')', '.', 'Returns', 'this', 'state', '.']
train
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/element.py#L2106-L2180
8,266
ForensicArtifacts/artifacts
artifacts/registry.py
ArtifactDefinitionsRegistry.ReadFromFile
def ReadFromFile(self, artifacts_reader, filename): """Reads artifact definitions into the registry from a file. Args: artifacts_reader (ArtifactsReader): an artifacts reader. filename (str): name of the file to read from. """ for artifact_definition in artifacts_reader.ReadFile(filename): self.RegisterDefinition(artifact_definition)
python
def ReadFromFile(self, artifacts_reader, filename): """Reads artifact definitions into the registry from a file. Args: artifacts_reader (ArtifactsReader): an artifacts reader. filename (str): name of the file to read from. """ for artifact_definition in artifacts_reader.ReadFile(filename): self.RegisterDefinition(artifact_definition)
['def', 'ReadFromFile', '(', 'self', ',', 'artifacts_reader', ',', 'filename', ')', ':', 'for', 'artifact_definition', 'in', 'artifacts_reader', '.', 'ReadFile', '(', 'filename', ')', ':', 'self', '.', 'RegisterDefinition', '(', 'artifact_definition', ')']
Reads artifact definitions into the registry from a file. Args: artifacts_reader (ArtifactsReader): an artifacts reader. filename (str): name of the file to read from.
['Reads', 'artifact', 'definitions', 'into', 'the', 'registry', 'from', 'a', 'file', '.']
train
https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/artifacts/registry.py#L200-L208
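Usage sketch for ReadFromFile with the library's YAML reader; the definitions file name is hypothetical.

from artifacts import reader, registry

artifact_registry = registry.ArtifactDefinitionsRegistry()
artifacts_reader = reader.YamlArtifactsReader()

artifact_registry.ReadFromFile(artifacts_reader, "definitions/windows.yaml")

for definition in artifact_registry.GetDefinitions():
    print(definition.name)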
8,267
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/pytree.py
WildcardPattern.optimize
def optimize(self): """Optimize certain stacked wildcard patterns.""" subpattern = None if (self.content is not None and len(self.content) == 1 and len(self.content[0]) == 1): subpattern = self.content[0][0] if self.min == 1 and self.max == 1: if self.content is None: return NodePattern(name=self.name) if subpattern is not None and self.name == subpattern.name: return subpattern.optimize() if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and subpattern.min <= 1 and self.name == subpattern.name): return WildcardPattern(subpattern.content, self.min*subpattern.min, self.max*subpattern.max, subpattern.name) return self
python
def optimize(self): """Optimize certain stacked wildcard patterns.""" subpattern = None if (self.content is not None and len(self.content) == 1 and len(self.content[0]) == 1): subpattern = self.content[0][0] if self.min == 1 and self.max == 1: if self.content is None: return NodePattern(name=self.name) if subpattern is not None and self.name == subpattern.name: return subpattern.optimize() if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and subpattern.min <= 1 and self.name == subpattern.name): return WildcardPattern(subpattern.content, self.min*subpattern.min, self.max*subpattern.max, subpattern.name) return self
['def', 'optimize', '(', 'self', ')', ':', 'subpattern', '=', 'None', 'if', '(', 'self', '.', 'content', 'is', 'not', 'None', 'and', 'len', '(', 'self', '.', 'content', ')', '==', '1', 'and', 'len', '(', 'self', '.', 'content', '[', '0', ']', ')', '==', '1', ')', ':', 'subpattern', '=', 'self', '.', 'content', '[', '0', ']', '[', '0', ']', 'if', 'self', '.', 'min', '==', '1', 'and', 'self', '.', 'max', '==', '1', ':', 'if', 'self', '.', 'content', 'is', 'None', ':', 'return', 'NodePattern', '(', 'name', '=', 'self', '.', 'name', ')', 'if', 'subpattern', 'is', 'not', 'None', 'and', 'self', '.', 'name', '==', 'subpattern', '.', 'name', ':', 'return', 'subpattern', '.', 'optimize', '(', ')', 'if', '(', 'self', '.', 'min', '<=', '1', 'and', 'isinstance', '(', 'subpattern', ',', 'WildcardPattern', ')', 'and', 'subpattern', '.', 'min', '<=', '1', 'and', 'self', '.', 'name', '==', 'subpattern', '.', 'name', ')', ':', 'return', 'WildcardPattern', '(', 'subpattern', '.', 'content', ',', 'self', '.', 'min', '*', 'subpattern', '.', 'min', ',', 'self', '.', 'max', '*', 'subpattern', '.', 'max', ',', 'subpattern', '.', 'name', ')', 'return', 'self']
Optimize certain stacked wildcard patterns.
['Optimize', 'certain', 'stacked', 'wildcard', 'patterns', '.']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L688-L705
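Self-contained check of the first optimization branch: a min=1/max=1 wildcard whose single alternative carries the same name collapses to that subpattern. The stdlib copy of lib2to3 defines the identical class, so it stands in for the vendored module here.

from lib2to3.pytree import NodePattern, WildcardPattern

inner = NodePattern(name="n")
wild = WildcardPattern(content=[[inner]], min=1, max=1, name="n")

# min == max == 1 and matching names: optimize() returns the subpattern.
assert wild.optimize() is inner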
8,268
ionelmc/python-hunter
src/hunter/tracer.py
Tracer.stop
def stop(self): """ Stop tracing. Reinstalls the :ref:`hunter.Tracer.previous` tracer. """ if self._handler is not None: sys.settrace(self._previous) self._handler = self._previous = None if self.threading_support is None or self.threading_support: threading.settrace(self._threading_previous) self._threading_previous = None
python
def stop(self): """ Stop tracing. Reinstalls the :ref:`hunter.Tracer.previous` tracer. """ if self._handler is not None: sys.settrace(self._previous) self._handler = self._previous = None if self.threading_support is None or self.threading_support: threading.settrace(self._threading_previous) self._threading_previous = None
['def', 'stop', '(', 'self', ')', ':', 'if', 'self', '.', '_handler', 'is', 'not', 'None', ':', 'sys', '.', 'settrace', '(', 'self', '.', '_previous', ')', 'self', '.', '_handler', '=', 'self', '.', '_previous', '=', 'None', 'if', 'self', '.', 'threading_support', 'is', 'None', 'or', 'self', '.', 'threading_support', ':', 'threading', '.', 'settrace', '(', 'self', '.', '_threading_previous', ')', 'self', '.', '_threading_previous', '=', 'None']
Stop tracing. Reinstalls the :ref:`hunter.Tracer.previous` tracer.
['Stop', 'tracing', '.', 'Reinstalls', 'the', ':', 'ref', ':', 'hunter', '.', 'Tracer', '.', 'previous', 'tracer', '.']
train
https://github.com/ionelmc/python-hunter/blob/b3a1310b0593d2c6b6ef430883843896e17d6a81/src/hunter/tracer.py#L98-L107
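Short sketch of the public API around Tracer.stop: hunter.trace installs a Tracer, and hunter.stop tears it down, restoring whatever trace function was active before.

import hunter

hunter.trace(module="posixpath", action=hunter.CallPrinter)

import posixpath
posixpath.join("a", "b")   # traced

hunter.stop()              # reinstalls the previous tracer
posixpath.join("c", "d")   # no longer traced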
8,269
materialsproject/pymatgen
pymatgen/analysis/surface_analysis.py
SurfaceEnergyPlotter.stable_u_range_dict
def stable_u_range_dict(self, chempot_range, ref_delu, no_doped=True, no_clean=False, delu_dict={}, miller_index=(), dmu_at_0=False, return_se_dict=False): """ Creates a dictionary where each entry is a key pointing to a chemical potential range where the surface of that entry is stable. Does so by enumerating through all possible solutions (intersect) for surface energies of a specific facet. Args: chempot_range ([max_chempot, min_chempot]): Range to consider the stability of the slabs. ref_delu (sympy Symbol): The range stability of each slab is based on the chempot range of this chempot. Should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element no_doped (bool): Consider stability of clean slabs only. no_clean (bool): Consider stability of doped slabs only. delu_dict (Dict): Dictionary of the chemical potentials to be set as constant. Note the key should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element. miller_index (list): Miller index for a specific facet to get a dictionary for. dmu_at_0 (bool): If True, if the surface energies corresponding to the chemical potential range is between a negative and positive value, the value is a list of three chemical potentials with the one in the center corresponding to a surface energy of 0. Useful in identifying unphysical ranges of surface energies and their chemical potential range. return_se_dict (bool): Whether or not to return the corresponding dictionary of surface energies """ chempot_range = sorted(chempot_range) stable_urange_dict, se_dict = {}, {} # Get all entries for a specific facet for hkl in self.all_slab_entries.keys(): entries_in_hkl = [] # Skip this facet if this is not the facet we want if miller_index and hkl != tuple(miller_index): continue if not no_clean: entries_in_hkl.extend([clean for clean in self.all_slab_entries[hkl]]) if not no_doped: for entry in self.all_slab_entries[hkl]: entries_in_hkl.extend([ads_entry for ads_entry in self.all_slab_entries[hkl][entry]]) for entry in entries_in_hkl: stable_urange_dict[entry] = [] se_dict[entry] = [] # if there is only one entry for this facet, then just give it the # default urange, you can't make combinations with just 1 item if len(entries_in_hkl) == 1: stable_urange_dict[entries_in_hkl[0]] = chempot_range u1, u2 = delu_dict.copy(), delu_dict.copy() u1[ref_delu], u2[ref_delu] = chempot_range[0], chempot_range[1] se = self.as_coeffs_dict[entries_in_hkl[0]] se_dict[entries_in_hkl[0]] = [sub_chempots(se, u1), sub_chempots(se, u2)] continue for pair in itertools.combinations(entries_in_hkl, 2): # I'm assuming ref_delu was not set in delu_dict, # so the solution should be for ref_delu solution = self.get_surface_equilibrium(pair, delu_dict=delu_dict) # Check if this solution is stable if not solution: continue new_delu_dict = delu_dict.copy() new_delu_dict[ref_delu] = solution[ref_delu] stable_entry, gamma = self.get_stable_entry_at_u(hkl, new_delu_dict, no_doped=no_doped, no_clean=no_clean) if stable_entry not in pair: continue # Now check if the solution is within the chempot range if not (chempot_range[0] <= solution[ref_delu] <= chempot_range[1]): continue for entry in pair: stable_urange_dict[entry].append(solution[ref_delu]) se_dict[entry].append(gamma) # Now check if all entries have 2 chempot values. If only # one, we need to set the other value as either the upper # limit or lower limit of the user provided chempot_range new_delu_dict = delu_dict.copy() for u in chempot_range: new_delu_dict[ref_delu] = u entry, gamma = self.get_stable_entry_at_u(hkl, delu_dict=new_delu_dict, no_doped=no_doped, no_clean=no_clean) stable_urange_dict[entry].append(u) se_dict[entry].append(gamma) if dmu_at_0: for entry in se_dict.keys(): # if se are of opposite sign, determine chempot when se=0. # Useful for finding a chempot range where se is unphysical if not stable_urange_dict[entry]: continue if se_dict[entry][0] * se_dict[entry][1] < 0: # solve for gamma=0 se = self.as_coeffs_dict[entry] se_dict[entry].append(0) stable_urange_dict[entry].append(solve(sub_chempots(se, delu_dict), ref_delu)[0]) # sort the chempot ranges for each facet for entry in stable_urange_dict.keys(): se_dict[entry] = [se for i, se in sorted(zip(stable_urange_dict[entry], se_dict[entry]))] stable_urange_dict[entry] = sorted(stable_urange_dict[entry]) if return_se_dict: return stable_urange_dict, se_dict else: return stable_urange_dict
python
def stable_u_range_dict(self, chempot_range, ref_delu, no_doped=True, no_clean=False, delu_dict={}, miller_index=(), dmu_at_0=False, return_se_dict=False): """ Creates a dictionary where each entry is a key pointing to a chemical potential range where the surface of that entry is stable. Does so by enumerating through all possible solutions (intersect) for surface energies of a specific facet. Args: chempot_range ([max_chempot, min_chempot]): Range to consider the stability of the slabs. ref_delu (sympy Symbol): The range stability of each slab is based on the chempot range of this chempot. Should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element no_doped (bool): Consider stability of clean slabs only. no_clean (bool): Consider stability of doped slabs only. delu_dict (Dict): Dictionary of the chemical potentials to be set as constant. Note the key should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element. miller_index (list): Miller index for a specific facet to get a dictionary for. dmu_at_0 (bool): If True, if the surface energies corresponding to the chemical potential range is between a negative and positive value, the value is a list of three chemical potentials with the one in the center corresponding to a surface energy of 0. Useful in identifying unphysical ranges of surface energies and their chemical potential range. return_se_dict (bool): Whether or not to return the corresponding dictionary of surface energies """ chempot_range = sorted(chempot_range) stable_urange_dict, se_dict = {}, {} # Get all entries for a specific facet for hkl in self.all_slab_entries.keys(): entries_in_hkl = [] # Skip this facet if this is not the facet we want if miller_index and hkl != tuple(miller_index): continue if not no_clean: entries_in_hkl.extend([clean for clean in self.all_slab_entries[hkl]]) if not no_doped: for entry in self.all_slab_entries[hkl]: entries_in_hkl.extend([ads_entry for ads_entry in self.all_slab_entries[hkl][entry]]) for entry in entries_in_hkl: stable_urange_dict[entry] = [] se_dict[entry] = [] # if there is only one entry for this facet, then just give it the # default urange, you can't make combinations with just 1 item if len(entries_in_hkl) == 1: stable_urange_dict[entries_in_hkl[0]] = chempot_range u1, u2 = delu_dict.copy(), delu_dict.copy() u1[ref_delu], u2[ref_delu] = chempot_range[0], chempot_range[1] se = self.as_coeffs_dict[entries_in_hkl[0]] se_dict[entries_in_hkl[0]] = [sub_chempots(se, u1), sub_chempots(se, u2)] continue for pair in itertools.combinations(entries_in_hkl, 2): # I'm assuming ref_delu was not set in delu_dict, # so the solution should be for ref_delu solution = self.get_surface_equilibrium(pair, delu_dict=delu_dict) # Check if this solution is stable if not solution: continue new_delu_dict = delu_dict.copy() new_delu_dict[ref_delu] = solution[ref_delu] stable_entry, gamma = self.get_stable_entry_at_u(hkl, new_delu_dict, no_doped=no_doped, no_clean=no_clean) if stable_entry not in pair: continue # Now check if the solution is within the chempot range if not (chempot_range[0] <= solution[ref_delu] <= chempot_range[1]): continue for entry in pair: stable_urange_dict[entry].append(solution[ref_delu]) se_dict[entry].append(gamma) # Now check if all entries have 2 chempot values. If only # one, we need to set the other value as either the upper # limit or lower limit of the user provided chempot_range new_delu_dict = delu_dict.copy() for u in chempot_range: new_delu_dict[ref_delu] = u entry, gamma = self.get_stable_entry_at_u(hkl, delu_dict=new_delu_dict, no_doped=no_doped, no_clean=no_clean) stable_urange_dict[entry].append(u) se_dict[entry].append(gamma) if dmu_at_0: for entry in se_dict.keys(): # if se are of opposite sign, determine chempot when se=0. # Useful for finding a chempot range where se is unphysical if not stable_urange_dict[entry]: continue if se_dict[entry][0] * se_dict[entry][1] < 0: # solve for gamma=0 se = self.as_coeffs_dict[entry] se_dict[entry].append(0) stable_urange_dict[entry].append(solve(sub_chempots(se, delu_dict), ref_delu)[0]) # sort the chempot ranges for each facet for entry in stable_urange_dict.keys(): se_dict[entry] = [se for i, se in sorted(zip(stable_urange_dict[entry], se_dict[entry]))] stable_urange_dict[entry] = sorted(stable_urange_dict[entry]) if return_se_dict: return stable_urange_dict, se_dict else: return stable_urange_dict
['def', 'stable_u_range_dict', '(', 'self', ',', 'chempot_range', ',', 'ref_delu', ',', 'no_doped', '=', 'True', ',', 'no_clean', '=', 'False', ',', 'delu_dict', '=', '{', '}', ',', 'miller_index', '=', '(', ')', ',', 'dmu_at_0', '=', 'False', ',', 'return_se_dict', '=', 'False', ')', ':', 'chempot_range', '=', 'sorted', '(', 'chempot_range', ')', 'stable_urange_dict', ',', 'se_dict', '=', '{', '}', ',', '{', '}', '# Get all entries for a specific facet', 'for', 'hkl', 'in', 'self', '.', 'all_slab_entries', '.', 'keys', '(', ')', ':', 'entries_in_hkl', '=', '[', ']', '# Skip this facet if this is not the facet we want', 'if', 'miller_index', 'and', 'hkl', '!=', 'tuple', '(', 'miller_index', ')', ':', 'continue', 'if', 'not', 'no_clean', ':', 'entries_in_hkl', '.', 'extend', '(', '[', 'clean', 'for', 'clean', 'in', 'self', '.', 'all_slab_entries', '[', 'hkl', ']', ']', ')', 'if', 'not', 'no_doped', ':', 'for', 'entry', 'in', 'self', '.', 'all_slab_entries', '[', 'hkl', ']', ':', 'entries_in_hkl', '.', 'extend', '(', '[', 'ads_entry', 'for', 'ads_entry', 'in', 'self', '.', 'all_slab_entries', '[', 'hkl', ']', '[', 'entry', ']', ']', ')', 'for', 'entry', 'in', 'entries_in_hkl', ':', 'stable_urange_dict', '[', 'entry', ']', '=', '[', ']', 'se_dict', '[', 'entry', ']', '=', '[', ']', '# if there is only one entry for this facet, then just give it the', "# default urange, you can't make combinations with just 1 item", 'if', 'len', '(', 'entries_in_hkl', ')', '==', '1', ':', 'stable_urange_dict', '[', 'entries_in_hkl', '[', '0', ']', ']', '=', 'chempot_range', 'u1', ',', 'u2', '=', 'delu_dict', '.', 'copy', '(', ')', ',', 'delu_dict', '.', 'copy', '(', ')', 'u1', '[', 'ref_delu', ']', ',', 'u2', '[', 'ref_delu', ']', '=', 'chempot_range', '[', '0', ']', ',', 'chempot_range', '[', '1', ']', 'se', '=', 'self', '.', 'as_coeffs_dict', '[', 'entries_in_hkl', '[', '0', ']', ']', 'se_dict', '[', 'entries_in_hkl', '[', '0', ']', ']', '=', '[', 'sub_chempots', '(', 'se', ',', 'u1', ')', ',', 'sub_chempots', '(', 'se', ',', 'u2', ')', ']', 'continue', 'for', 'pair', 'in', 'itertools', '.', 'combinations', '(', 'entries_in_hkl', ',', '2', ')', ':', "# I'm assuming ref_delu was not set in delu_dict,", '# so the solution should be for ref_delu', 'solution', '=', 'self', '.', 'get_surface_equilibrium', '(', 'pair', ',', 'delu_dict', '=', 'delu_dict', ')', '# Check if this solution is stable', 'if', 'not', 'solution', ':', 'continue', 'new_delu_dict', '=', 'delu_dict', '.', 'copy', '(', ')', 'new_delu_dict', '[', 'ref_delu', ']', '=', 'solution', '[', 'ref_delu', ']', 'stable_entry', ',', 'gamma', '=', 'self', '.', 'get_stable_entry_at_u', '(', 'hkl', ',', 'new_delu_dict', ',', 'no_doped', '=', 'no_doped', ',', 'no_clean', '=', 'no_clean', ')', 'if', 'stable_entry', 'not', 'in', 'pair', ':', 'continue', '# Now check if the solution is within the chempot range', 'if', 'not', '(', 'chempot_range', '[', '0', ']', '<=', 'solution', '[', 'ref_delu', ']', '<=', 'chempot_range', '[', '1', ']', ')', ':', 'continue', 'for', 'entry', 'in', 'pair', ':', 'stable_urange_dict', '[', 'entry', ']', '.', 'append', '(', 'solution', '[', 'ref_delu', ']', ')', 'se_dict', '[', 'entry', ']', '.', 'append', '(', 'gamma', ')', '# Now check if all entries have 2 chempot values. If only', '# one, we need to set the other value as either the upper', '# limit or lower limit of the user provided chempot_range', 'new_delu_dict', '=', 'delu_dict', '.', 'copy', '(', ')', 'for', 'u', 'in', 'chempot_range', ':', 'new_delu_dict', '[', 'ref_delu', ']', '=', 'u', 'entry', ',', 'gamma', '=', 'self', '.', 'get_stable_entry_at_u', '(', 'hkl', ',', 'delu_dict', '=', 'new_delu_dict', ',', 'no_doped', '=', 'no_doped', ',', 'no_clean', '=', 'no_clean', ')', 'stable_urange_dict', '[', 'entry', ']', '.', 'append', '(', 'u', ')', 'se_dict', '[', 'entry', ']', '.', 'append', '(', 'gamma', ')', 'if', 'dmu_at_0', ':', 'for', 'entry', 'in', 'se_dict', '.', 'keys', '(', ')', ':', '# if se are of opposite sign, determine chempot when se=0.', '# Useful for finding a chempot range where se is unphysical', 'if', 'not', 'stable_urange_dict', '[', 'entry', ']', ':', 'continue', 'if', 'se_dict', '[', 'entry', ']', '[', '0', ']', '*', 'se_dict', '[', 'entry', ']', '[', '1', ']', '<', '0', ':', '# solve for gamma=0', 'se', '=', 'self', '.', 'as_coeffs_dict', '[', 'entry', ']', 'se_dict', '[', 'entry', ']', '.', 'append', '(', '0', ')', 'stable_urange_dict', '[', 'entry', ']', '.', 'append', '(', 'solve', '(', 'sub_chempots', '(', 'se', ',', 'delu_dict', ')', ',', 'ref_delu', ')', '[', '0', ']', ')', '# sort the chempot ranges for each facet', 'for', 'entry', 'in', 'stable_urange_dict', '.', 'keys', '(', ')', ':', 'se_dict', '[', 'entry', ']', '=', '[', 'se', 'for', 'i', ',', 'se', 'in', 'sorted', '(', 'zip', '(', 'stable_urange_dict', '[', 'entry', ']', ',', 'se_dict', '[', 'entry', ']', ')', ')', ']', 'stable_urange_dict', '[', 'entry', ']', '=', 'sorted', '(', 'stable_urange_dict', '[', 'entry', ']', ')', 'if', 'return_se_dict', ':', 'return', 'stable_urange_dict', ',', 'se_dict', 'else', ':', 'return', 'stable_urange_dict']
Creates a dictionary where each entry is a key pointing to a chemical potential range where the surface of that entry is stable. Does so by enumerating through all possible solutions (intersect) for surface energies of a specific facet. Args: chempot_range ([max_chempot, min_chempot]): Range to consider the stability of the slabs. ref_delu (sympy Symbol): The range stability of each slab is based on the chempot range of this chempot. Should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element no_doped (bool): Consider stability of clean slabs only. no_clean (bool): Consider stability of doped slabs only. delu_dict (Dict): Dictionary of the chemical potentials to be set as constant. Note the key should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element. miller_index (list): Miller index for a specific facet to get a dictionary for. dmu_at_0 (bool): If True, if the surface energies corresponding to the chemical potential range is between a negative and positive value, the value is a list of three chemical potentials with the one in the center corresponding to a surface energy of 0. Useful in identifying unphysical ranges of surface energies and their chemical potential range. return_se_dict (bool): Whether or not to return the corresponding dictionary of surface energies
['Creates', 'a', 'dictionary', 'where', 'each', 'entry', 'is', 'a', 'key', 'pointing', 'to', 'a', 'chemical', 'potential', 'range', 'where', 'the', 'surface', 'of', 'that', 'entry', 'is', 'stable', '.', 'Does', 'so', 'by', 'enumerating', 'through', 'all', 'possible', 'solutions', '(', 'intersect', ')', 'for', 'surface', 'energies', 'of', 'a', 'specific', 'facet', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/surface_analysis.py#L661-L781
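A hedged call sketch for the method above (the source path suggests it lives on pymatgen's SurfaceEnergyPlotter; the plotter construction from slab entries is elided, so treat it as a placeholder):

from sympy import Symbol

delu_O = Symbol('delu_O')   # chempot symbol in the 'delu_el' format the docstring requires
# plotter: a SurfaceEnergyPlotter-like object exposing this method (construction elided)
urange_dict = plotter.stable_u_range_dict([-2.0, 0.0], delu_O,
                                          miller_index=(1, 1, 1))
# -> {slab_entry: [u_min, u_max], ...} chempot windows where each entry is stable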
8,270
pycontribs/pyrax
pyrax/object_storage.py
ContainerManager.delete_account_metadata
def delete_account_metadata(self, prefix=None): """ Removes all metadata matching the specified prefix from the account. By default, the standard account metadata prefix ('X-Account-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string. """ # Add the metadata prefix, if needed. if prefix is None: prefix = ACCOUNT_META_PREFIX curr_meta = self.get_account_metadata(prefix=prefix) for ckey in curr_meta: curr_meta[ckey] = "" new_meta = _massage_metakeys(curr_meta, prefix) uri = "/" resp, resp_body = self.api.method_post(uri, headers=new_meta) return 200 <= resp.status_code <= 299
python
def delete_account_metadata(self, prefix=None): """ Removes all metadata matching the specified prefix from the account. By default, the standard account metadata prefix ('X-Account-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string. """ # Add the metadata prefix, if needed. if prefix is None: prefix = ACCOUNT_META_PREFIX curr_meta = self.get_account_metadata(prefix=prefix) for ckey in curr_meta: curr_meta[ckey] = "" new_meta = _massage_metakeys(curr_meta, prefix) uri = "/" resp, resp_body = self.api.method_post(uri, headers=new_meta) return 200 <= resp.status_code <= 299
['def', 'delete_account_metadata', '(', 'self', ',', 'prefix', '=', 'None', ')', ':', '# Add the metadata prefix, if needed.', 'if', 'prefix', 'is', 'None', ':', 'prefix', '=', 'ACCOUNT_META_PREFIX', 'curr_meta', '=', 'self', '.', 'get_account_metadata', '(', 'prefix', '=', 'prefix', ')', 'for', 'ckey', 'in', 'curr_meta', ':', 'curr_meta', '[', 'ckey', ']', '=', '""', 'new_meta', '=', '_massage_metakeys', '(', 'curr_meta', ',', 'prefix', ')', 'uri', '=', '"/"', 'resp', ',', 'resp_body', '=', 'self', '.', 'api', '.', 'method_post', '(', 'uri', ',', 'headers', '=', 'new_meta', ')', 'return', '200', '<=', 'resp', '.', 'status_code', '<=', '299']
Removes all metadata matching the specified prefix from the account. By default, the standard account metadata prefix ('X-Account-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string.
['Removes', 'all', 'metadata', 'matching', 'the', 'specified', 'prefix', 'from', 'the', 'account', '.']
train
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L948-L965
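A minimal usage sketch, assuming pyrax's StorageClient proxies this ContainerManager method as it does the other account-metadata calls (identity type and credential file path are placeholders):

import pyrax

pyrax.set_setting("identity_type", "rackspace")
pyrax.set_credential_file("~/.pyrax_credentials")   # hypothetical path
cf = pyrax.cloudfiles
ok = cf.delete_account_metadata()   # blanks every X-Account-Meta-* header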
8,271
respeaker/respeaker_python_library
respeaker/usb_hid/hidapi_backend.py
HidApiUSB.write
def write(self, data): """ write data on the OUT endpoint associated to the HID interface """ for _ in range(64 - len(data)): data.append(0) #logging.debug("send: %s", data) self.device.write(bytearray([0]) + data) return
python
def write(self, data): """ write data on the OUT endpoint associated to the HID interface """ for _ in range(64 - len(data)): data.append(0) #logging.debug("send: %s", data) self.device.write(bytearray([0]) + data) return
['def', 'write', '(', 'self', ',', 'data', ')', ':', 'for', '_', 'in', 'range', '(', '64', '-', 'len', '(', 'data', ')', ')', ':', 'data', '.', 'append', '(', '0', ')', '#logging.debug("send: %s", data)', 'self', '.', 'device', '.', 'write', '(', 'bytearray', '(', '[', '0', ']', ')', '+', 'data', ')', 'return']
write data on the OUT endpoint associated to the HID interface
['write', 'data', 'on', 'the', 'OUT', 'endpoint', 'associated', 'to', 'the', 'HID', 'interface']
train
https://github.com/respeaker/respeaker_python_library/blob/905a5334ccdc2d474ad973caf6a23d05c65bbb25/respeaker/usb_hid/hidapi_backend.py#L98-L106
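The padding is easy to check in isolation; note the payload must be a bytearray, since a plain list cannot be concatenated to bytearray([0]):

data = bytearray(b'\x01\x02')        # hypothetical 2-byte command
for _ in range(64 - len(data)):
    data.append(0)                   # pad to the 64-byte HID report size, as above
packet = bytearray([0]) + data       # prepend the report ID byte
assert len(packet) == 65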
8,272
michael-lazar/rtv
rtv/terminal.py
Terminal.strip_textpad
def strip_textpad(text): """ Attempt to intelligently strip excess whitespace from the output of a curses textpad. """ if text is None: return text # Trivial case where the textbox is only one line long. if '\n' not in text: return text.rstrip() # Allow one space at the end of the line. If there is more than one # space, assume that a newline operation was intended by the user stack, current_line = [], '' for line in text.split('\n'): if line.endswith(' ') or not line: stack.append(current_line + line.rstrip()) current_line = '' else: current_line += line stack.append(current_line) # Prune empty lines at the bottom of the textbox. for item in stack[::-1]: if not item: stack.pop() else: break out = '\n'.join(stack) return out
python
def strip_textpad(text): """ Attempt to intelligently strip excess whitespace from the output of a curses textpad. """ if text is None: return text # Trivial case where the textbox is only one line long. if '\n' not in text: return text.rstrip() # Allow one space at the end of the line. If there is more than one # space, assume that a newline operation was intended by the user stack, current_line = [], '' for line in text.split('\n'): if line.endswith(' ') or not line: stack.append(current_line + line.rstrip()) current_line = '' else: current_line += line stack.append(current_line) # Prune empty lines at the bottom of the textbox. for item in stack[::-1]: if not item: stack.pop() else: break out = '\n'.join(stack) return out
['def', 'strip_textpad', '(', 'text', ')', ':', 'if', 'text', 'is', 'None', ':', 'return', 'text', '# Trivial case where the textbox is only one line long.', 'if', "'\\n'", 'not', 'in', 'text', ':', 'return', 'text', '.', 'rstrip', '(', ')', '# Allow one space at the end of the line. If there is more than one', '# space, assume that a newline operation was intended by the user', 'stack', ',', 'current_line', '=', '[', ']', ',', "''", 'for', 'line', 'in', 'text', '.', 'split', '(', "'\\n'", ')', ':', 'if', 'line', '.', 'endswith', '(', "' '", ')', 'or', 'not', 'line', ':', 'stack', '.', 'append', '(', 'current_line', '+', 'line', '.', 'rstrip', '(', ')', ')', 'current_line', '=', "''", 'else', ':', 'current_line', '+=', 'line', 'stack', '.', 'append', '(', 'current_line', ')', '# Prune empty lines at the bottom of the textbox.', 'for', 'item', 'in', 'stack', '[', ':', ':', '-', '1', ']', ':', 'if', 'not', 'item', ':', 'stack', '.', 'pop', '(', ')', 'else', ':', 'break', 'out', '=', "'\\n'", '.', 'join', '(', 'stack', ')', 'return', 'out']
Attempt to intelligently strip excess whitespace from the output of a curses textpad.
['Attempt', 'to', 'intelligently', 'strip', 'excess', 'whitespace', 'from', 'the', 'output', 'of', 'a', 'curses', 'textpad', '.']
train
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/terminal.py#L878-L910
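The whitespace rules are subtle: a line ending in a space (or an empty line) signals an intended newline, anything else is treated as soft wrapping and rejoined. A small check of the static method above:

raw = "first line\nsecond line \nthird line\n\n"
print(Terminal.strip_textpad(raw))
# first linesecond line
# third line
# -> wrapped lines are merged, the trailing space forces the break,
#    and empty lines at the bottom are pruned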
8,273
fprimex/zdesk
zdesk/zdesk_api.py
ZendeskAPI.user_tickets_assigned
def user_tickets_assigned(self, user_id, external_id=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/tickets#allowed-for" api_path = "/api/v2/users/{user_id}/tickets/assigned.json" api_path = api_path.format(user_id=user_id) api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if external_id: api_query.update({ "external_id": external_id, }) return self.call(api_path, query=api_query, **kwargs)
python
def user_tickets_assigned(self, user_id, external_id=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/tickets#allowed-for" api_path = "/api/v2/users/{user_id}/tickets/assigned.json" api_path = api_path.format(user_id=user_id) api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if external_id: api_query.update({ "external_id": external_id, }) return self.call(api_path, query=api_query, **kwargs)
['def', 'user_tickets_assigned', '(', 'self', ',', 'user_id', ',', 'external_id', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'api_path', '=', '"/api/v2/users/{user_id}/tickets/assigned.json"', 'api_path', '=', 'api_path', '.', 'format', '(', 'user_id', '=', 'user_id', ')', 'api_query', '=', '{', '}', 'if', '"query"', 'in', 'kwargs', '.', 'keys', '(', ')', ':', 'api_query', '.', 'update', '(', 'kwargs', '[', '"query"', ']', ')', 'del', 'kwargs', '[', '"query"', ']', 'if', 'external_id', ':', 'api_query', '.', 'update', '(', '{', '"external_id"', ':', 'external_id', ',', '}', ')', 'return', 'self', '.', 'call', '(', 'api_path', ',', 'query', '=', 'api_query', ',', '*', '*', 'kwargs', ')']
https://developer.zendesk.com/rest_api/docs/core/tickets#allowed-for
['https', ':', '//', 'developer', '.', 'zendesk', '.', 'com', '/', 'rest_api', '/', 'docs', '/', 'core', '/', 'tickets#allowed', '-', 'for']
train
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4110-L4122
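A hedged usage sketch (instance URL, email, token and user ID are placeholders; get_all_pages is zdesk's standard pagination kwarg):

from zdesk import Zendesk

zd = Zendesk('https://example.zendesk.com', 'agent@example.com',
             'API_TOKEN', True)      # True -> the password is an API token
tickets = zd.user_tickets_assigned(12345, get_all_pages=True)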
8,274
cggh/scikit-allel
allel/compat.py
memoryview_safe
def memoryview_safe(x): """Make array safe to run in a Cython memoryview-based kernel. These kernels typically break down with the error ``ValueError: buffer source array is read-only`` when running in dask distributed. See Also -------- https://github.com/dask/distributed/issues/1978 https://github.com/cggh/scikit-allel/issues/206 """ if not x.flags.writeable: if not x.flags.owndata: x = x.copy(order='A') x.setflags(write=True) return x
python
def memoryview_safe(x): """Make array safe to run in a Cython memoryview-based kernel. These kernels typically break down with the error ``ValueError: buffer source array is read-only`` when running in dask distributed. See Also -------- https://github.com/dask/distributed/issues/1978 https://github.com/cggh/scikit-allel/issues/206 """ if not x.flags.writeable: if not x.flags.owndata: x = x.copy(order='A') x.setflags(write=True) return x
['def', 'memoryview_safe', '(', 'x', ')', ':', 'if', 'not', 'x', '.', 'flags', '.', 'writeable', ':', 'if', 'not', 'x', '.', 'flags', '.', 'owndata', ':', 'x', '=', 'x', '.', 'copy', '(', 'order', '=', "'A'", ')', 'x', '.', 'setflags', '(', 'write', '=', 'True', ')', 'return', 'x']
Make array safe to run in a Cython memoryview-based kernel. These kernels typically break down with the error ``ValueError: buffer source array is read-only`` when running in dask distributed. See Also -------- https://github.com/dask/distributed/issues/1978 https://github.com/cggh/scikit-allel/issues/206
['Make', 'array', 'safe', 'to', 'run', 'in', 'a', 'Cython', 'memoryview', '-', 'based', 'kernel', '.', 'These', 'kernels', 'typically', 'break', 'down', 'with', 'the', 'error', 'ValueError', ':', 'buffer', 'source', 'array', 'is', 'read', '-', 'only', 'when', 'running', 'in', 'dask', 'distributed', '.']
train
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/compat.py#L51-L66
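A quick demonstration with memoryview_safe as defined above; the view neither owns its data nor is writeable, so the function copies before flipping the write flag:

import numpy as np

base = np.arange(10)
view = base[2:8]                # a view that does not own its data
view.setflags(write=False)      # simulate the read-only buffer dask hands out

safe = memoryview_safe(view)
assert safe.flags.writeable and safe.flags.owndata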
8,275
ianmiell/shutit
shutit_pexpect.py
ShutItPexpectSession.whoami
def whoami(self, note=None, loglevel=logging.DEBUG): """Returns the current user by executing "whoami". @param note: See send() @return: the output of "whoami" @rtype: string """ shutit = self.shutit shutit.handle_note(note) res = self.send_and_get_output(' command whoami', echo=False, loglevel=loglevel).strip() if res == '': res = self.send_and_get_output(' command id -u -n', echo=False, loglevel=loglevel).strip() shutit.handle_note_after(note=note) return res
python
def whoami(self, note=None, loglevel=logging.DEBUG): """Returns the current user by executing "whoami". @param note: See send() @return: the output of "whoami" @rtype: string """ shutit = self.shutit shutit.handle_note(note) res = self.send_and_get_output(' command whoami', echo=False, loglevel=loglevel).strip() if res == '': res = self.send_and_get_output(' command id -u -n', echo=False, loglevel=loglevel).strip() shutit.handle_note_after(note=note) return res
['def', 'whoami', '(', 'self', ',', 'note', '=', 'None', ',', 'loglevel', '=', 'logging', '.', 'DEBUG', ')', ':', 'shutit', '=', 'self', '.', 'shutit', 'shutit', '.', 'handle_note', '(', 'note', ')', 'res', '=', 'self', '.', 'send_and_get_output', '(', "' command whoami'", ',', 'echo', '=', 'False', ',', 'loglevel', '=', 'loglevel', ')', '.', 'strip', '(', ')', 'if', 'res', '==', "''", ':', 'res', '=', 'self', '.', 'send_and_get_output', '(', "' command id -u -n'", ',', 'echo', '=', 'False', ',', 'loglevel', '=', 'loglevel', ')', '.', 'strip', '(', ')', 'shutit', '.', 'handle_note_after', '(', 'note', '=', 'note', ')', 'return', 'res']
Returns the current user by executing "whoami". @param note: See send() @return: the output of "whoami" @rtype: string
['Returns', 'the', 'current', 'user', 'by', 'executing', 'whoami', '.']
train
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L671-L691
8,276
santosjorge/cufflinks
cufflinks/colors.py
to_rgba
def to_rgba(color, alpha): """ Converts from hex|rgb to rgba Parameters: ----------- color : string Color representation on hex or rgb alpha : float Value from 0 to 1.0 that represents the alpha value. Example: to_rgba('#E1E5ED',0.6) to_rgba('#f03',0.7) to_rgba('rgb(23,23,23)',.5) """ if type(color) == tuple: color, alpha = color color = color.lower() if 'rgba' in color: cl = list(eval(color.replace('rgba', ''))) if alpha: cl[3] = alpha return 'rgba' + str(tuple(cl)) elif 'rgb' in color: r, g, b = eval(color.replace('rgb', '')) return 'rgba' + str((r, g, b, alpha)) else: return to_rgba(hex_to_rgb(color), alpha)
python
def to_rgba(color, alpha): """ Converts from hex|rgb to rgba Parameters: ----------- color : string Color representation on hex or rgb alpha : float Value from 0 to 1.0 that represents the alpha value. Example: to_rgba('#E1E5ED',0.6) to_rgba('#f03',0.7) to_rgba('rgb(23,23,23)',.5) """ if type(color) == tuple: color, alpha = color color = color.lower() if 'rgba' in color: cl = list(eval(color.replace('rgba', ''))) if alpha: cl[3] = alpha return 'rgba' + str(tuple(cl)) elif 'rgb' in color: r, g, b = eval(color.replace('rgb', '')) return 'rgba' + str((r, g, b, alpha)) else: return to_rgba(hex_to_rgb(color), alpha)
['def', 'to_rgba', '(', 'color', ',', 'alpha', ')', ':', 'if', 'type', '(', 'color', ')', '==', 'tuple', ':', 'color', ',', 'alpha', '=', 'color', 'color', '=', 'color', '.', 'lower', '(', ')', 'if', "'rgba'", 'in', 'color', ':', 'cl', '=', 'list', '(', 'eval', '(', 'color', '.', 'replace', '(', "'rgba'", ',', "''", ')', ')', ')', 'if', 'alpha', ':', 'cl', '[', '3', ']', '=', 'alpha', 'return', "'rgba'", '+', 'str', '(', 'tuple', '(', 'cl', ')', ')', 'elif', "'rgb'", 'in', 'color', ':', 'r', ',', 'g', ',', 'b', '=', 'eval', '(', 'color', '.', 'replace', '(', "'rgb'", ',', "''", ')', ')', 'return', "'rgba'", '+', 'str', '(', '(', 'r', ',', 'g', ',', 'b', ',', 'alpha', ')', ')', 'else', ':', 'return', 'to_rgba', '(', 'hex_to_rgb', '(', 'color', ')', ',', 'alpha', ')']
Converts from hex|rgb to rgba Parameters: ----------- color : string Color representation on hex or rgb alpha : float Value from 0 to 1.0 that represents the alpha value. Example: to_rgba('#E1E5ED',0.6) to_rgba('#f03',0.7) to_rgba('rgb(23,23,23)',.5)
['Converts', 'from', 'hex|rgb', 'to', 'rgba']
train
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/colors.py#L23-L52
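Expected outputs for the docstring's examples (hex values are resolved via hex_to_rgb first, then re-enter the 'rgb' branch); the eval() calls mean color strings should come from trusted input only:

to_rgba('#E1E5ED', 0.6)        # -> 'rgba(225, 229, 237, 0.6)'
to_rgba('rgb(23,23,23)', .5)   # -> 'rgba(23, 23, 23, 0.5)'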
8,277
linkedin/luminol
src/luminol/modules/time_series.py
TimeSeries.sum
def sum(self, default=None): """ Calculate the sum of all the values in the time series. :param default: Value to return as a default should the calculation not be possible. :return: Float representing the sum or `None`. """ return numpy.asscalar(numpy.sum(self.values)) if self.values else default
python
def sum(self, default=None): """ Calculate the sum of all the values in the time series. :param default: Value to return as a default should the calculation not be possible. :return: Float representing the sum or `None`. """ return numpy.asscalar(numpy.sum(self.values)) if self.values else default
['def', 'sum', '(', 'self', ',', 'default', '=', 'None', ')', ':', 'return', 'numpy', '.', 'asscalar', '(', 'numpy', '.', 'sum', '(', 'self', '.', 'values', ')', ')', 'if', 'self', '.', 'values', 'else', 'default']
Calculate the sum of all the values in the time series. :param default: Value to return as a default should the calculation not be possible. :return: Float representing the sum or `None`.
['Calculate', 'the', 'sum', 'of', 'all', 'the', 'values', 'in', 'the', 'time', 'series', '.']
train
https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/modules/time_series.py#L367-L374
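One portability note: numpy.asscalar was deprecated in NumPy 1.16 and removed in 1.23, so on modern NumPy the return expression above would be spelled:

numpy.sum(values).item() if values else default   # values: the series' value list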
8,278
RJT1990/pyflux
pyflux/var/var.py
VAR._create_B
def _create_B(self,Y): """ Creates OLS coefficient matrix Parameters ---------- Y : np.array The dependent variables Y Returns ---------- The coefficient matrix B """ Z = self._create_Z(Y) return np.dot(np.dot(Y,np.transpose(Z)),np.linalg.inv(np.dot(Z,np.transpose(Z))))
python
def _create_B(self,Y): """ Creates OLS coefficient matrix Parameters ---------- Y : np.array The dependent variables Y Returns ---------- The coefficient matrix B """ Z = self._create_Z(Y) return np.dot(np.dot(Y,np.transpose(Z)),np.linalg.inv(np.dot(Z,np.transpose(Z))))
['def', '_create_B', '(', 'self', ',', 'Y', ')', ':', 'Z', '=', 'self', '.', '_create_Z', '(', 'Y', ')', 'return', 'np', '.', 'dot', '(', 'np', '.', 'dot', '(', 'Y', ',', 'np', '.', 'transpose', '(', 'Z', ')', ')', ',', 'np', '.', 'linalg', '.', 'inv', '(', 'np', '.', 'dot', '(', 'Z', ',', 'np', '.', 'transpose', '(', 'Z', ')', ')', ')', ')']
Creates OLS coefficient matrix Parameters ---------- Y : np.array The dependent variables Y Returns ---------- The coefficient matrix B
['Creates', 'OLS', 'coefficient', 'matrix']
train
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/var/var.py#L97-L111
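The method is the multivariate OLS estimate B = Y Z^T (Z Z^T)^-1; a standalone numpy sketch of the same algebra, with a hypothetical design matrix standing in for _create_Z's output:

import numpy as np

Y = np.random.randn(3, 100)    # 3 dependent series, 100 observations
Z = np.random.randn(7, 100)    # stand-in for the lagged design matrix from _create_Z
B = Y @ Z.T @ np.linalg.inv(Z @ Z.T)
assert B.shape == (3, 7)       # one coefficient row per dependent series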
8,279
spyder-ide/spyder
spyder/plugins/ipythonconsole/widgets/client.py
ClientWidget.configure_shellwidget
def configure_shellwidget(self, give_focus=True): """Configure shellwidget after kernel is started""" if give_focus: self.get_control().setFocus() # Set exit callback self.shellwidget.set_exit_callback() # To save history self.shellwidget.executing.connect(self.add_to_history) # For Mayavi to run correctly self.shellwidget.executing.connect( self.shellwidget.set_backend_for_mayavi) # To update history after execution self.shellwidget.executed.connect(self.update_history) # To update the Variable Explorer after execution self.shellwidget.executed.connect( self.shellwidget.refresh_namespacebrowser) # To enable the stop button when executing a process self.shellwidget.executing.connect(self.enable_stop_button) # To disable the stop button after execution stopped self.shellwidget.executed.connect(self.disable_stop_button) # To show kernel restarted/died messages self.shellwidget.sig_kernel_restarted.connect( self.kernel_restarted_message) # To correctly change Matplotlib backend interactively self.shellwidget.executing.connect( self.shellwidget.change_mpl_backend) # To show env and sys.path contents self.shellwidget.sig_show_syspath.connect(self.show_syspath) self.shellwidget.sig_show_env.connect(self.show_env) # To sync with working directory toolbar self.shellwidget.executed.connect(self.shellwidget.get_cwd) # To apply style self.set_color_scheme(self.shellwidget.syntax_style, reset=False) # To hide the loading page self.shellwidget.sig_prompt_ready.connect(self._hide_loading_page) # Show possible errors when setting Matplotlib backend self.shellwidget.sig_prompt_ready.connect( self._show_mpl_backend_errors)
python
def configure_shellwidget(self, give_focus=True): """Configure shellwidget after kernel is started""" if give_focus: self.get_control().setFocus() # Set exit callback self.shellwidget.set_exit_callback() # To save history self.shellwidget.executing.connect(self.add_to_history) # For Mayavi to run correctly self.shellwidget.executing.connect( self.shellwidget.set_backend_for_mayavi) # To update history after execution self.shellwidget.executed.connect(self.update_history) # To update the Variable Explorer after execution self.shellwidget.executed.connect( self.shellwidget.refresh_namespacebrowser) # To enable the stop button when executing a process self.shellwidget.executing.connect(self.enable_stop_button) # To disable the stop button after execution stopped self.shellwidget.executed.connect(self.disable_stop_button) # To show kernel restarted/died messages self.shellwidget.sig_kernel_restarted.connect( self.kernel_restarted_message) # To correctly change Matplotlib backend interactively self.shellwidget.executing.connect( self.shellwidget.change_mpl_backend) # To show env and sys.path contents self.shellwidget.sig_show_syspath.connect(self.show_syspath) self.shellwidget.sig_show_env.connect(self.show_env) # To sync with working directory toolbar self.shellwidget.executed.connect(self.shellwidget.get_cwd) # To apply style self.set_color_scheme(self.shellwidget.syntax_style, reset=False) # To hide the loading page self.shellwidget.sig_prompt_ready.connect(self._hide_loading_page) # Show possible errors when setting Matplotlib backend self.shellwidget.sig_prompt_ready.connect( self._show_mpl_backend_errors)
['def', 'configure_shellwidget', '(', 'self', ',', 'give_focus', '=', 'True', ')', ':', 'if', 'give_focus', ':', 'self', '.', 'get_control', '(', ')', '.', 'setFocus', '(', ')', '# Set exit callback\r', 'self', '.', 'shellwidget', '.', 'set_exit_callback', '(', ')', '# To save history\r', 'self', '.', 'shellwidget', '.', 'executing', '.', 'connect', '(', 'self', '.', 'add_to_history', ')', '# For Mayavi to run correctly\r', 'self', '.', 'shellwidget', '.', 'executing', '.', 'connect', '(', 'self', '.', 'shellwidget', '.', 'set_backend_for_mayavi', ')', '# To update history after execution\r', 'self', '.', 'shellwidget', '.', 'executed', '.', 'connect', '(', 'self', '.', 'update_history', ')', '# To update the Variable Explorer after execution\r', 'self', '.', 'shellwidget', '.', 'executed', '.', 'connect', '(', 'self', '.', 'shellwidget', '.', 'refresh_namespacebrowser', ')', '# To enable the stop button when executing a process\r', 'self', '.', 'shellwidget', '.', 'executing', '.', 'connect', '(', 'self', '.', 'enable_stop_button', ')', '# To disable the stop button after execution stopped\r', 'self', '.', 'shellwidget', '.', 'executed', '.', 'connect', '(', 'self', '.', 'disable_stop_button', ')', '# To show kernel restarted/died messages\r', 'self', '.', 'shellwidget', '.', 'sig_kernel_restarted', '.', 'connect', '(', 'self', '.', 'kernel_restarted_message', ')', '# To correctly change Matplotlib backend interactively\r', 'self', '.', 'shellwidget', '.', 'executing', '.', 'connect', '(', 'self', '.', 'shellwidget', '.', 'change_mpl_backend', ')', '# To show env and sys.path contents\r', 'self', '.', 'shellwidget', '.', 'sig_show_syspath', '.', 'connect', '(', 'self', '.', 'show_syspath', ')', 'self', '.', 'shellwidget', '.', 'sig_show_env', '.', 'connect', '(', 'self', '.', 'show_env', ')', '# To sync with working directory toolbar\r', 'self', '.', 'shellwidget', '.', 'executed', '.', 'connect', '(', 'self', '.', 'shellwidget', '.', 'get_cwd', ')', '# To apply style\r', 'self', '.', 'set_color_scheme', '(', 'self', '.', 'shellwidget', '.', 'syntax_style', ',', 'reset', '=', 'False', ')', '# To hide the loading page\r', 'self', '.', 'shellwidget', '.', 'sig_prompt_ready', '.', 'connect', '(', 'self', '.', '_hide_loading_page', ')', '# Show possible errors when setting Matplotlib backend\r', 'self', '.', 'shellwidget', '.', 'sig_prompt_ready', '.', 'connect', '(', 'self', '.', '_show_mpl_backend_errors', ')']
Configure shellwidget after kernel is started
['Configure', 'shellwidget', 'after', 'kernel', 'is', 'started']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/client.py#L241-L292
8,280
vburenin/xjpath
xjpath/xjpath.py
_full_sub_array
def _full_sub_array(data_obj, xj_path, create_dict_path): """Retrieves all array or dictionary elements for '*' JSON path marker. :param dict|list data_obj: The current data object. :param str xj_path: A json path. :param bool create_dict_path create a dict path. :return: tuple with two values: first is a result and second a boolean flag telling if this value exists or not. """ if isinstance(data_obj, list): if xj_path: res = [] for d in data_obj: val, exists = path_lookup(d, xj_path, create_dict_path) if exists: res.append(val) return tuple(res), True else: return tuple(data_obj), True elif isinstance(data_obj, dict): if xj_path: res = [] for d in data_obj.values(): val, exists = path_lookup(d, xj_path, create_dict_path) if exists: res.append(val) return tuple(res), True else: return tuple(data_obj.values()), True else: return None, False
python
def _full_sub_array(data_obj, xj_path, create_dict_path): """Retrieves all array or dictionary elements for '*' JSON path marker. :param dict|list data_obj: The current data object. :param str xj_path: A json path. :param bool create_dict_path create a dict path. :return: tuple with two values: first is a result and second a boolean flag telling if this value exists or not. """ if isinstance(data_obj, list): if xj_path: res = [] for d in data_obj: val, exists = path_lookup(d, xj_path, create_dict_path) if exists: res.append(val) return tuple(res), True else: return tuple(data_obj), True elif isinstance(data_obj, dict): if xj_path: res = [] for d in data_obj.values(): val, exists = path_lookup(d, xj_path, create_dict_path) if exists: res.append(val) return tuple(res), True else: return tuple(data_obj.values()), True else: return None, False
['def', '_full_sub_array', '(', 'data_obj', ',', 'xj_path', ',', 'create_dict_path', ')', ':', 'if', 'isinstance', '(', 'data_obj', ',', 'list', ')', ':', 'if', 'xj_path', ':', 'res', '=', '[', ']', 'for', 'd', 'in', 'data_obj', ':', 'val', ',', 'exists', '=', 'path_lookup', '(', 'd', ',', 'xj_path', ',', 'create_dict_path', ')', 'if', 'exists', ':', 'res', '.', 'append', '(', 'val', ')', 'return', 'tuple', '(', 'res', ')', ',', 'True', 'else', ':', 'return', 'tuple', '(', 'data_obj', ')', ',', 'True', 'elif', 'isinstance', '(', 'data_obj', ',', 'dict', ')', ':', 'if', 'xj_path', ':', 'res', '=', '[', ']', 'for', 'd', 'in', 'data_obj', '.', 'values', '(', ')', ':', 'val', ',', 'exists', '=', 'path_lookup', '(', 'd', ',', 'xj_path', ',', 'create_dict_path', ')', 'if', 'exists', ':', 'res', '.', 'append', '(', 'val', ')', 'return', 'tuple', '(', 'res', ')', ',', 'True', 'else', ':', 'return', 'tuple', '(', 'data_obj', '.', 'values', '(', ')', ')', ',', 'True', 'else', ':', 'return', 'None', ',', 'False']
Retrieves all array or dictionary elements for '*' JSON path marker. :param dict|list data_obj: The current data object. :param str xj_path: A json path. :param bool create_dict_path create a dict path. :return: tuple with two values: first is a result and second a boolean flag telling if this value exists or not.
['Retrieves', 'all', 'array', 'or', 'dictionary', 'elements', 'for', '*', 'JSON', 'path', 'marker', '.']
train
https://github.com/vburenin/xjpath/blob/98a19fd6e6d0bcdc5ecbd3651ffa8915f06d7d44/xjpath/xjpath.py#L158-L189
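With an empty remaining path the behavior is fully determined by the code above and can be checked directly (the non-empty-path branches additionally need path_lookup from the rest of the library):

print(_full_sub_array([1, 2, 3], '', False))          # ((1, 2, 3), True)
print(_full_sub_array({'a': 1, 'b': 2}, '', False))   # ((1, 2), True)
print(_full_sub_array('not a container', '', False))  # (None, False)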
8,281
spotify/luigi
luigi/tools/range.py
infer_bulk_complete_from_fs
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re): """ Efficiently determines missing datetimes by filesystem listing. The current implementation works for the common case of a task writing output to a ``FileSystemTarget`` whose path is built using strftime with format like '...%Y...%m...%d...%H...', without custom ``complete()`` or ``exists()``. (Eventually Luigi could have ranges of completion as first-class citizens. Then this listing business could be factored away/be provided for explicitly in target API or some kind of a history server.) """ filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re) paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes] listing = set() for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)): # transposed, so here we're iterating over logical outputs, not datetimes listing |= _list_existing(f, g, p) # quickly learn everything that's missing missing_datetimes = [] for d, p in zip(datetimes, paths_by_datetime): if not set(p) <= listing: missing_datetimes.append(d) return missing_datetimes
python
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re): """ Efficiently determines missing datetimes by filesystem listing. The current implementation works for the common case of a task writing output to a ``FileSystemTarget`` whose path is built using strftime with format like '...%Y...%m...%d...%H...', without custom ``complete()`` or ``exists()``. (Eventually Luigi could have ranges of completion as first-class citizens. Then this listing business could be factored away/be provided for explicitly in target API or some kind of a history server.) """ filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re) paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes] listing = set() for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)): # transposed, so here we're iterating over logical outputs, not datetimes listing |= _list_existing(f, g, p) # quickly learn everything that's missing missing_datetimes = [] for d, p in zip(datetimes, paths_by_datetime): if not set(p) <= listing: missing_datetimes.append(d) return missing_datetimes
['def', 'infer_bulk_complete_from_fs', '(', 'datetimes', ',', 'datetime_to_task', ',', 'datetime_to_re', ')', ':', 'filesystems_and_globs_by_location', '=', '_get_filesystems_and_globs', '(', 'datetime_to_task', ',', 'datetime_to_re', ')', 'paths_by_datetime', '=', '[', '[', 'o', '.', 'path', 'for', 'o', 'in', 'flatten_output', '(', 'datetime_to_task', '(', 'd', ')', ')', ']', 'for', 'd', 'in', 'datetimes', ']', 'listing', '=', 'set', '(', ')', 'for', '(', 'f', ',', 'g', ')', ',', 'p', 'in', 'zip', '(', 'filesystems_and_globs_by_location', ',', 'zip', '(', '*', 'paths_by_datetime', ')', ')', ':', "# transposed, so here we're iterating over logical outputs, not datetimes", 'listing', '|=', '_list_existing', '(', 'f', ',', 'g', ',', 'p', ')', "# quickly learn everything that's missing", 'missing_datetimes', '=', '[', ']', 'for', 'd', ',', 'p', 'in', 'zip', '(', 'datetimes', ',', 'paths_by_datetime', ')', ':', 'if', 'not', 'set', '(', 'p', ')', '<=', 'listing', ':', 'missing_datetimes', '.', 'append', '(', 'd', ')', 'return', 'missing_datetimes']
Efficiently determines missing datetimes by filesystem listing. The current implementation works for the common case of a task writing output to a ``FileSystemTarget`` whose path is built using strftime with format like '...%Y...%m...%d...%H...', without custom ``complete()`` or ``exists()``. (Eventually Luigi could have ranges of completion as first-class citizens. Then this listing business could be factored away/be provided for explicitly in target API or some kind of a history server.)
['Efficiently', 'determines', 'missing', 'datetimes', 'by', 'filesystem', 'listing', '.']
train
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L622-L647
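The zip(*paths_by_datetime) transpose is the key trick: it regroups the per-datetime path lists into one tuple per logical output, so each filesystem location is listed once across all datetimes:

paths_by_datetime = [['a/2020-01-01', 'b/2020-01-01'],   # datetime 1
                     ['a/2020-01-02', 'b/2020-01-02']]   # datetime 2
print(list(zip(*paths_by_datetime)))
# [('a/2020-01-01', 'a/2020-01-02'), ('b/2020-01-01', 'b/2020-01-02')]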
8,282
Karaage-Cluster/python-tldap
tldap/database/__init__.py
_db_to_python
def _db_to_python(db_data: dict, table: LdapObjectClass, dn: str) -> LdapObject: """ Convert a DbDate object to a LdapObject. """ fields = table.get_fields() python_data = table({ name: field.to_python(db_data[name]) for name, field in fields.items() if field.db_field }) python_data = python_data.merge({ 'dn': dn, }) return python_data
python
def _db_to_python(db_data: dict, table: LdapObjectClass, dn: str) -> LdapObject: """ Convert a DbDate object to a LdapObject. """ fields = table.get_fields() python_data = table({ name: field.to_python(db_data[name]) for name, field in fields.items() if field.db_field }) python_data = python_data.merge({ 'dn': dn, }) return python_data
['def', '_db_to_python', '(', 'db_data', ':', 'dict', ',', 'table', ':', 'LdapObjectClass', ',', 'dn', ':', 'str', ')', '->', 'LdapObject', ':', 'fields', '=', 'table', '.', 'get_fields', '(', ')', 'python_data', '=', 'table', '(', '{', 'name', ':', 'field', '.', 'to_python', '(', 'db_data', '[', 'name', ']', ')', 'for', 'name', ',', 'field', 'in', 'fields', '.', 'items', '(', ')', 'if', 'field', '.', 'db_field', '}', ')', 'python_data', '=', 'python_data', '.', 'merge', '(', '{', "'dn'", ':', 'dn', ',', '}', ')', 'return', 'python_data']
Convert a DbDate object to a LdapObject.
['Convert', 'a', 'DbDate', 'object', 'to', 'a', 'LdapObject', '.']
train
https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/database/__init__.py#L392-L404
8,283
bioinf-jku/FCD
build/lib/fcd/FCD.py
calculate_frechet_distance
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Params: -- mu1: The mean of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for generated samples. -- mu2: The mean of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for real samples. -- sigma1: The covariance matrix of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for generated samples. -- sigma2: The covariance matrix of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for real samples. Returns: -- : The Frechet Distance. """ mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths" assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions" diff = mu1 - mu2 # product might be almost singular covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if not np.isfinite(covmean).all(): offset = np.eye(sigma1.shape[0]) * eps covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) # numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) raise ValueError("Imaginary component {}".format(m)) covmean = covmean.real tr_covmean = np.trace(covmean) return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
python
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Params: -- mu1: The mean of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for generated samples. -- mu2: The mean of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for real samples. -- sigma1: The covariance matrix of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for generated samples. -- sigma2: The covariance matrix of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for real samples. Returns: -- : The Frechet Distance. """ mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths" assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions" diff = mu1 - mu2 # product might be almost singular covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if not np.isfinite(covmean).all(): offset = np.eye(sigma1.shape[0]) * eps covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) # numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) raise ValueError("Imaginary component {}".format(m)) covmean = covmean.real tr_covmean = np.trace(covmean) return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
['def', 'calculate_frechet_distance', '(', 'mu1', ',', 'sigma1', ',', 'mu2', ',', 'sigma2', ',', 'eps', '=', '1e-6', ')', ':', 'mu1', '=', 'np', '.', 'atleast_1d', '(', 'mu1', ')', 'mu2', '=', 'np', '.', 'atleast_1d', '(', 'mu2', ')', 'sigma1', '=', 'np', '.', 'atleast_2d', '(', 'sigma1', ')', 'sigma2', '=', 'np', '.', 'atleast_2d', '(', 'sigma2', ')', 'assert', 'mu1', '.', 'shape', '==', 'mu2', '.', 'shape', ',', '"Training and test mean vectors have different lengths"', 'assert', 'sigma1', '.', 'shape', '==', 'sigma2', '.', 'shape', ',', '"Training and test covariances have different dimensions"', 'diff', '=', 'mu1', '-', 'mu2', '# product might be almost singular', 'covmean', ',', '_', '=', 'linalg', '.', 'sqrtm', '(', 'sigma1', '.', 'dot', '(', 'sigma2', ')', ',', 'disp', '=', 'False', ')', 'if', 'not', 'np', '.', 'isfinite', '(', 'covmean', ')', '.', 'all', '(', ')', ':', 'offset', '=', 'np', '.', 'eye', '(', 'sigma1', '.', 'shape', '[', '0', ']', ')', '*', 'eps', 'covmean', '=', 'linalg', '.', 'sqrtm', '(', '(', 'sigma1', '+', 'offset', ')', '.', 'dot', '(', 'sigma2', '+', 'offset', ')', ')', '# numerical error might give slight imaginary component', 'if', 'np', '.', 'iscomplexobj', '(', 'covmean', ')', ':', 'if', 'not', 'np', '.', 'allclose', '(', 'np', '.', 'diagonal', '(', 'covmean', ')', '.', 'imag', ',', '0', ',', 'atol', '=', '1e-3', ')', ':', 'm', '=', 'np', '.', 'max', '(', 'np', '.', 'abs', '(', 'covmean', '.', 'imag', ')', ')', 'raise', 'ValueError', '(', '"Imaginary component {}"', '.', 'format', '(', 'm', ')', ')', 'covmean', '=', 'covmean', '.', 'real', 'tr_covmean', '=', 'np', '.', 'trace', '(', 'covmean', ')', 'return', 'diff', '.', 'dot', '(', 'diff', ')', '+', 'np', '.', 'trace', '(', 'sigma1', ')', '+', 'np', '.', 'trace', '(', 'sigma2', ')', '-', '2', '*', 'tr_covmean']
Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Params: -- mu1: The mean of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for generated samples. -- mu2: The mean of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for real samples. -- sigma1: The covariance matrix of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for generated samples. -- sigma2: The covariance matrix of the activations of preultimate layer of the CHEMNET ( like returned by the function 'get_predictions') for real samples. Returns: -- : The Frechet Distance.
['Numpy', 'implementation', 'of', 'the', 'Frechet', 'Distance', '.', 'The', 'Frechet', 'distance', 'between', 'two', 'multivariate', 'Gaussians', 'X_1', '~', 'N', '(', 'mu_1', 'C_1', ')', 'and', 'X_2', '~', 'N', '(', 'mu_2', 'C_2', ')', 'is', 'd^2', '=', '||mu_1', '-', 'mu_2||^2', '+', 'Tr', '(', 'C_1', '+', 'C_2', '-', '2', '*', 'sqrt', '(', 'C_1', '*', 'C_2', '))', '.', 'Stable', 'version', 'by', 'Dougal', 'J', '.', 'Sutherland', '.']
train
https://github.com/bioinf-jku/FCD/blob/fe542b16d72a2d0899989374e1a86cc930d891e1/build/lib/fcd/FCD.py#L33-L85
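A self-contained sketch feeding the function synthetic activations (needs numpy and scipy; real usage would pass CHEMNET penultimate-layer activations as the docstring describes):

import numpy as np

rng = np.random.RandomState(0)
act1 = rng.randn(2000, 64)          # stand-in for generated-sample activations
act2 = rng.randn(2000, 64) + 0.5    # stand-in for real-sample activations

mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
fcd = calculate_frechet_distance(mu1, sigma1, mu2, sigma2)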
8,284
bcbio/bcbio-nextgen
bcbio/distributed/objectstore.py
AzureBlob.download
def download(cls, filename, input_dir, dl_dir=None): """Download the resource from the storage.""" file_info = cls.parse_remote(filename) if not dl_dir: dl_dir = os.path.join(input_dir, file_info.container, os.path.dirname(file_info.blob)) utils.safe_makedir(dl_dir) out_file = os.path.join(dl_dir, os.path.basename(file_info.blob)) if not utils.file_exists(out_file): with file_transaction({}, out_file) as tx_out_file: blob_service = cls.connect(filename) blob_service.get_blob_to_path( container_name=file_info.container, blob_name=file_info.blob, file_path=tx_out_file) return out_file
python
def download(cls, filename, input_dir, dl_dir=None): """Download the resource from the storage.""" file_info = cls.parse_remote(filename) if not dl_dir: dl_dir = os.path.join(input_dir, file_info.container, os.path.dirname(file_info.blob)) utils.safe_makedir(dl_dir) out_file = os.path.join(dl_dir, os.path.basename(file_info.blob)) if not utils.file_exists(out_file): with file_transaction({}, out_file) as tx_out_file: blob_service = cls.connect(filename) blob_service.get_blob_to_path( container_name=file_info.container, blob_name=file_info.blob, file_path=tx_out_file) return out_file
['def', 'download', '(', 'cls', ',', 'filename', ',', 'input_dir', ',', 'dl_dir', '=', 'None', ')', ':', 'file_info', '=', 'cls', '.', 'parse_remote', '(', 'filename', ')', 'if', 'not', 'dl_dir', ':', 'dl_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'input_dir', ',', 'file_info', '.', 'container', ',', 'os', '.', 'path', '.', 'dirname', '(', 'file_info', '.', 'blob', ')', ')', 'utils', '.', 'safe_makedir', '(', 'dl_dir', ')', 'out_file', '=', 'os', '.', 'path', '.', 'join', '(', 'dl_dir', ',', 'os', '.', 'path', '.', 'basename', '(', 'file_info', '.', 'blob', ')', ')', 'if', 'not', 'utils', '.', 'file_exists', '(', 'out_file', ')', ':', 'with', 'file_transaction', '(', '{', '}', ',', 'out_file', ')', 'as', 'tx_out_file', ':', 'blob_service', '=', 'cls', '.', 'connect', '(', 'filename', ')', 'blob_service', '.', 'get_blob_to_path', '(', 'container_name', '=', 'file_info', '.', 'container', ',', 'blob_name', '=', 'file_info', '.', 'blob', ',', 'file_path', '=', 'tx_out_file', ')', 'return', 'out_file']
Download the resource from the storage.
['Download', 'the', 'resource', 'from', 'the', 'storage', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/distributed/objectstore.py#L497-L514
8,285
bitcraze/crazyflie-lib-python
cflib/crazyflie/__init__.py
Crazyflie._start_connection_setup
def _start_connection_setup(self): """Start the connection setup by refreshing the TOCs""" logger.info('We are connected[%s], request connection setup', self.link_uri) self.platform.fetch_platform_informations(self._platform_info_fetched)
python
def _start_connection_setup(self): """Start the connection setup by refreshing the TOCs""" logger.info('We are connected[%s], request connection setup', self.link_uri) self.platform.fetch_platform_informations(self._platform_info_fetched)
['def', '_start_connection_setup', '(', 'self', ')', ':', 'logger', '.', 'info', '(', "'We are connected[%s], request connection setup'", ',', 'self', '.', 'link_uri', ')', 'self', '.', 'platform', '.', 'fetch_platform_informations', '(', 'self', '.', '_platform_info_fetched', ')']
Start the connection setup by refreshing the TOCs
['Start', 'the', 'connection', 'setup', 'by', 'refreshing', 'the', 'TOCs']
train
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/__init__.py#L156-L160
8,286
fitodic/centerline
centerline/main.py
Centerline._create_centerline
def _create_centerline(self): """ Calculate the centerline of a polygon. Densifies the border of a polygon which is then represented by a Numpy array of points necessary for creating the Voronoi diagram. Once the diagram is created, the ridges located within the polygon are joined and returned. Returns: a union of lines that are located within the polygon. """ border = array(self.__densify_border()) vor = Voronoi(border) vertex = vor.vertices lst_lines = [] for j, ridge in enumerate(vor.ridge_vertices): if -1 not in ridge: line = LineString([ (vertex[ridge[0]][0] + self._minx, vertex[ridge[0]][1] + self._miny), (vertex[ridge[1]][0] + self._minx, vertex[ridge[1]][1] + self._miny)]) if line.within(self._input_geom) and len(line.coords[0]) > 1: lst_lines.append(line) nr_lines = len(lst_lines) if nr_lines < 2: raise RuntimeError(( "Number of produced ridges is too small: {}" ", this might be caused by too large interpolation distance." ).format(nr_lines)) return unary_union(lst_lines)
python
def _create_centerline(self): """ Calculate the centerline of a polygon. Densifies the border of a polygon which is then represented by a Numpy array of points necessary for creating the Voronoi diagram. Once the diagram is created, the ridges located within the polygon are joined and returned. Returns: a union of lines that are located within the polygon. """ border = array(self.__densify_border()) vor = Voronoi(border) vertex = vor.vertices lst_lines = [] for j, ridge in enumerate(vor.ridge_vertices): if -1 not in ridge: line = LineString([ (vertex[ridge[0]][0] + self._minx, vertex[ridge[0]][1] + self._miny), (vertex[ridge[1]][0] + self._minx, vertex[ridge[1]][1] + self._miny)]) if line.within(self._input_geom) and len(line.coords[0]) > 1: lst_lines.append(line) nr_lines = len(lst_lines) if nr_lines < 2: raise RuntimeError(( "Number of produced ridges is too small: {}" ", this might be caused by too large interpolation distance." ).format(nr_lines)) return unary_union(lst_lines)
['def', '_create_centerline', '(', 'self', ')', ':', 'border', '=', 'array', '(', 'self', '.', '__densify_border', '(', ')', ')', 'vor', '=', 'Voronoi', '(', 'border', ')', 'vertex', '=', 'vor', '.', 'vertices', 'lst_lines', '=', '[', ']', 'for', 'j', ',', 'ridge', 'in', 'enumerate', '(', 'vor', '.', 'ridge_vertices', ')', ':', 'if', '-', '1', 'not', 'in', 'ridge', ':', 'line', '=', 'LineString', '(', '[', '(', 'vertex', '[', 'ridge', '[', '0', ']', ']', '[', '0', ']', '+', 'self', '.', '_minx', ',', 'vertex', '[', 'ridge', '[', '0', ']', ']', '[', '1', ']', '+', 'self', '.', '_miny', ')', ',', '(', 'vertex', '[', 'ridge', '[', '1', ']', ']', '[', '0', ']', '+', 'self', '.', '_minx', ',', 'vertex', '[', 'ridge', '[', '1', ']', ']', '[', '1', ']', '+', 'self', '.', '_miny', ')', ']', ')', 'if', 'line', '.', 'within', '(', 'self', '.', '_input_geom', ')', 'and', 'len', '(', 'line', '.', 'coords', '[', '0', ']', ')', '>', '1', ':', 'lst_lines', '.', 'append', '(', 'line', ')', 'nr_lines', '=', 'len', '(', 'lst_lines', ')', 'if', 'nr_lines', '<', '2', ':', 'raise', 'RuntimeError', '(', '(', '"Number of produced ridges is too small: {}"', '", this might be caused by too large interpolation distance."', ')', '.', 'format', '(', 'nr_lines', ')', ')', 'return', 'unary_union', '(', 'lst_lines', ')']
Calculate the centerline of a polygon. Densifies the border of a polygon which is then represented by a Numpy array of points necessary for creating the Voronoi diagram. Once the diagram is created, the ridges located within the polygon are joined and returned. Returns: a union of lines that are located within the polygon.
['Calculate', 'the', 'centerline', 'of', 'a', 'polygon', '.']
train
https://github.com/fitodic/centerline/blob/f27e7b1ecb77bd4da40093ab44754cbd3ec9f58b/centerline/main.py#L62-L99
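A hedged end-to-end sketch with the public Centerline class from the same module (the interpolation_dist keyword and its value are assumptions from this library version's README; too sparse a border triggers the RuntimeError above):

from shapely.geometry import Polygon
from centerline.main import Centerline

poly = Polygon([(0, 0), (0, 4), (10, 4), (10, 0)])
cl = Centerline(poly, interpolation_dist=0.5)   # hypothetical densification step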
8,287
AndresMWeber/Nomenclate
nomenclate/core/tools.py
flatten
def flatten(it): """ Flattens any iterable From: http://stackoverflow.com/questions/11503065/python-function-to-flatten-generator-containing-another-generator :param it: Iterator, iterator to flatten :return: Generator, A generator of the flattened values """ for x in it: if isinstance(x, collections.Iterable) and not isinstance(x, str): for y in flatten(x): yield y else: yield x
python
def flatten(it): """ Flattens any iterable From: http://stackoverflow.com/questions/11503065/python-function-to-flatten-generator-containing-another-generator :param it: Iterator, iterator to flatten :return: Generator, A generator of the flattened values """ for x in it: if isinstance(x, collections.Iterable) and not isinstance(x, str): for y in flatten(x): yield y else: yield x
['def', 'flatten', '(', 'it', ')', ':', 'for', 'x', 'in', 'it', ':', 'if', 'isinstance', '(', 'x', ',', 'collections', '.', 'Iterable', ')', 'and', 'not', 'isinstance', '(', 'x', ',', 'str', ')', ':', 'for', 'y', 'in', 'flatten', '(', 'x', ')', ':', 'yield', 'y', 'else', ':', 'yield', 'x']
Flattens any iterable From: http://stackoverflow.com/questions/11503065/python-function-to-flatten-generator-containing-another-generator :param it: Iterator, iterator to flatten :return: Generator, A generator of the flattened values
['Flattens', 'any', 'iterable', 'From', ':', 'http', ':', '//', 'stackoverflow', '.', 'com', '/', 'questions', '/', '11503065', '/', 'python', '-', 'function', '-', 'to', '-', 'flatten', '-', 'generator', '-', 'containing', '-', 'another', '-', 'generator']
train
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/tools.py#L124-L137
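Strings are deliberately excluded from the recursion, so they pass through whole; note that collections.Iterable moved to collections.abc in Python 3.3 and the old alias was removed in 3.10, so modern interpreters need collections.abc.Iterable in the isinstance check:

nested = [1, [2, (3, [4, 5])], 'abc']
print(list(flatten(nested)))
# [1, 2, 3, 4, 5, 'abc']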
8,288
saltstack/salt
salt/cli/cp.py
SaltCP._recurse
def _recurse(self, path): ''' Get a list of all specified files ''' files = {} empty_dirs = [] try: sub_paths = os.listdir(path) except OSError as exc: if exc.errno == errno.ENOENT: # Path does not exist sys.stderr.write('{0} does not exist\n'.format(path)) sys.exit(42) elif exc.errno in (errno.EINVAL, errno.ENOTDIR): # Path is a file (EINVAL on Windows, ENOTDIR otherwise) files[path] = self._mode(path) else: if not sub_paths: empty_dirs.append(path) for fn_ in sub_paths: files_, empty_dirs_ = self._recurse(os.path.join(path, fn_)) files.update(files_) empty_dirs.extend(empty_dirs_) return files, empty_dirs
python
def _recurse(self, path): ''' Get a list of all specified files ''' files = {} empty_dirs = [] try: sub_paths = os.listdir(path) except OSError as exc: if exc.errno == errno.ENOENT: # Path does not exist sys.stderr.write('{0} does not exist\n'.format(path)) sys.exit(42) elif exc.errno in (errno.EINVAL, errno.ENOTDIR): # Path is a file (EINVAL on Windows, ENOTDIR otherwise) files[path] = self._mode(path) else: if not sub_paths: empty_dirs.append(path) for fn_ in sub_paths: files_, empty_dirs_ = self._recurse(os.path.join(path, fn_)) files.update(files_) empty_dirs.extend(empty_dirs_) return files, empty_dirs
['def', '_recurse', '(', 'self', ',', 'path', ')', ':', 'files', '=', '{', '}', 'empty_dirs', '=', '[', ']', 'try', ':', 'sub_paths', '=', 'os', '.', 'listdir', '(', 'path', ')', 'except', 'OSError', 'as', 'exc', ':', 'if', 'exc', '.', 'errno', '==', 'errno', '.', 'ENOENT', ':', '# Path does not exist', 'sys', '.', 'stderr', '.', 'write', '(', "'{0} does not exist\\n'", '.', 'format', '(', 'path', ')', ')', 'sys', '.', 'exit', '(', '42', ')', 'elif', 'exc', '.', 'errno', 'in', '(', 'errno', '.', 'EINVAL', ',', 'errno', '.', 'ENOTDIR', ')', ':', '# Path is a file (EINVAL on Windows, ENOTDIR otherwise)', 'files', '[', 'path', ']', '=', 'self', '.', '_mode', '(', 'path', ')', 'else', ':', 'if', 'not', 'sub_paths', ':', 'empty_dirs', '.', 'append', '(', 'path', ')', 'for', 'fn_', 'in', 'sub_paths', ':', 'files_', ',', 'empty_dirs_', '=', 'self', '.', '_recurse', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'fn_', ')', ')', 'files', '.', 'update', '(', 'files_', ')', 'empty_dirs', '.', 'extend', '(', 'empty_dirs_', ')', 'return', 'files', ',', 'empty_dirs']
Get a list of all specified files
['Get', 'a', 'list', 'of', 'all', 'specified', 'files']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/cp.py#L71-L95
8,289
pgjones/quart
quart/app.py
Quart.after_request
def after_request(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable: """Add an after request function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_request def func(response): return response Arguments: func: The after request function itself. name: Optional blueprint key name. """ handler = ensure_coroutine(func) self.after_request_funcs[name].append(handler) return func
python
def after_request(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable: """Add an after request function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_request def func(response): return response Arguments: func: The after request function itself. name: Optional blueprint key name. """ handler = ensure_coroutine(func) self.after_request_funcs[name].append(handler) return func
['def', 'after_request', '(', 'self', ',', 'func', ':', 'Callable', ',', 'name', ':', 'AppOrBlueprintKey', '=', 'None', ')', '->', 'Callable', ':', 'handler', '=', 'ensure_coroutine', '(', 'func', ')', 'self', '.', 'after_request_funcs', '[', 'name', ']', '.', 'append', '(', 'handler', ')', 'return', 'func']
Add an after request function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_request def func(response): return response Arguments: func: The after request function itself. name: Optional blueprint key name.
['Add', 'an', 'after', 'request', 'function', '.']
train
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L1085-L1102
8,290
tagcubeio/tagcube-cli
tagcube_cli/utils.py
_parse_config_file_impl
def _parse_config_file_impl(filename): """ Format for the file is: credentials: email: ... api_token: ... :param filename: The filename to parse :return: A tuple with: - email - api_token """ api_key = None email = None try: doc = yaml.load(file(filename).read()) email = doc['credentials']['email'] api_key = doc['credentials']['api_key'] except (KeyError, TypeError): print(INVALID_FILE) return None, None except yaml.scanner.ScannerError, e: print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line)) return None, None # Just in case, we don't want the auth to fail because of a space email = email.strip() api_key = api_key.strip() if not is_valid_api_key(api_key): cli_logger.debug(INVALID_UUID) api_key = None if not is_valid_email(email): cli_logger.debug('Invalid email address: %s' % email) email = None return email, api_key
python
def _parse_config_file_impl(filename): """ Format for the file is: credentials: email: ... api_token: ... :param filename: The filename to parse :return: A tuple with: - email - api_token """ api_key = None email = None try: doc = yaml.load(file(filename).read()) email = doc['credentials']['email'] api_key = doc['credentials']['api_key'] except (KeyError, TypeError): print(INVALID_FILE) return None, None except yaml.scanner.ScannerError, e: print(SYNTAX_ERROR_FILE % (e.problem, e.problem_mark.line)) return None, None # Just in case, we don't want the auth to fail because of a space email = email.strip() api_key = api_key.strip() if not is_valid_api_key(api_key): cli_logger.debug(INVALID_UUID) api_key = None if not is_valid_email(email): cli_logger.debug('Invalid email address: %s' % email) email = None return email, api_key
['def', '_parse_config_file_impl', '(', 'filename', ')', ':', 'api_key', '=', 'None', 'email', '=', 'None', 'try', ':', 'doc', '=', 'yaml', '.', 'load', '(', 'file', '(', 'filename', ')', '.', 'read', '(', ')', ')', 'email', '=', 'doc', '[', "'credentials'", ']', '[', "'email'", ']', 'api_key', '=', 'doc', '[', "'credentials'", ']', '[', "'api_key'", ']', 'except', '(', 'KeyError', ',', 'TypeError', ')', ':', 'print', '(', 'INVALID_FILE', ')', 'return', 'None', ',', 'None', 'except', 'yaml', '.', 'scanner', '.', 'ScannerError', ',', 'e', ':', 'print', '(', 'SYNTAX_ERROR_FILE', '%', '(', 'e', '.', 'problem', ',', 'e', '.', 'problem_mark', '.', 'line', ')', ')', 'return', 'None', ',', 'None', "# Just in case, we don't want the auth to fail because of a space", 'email', '=', 'email', '.', 'strip', '(', ')', 'api_key', '=', 'api_key', '.', 'strip', '(', ')', 'if', 'not', 'is_valid_api_key', '(', 'api_key', ')', ':', 'cli_logger', '.', 'debug', '(', 'INVALID_UUID', ')', 'api_key', '=', 'None', 'if', 'not', 'is_valid_email', '(', 'email', ')', ':', 'cli_logger', '.', 'debug', '(', "'Invalid email address: %s'", '%', 'email', ')', 'email', '=', 'None', 'return', 'email', ',', 'api_key']
Format for the file is:

    credentials:
        email: ...
        api_token: ...

:param filename: The filename to parse
:return: A tuple with:
            - email
            - api_token
['Format', 'for', 'the', 'file', 'is', ':', 'credentials', ':', 'email', ':', '...', 'api_token', ':', '...', ':', 'param', 'filename', ':', 'The', 'filename', 'to', 'parse', ':', 'return', ':', 'A', 'tuple', 'with', ':', '-', 'email', '-', 'api_token']
train
https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube_cli/utils.py#L76-L117
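The recorded function is Python 2 only (the `except yaml.scanner.ScannerError, e:` syntax and the `file()` builtin do not exist in Python 3). A rough Python 3 sketch of the same parse, using yaml.safe_load and a hypothetical function name; the email/API-key validation helpers are left out:

import yaml

def parse_credentials(filename):  # hypothetical name, not tagcube-cli's API
    # safe_load refuses arbitrary YAML tags, unlike a bare yaml.load
    try:
        with open(filename) as fh:
            doc = yaml.safe_load(fh.read())
        email = doc['credentials']['email'].strip()
        api_key = doc['credentials']['api_key'].strip()
    except (KeyError, TypeError, yaml.scanner.ScannerError):
        return None, None
    return email, api_key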
8,291
tanghaibao/jcvi
jcvi/formats/fastq.py
shuffle
def shuffle(args):
    """
    %prog shuffle p1.fastq p2.fastq

    Shuffle pairs into interleaved format.
    """
    p = OptionParser(shuffle.__doc__)
    p.set_tag()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    p1, p2 = args
    pairsfastq = pairspf((p1, p2)) + ".fastq"
    tag = opts.tag

    p1fp = must_open(p1)
    p2fp = must_open(p2)
    pairsfw = must_open(pairsfastq, "w")
    nreads = 0
    while True:
        a = list(islice(p1fp, 4))
        if not a:
            break
        b = list(islice(p2fp, 4))
        if tag:
            name = a[0].rstrip()
            a[0] = name + "/1\n"
            b[0] = name + "/2\n"
        pairsfw.writelines(a)
        pairsfw.writelines(b)
        nreads += 2

    pairsfw.close()
    extra = nreads * 2 if tag else 0
    checkShuffleSizes(p1, p2, pairsfastq, extra=extra)

    logging.debug("File `{0}` verified after writing {1} reads.".\
                  format(pairsfastq, nreads))
    return pairsfastq
python
def shuffle(args):
    """
    %prog shuffle p1.fastq p2.fastq

    Shuffle pairs into interleaved format.
    """
    p = OptionParser(shuffle.__doc__)
    p.set_tag()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    p1, p2 = args
    pairsfastq = pairspf((p1, p2)) + ".fastq"
    tag = opts.tag

    p1fp = must_open(p1)
    p2fp = must_open(p2)
    pairsfw = must_open(pairsfastq, "w")
    nreads = 0
    while True:
        a = list(islice(p1fp, 4))
        if not a:
            break
        b = list(islice(p2fp, 4))
        if tag:
            name = a[0].rstrip()
            a[0] = name + "/1\n"
            b[0] = name + "/2\n"
        pairsfw.writelines(a)
        pairsfw.writelines(b)
        nreads += 2

    pairsfw.close()
    extra = nreads * 2 if tag else 0
    checkShuffleSizes(p1, p2, pairsfastq, extra=extra)

    logging.debug("File `{0}` verified after writing {1} reads.".\
                  format(pairsfastq, nreads))
    return pairsfastq
['def', 'shuffle', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'shuffle', '.', '__doc__', ')', 'p', '.', 'set_tag', '(', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '2', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'p1', ',', 'p2', '=', 'args', 'pairsfastq', '=', 'pairspf', '(', '(', 'p1', ',', 'p2', ')', ')', '+', '".fastq"', 'tag', '=', 'opts', '.', 'tag', 'p1fp', '=', 'must_open', '(', 'p1', ')', 'p2fp', '=', 'must_open', '(', 'p2', ')', 'pairsfw', '=', 'must_open', '(', 'pairsfastq', ',', '"w"', ')', 'nreads', '=', '0', 'while', 'True', ':', 'a', '=', 'list', '(', 'islice', '(', 'p1fp', ',', '4', ')', ')', 'if', 'not', 'a', ':', 'break', 'b', '=', 'list', '(', 'islice', '(', 'p2fp', ',', '4', ')', ')', 'if', 'tag', ':', 'name', '=', 'a', '[', '0', ']', '.', 'rstrip', '(', ')', 'a', '[', '0', ']', '=', 'name', '+', '"/1\\n"', 'b', '[', '0', ']', '=', 'name', '+', '"/2\\n"', 'pairsfw', '.', 'writelines', '(', 'a', ')', 'pairsfw', '.', 'writelines', '(', 'b', ')', 'nreads', '+=', '2', 'pairsfw', '.', 'close', '(', ')', 'extra', '=', 'nreads', '*', '2', 'if', 'tag', 'else', '0', 'checkShuffleSizes', '(', 'p1', ',', 'p2', ',', 'pairsfastq', ',', 'extra', '=', 'extra', ')', 'logging', '.', 'debug', '(', '"File `{0}` verified after writing {1} reads."', '.', 'format', '(', 'pairsfastq', ',', 'nreads', ')', ')', 'return', 'pairsfastq']
%prog shuffle p1.fastq p2.fastq

Shuffle pairs into interleaved format.
['%prog', 'shuffle', 'p1', '.', 'fastq', 'p2', '.', 'fastq']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L509-L551
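The heart of shuffle is grouping each FASTQ record as exactly four lines with islice and alternating between the two mate files. A self-contained sketch of just that interleaving step (paths are placeholders):

from itertools import islice

def interleave(p1, p2, out):
    # Pull one 4-line FASTQ record from each mate file in turn.
    with open(p1) as f1, open(p2) as f2, open(out, 'w') as fw:
        while True:
            a = list(islice(f1, 4))
            if not a:  # first file exhausted -> done
                break
            fw.writelines(a)
            fw.writelines(islice(f2, 4))

interleave('p1.fastq', 'p2.fastq', 'pairs.fastq')  # placeholder file names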
8,292
quantopian/zipline
zipline/finance/ledger.py
Ledger.process_transaction
def process_transaction(self, transaction):
    """Add a transaction to ledger, updating the current state as needed.

    Parameters
    ----------
    transaction : zp.Transaction
        The transaction to execute.
    """
    asset = transaction.asset
    if isinstance(asset, Future):
        try:
            old_price = self._payout_last_sale_prices[asset]
        except KeyError:
            self._payout_last_sale_prices[asset] = transaction.price
        else:
            position = self.position_tracker.positions[asset]
            amount = position.amount
            price = transaction.price

            self._cash_flow(
                self._calculate_payout(
                    asset.price_multiplier,
                    amount,
                    old_price,
                    price,
                ),
            )

            if amount + transaction.amount == 0:
                del self._payout_last_sale_prices[asset]
            else:
                self._payout_last_sale_prices[asset] = price
    else:
        self._cash_flow(-(transaction.price * transaction.amount))

    self.position_tracker.execute_transaction(transaction)

    # we only ever want the dict form from now on
    transaction_dict = transaction.to_dict()
    try:
        self._processed_transactions[transaction.dt].append(
            transaction_dict,
        )
    except KeyError:
        self._processed_transactions[transaction.dt] = [transaction_dict]
python
def process_transaction(self, transaction):
    """Add a transaction to ledger, updating the current state as needed.

    Parameters
    ----------
    transaction : zp.Transaction
        The transaction to execute.
    """
    asset = transaction.asset
    if isinstance(asset, Future):
        try:
            old_price = self._payout_last_sale_prices[asset]
        except KeyError:
            self._payout_last_sale_prices[asset] = transaction.price
        else:
            position = self.position_tracker.positions[asset]
            amount = position.amount
            price = transaction.price

            self._cash_flow(
                self._calculate_payout(
                    asset.price_multiplier,
                    amount,
                    old_price,
                    price,
                ),
            )

            if amount + transaction.amount == 0:
                del self._payout_last_sale_prices[asset]
            else:
                self._payout_last_sale_prices[asset] = price
    else:
        self._cash_flow(-(transaction.price * transaction.amount))

    self.position_tracker.execute_transaction(transaction)

    # we only ever want the dict form from now on
    transaction_dict = transaction.to_dict()
    try:
        self._processed_transactions[transaction.dt].append(
            transaction_dict,
        )
    except KeyError:
        self._processed_transactions[transaction.dt] = [transaction_dict]
['def', 'process_transaction', '(', 'self', ',', 'transaction', ')', ':', 'asset', '=', 'transaction', '.', 'asset', 'if', 'isinstance', '(', 'asset', ',', 'Future', ')', ':', 'try', ':', 'old_price', '=', 'self', '.', '_payout_last_sale_prices', '[', 'asset', ']', 'except', 'KeyError', ':', 'self', '.', '_payout_last_sale_prices', '[', 'asset', ']', '=', 'transaction', '.', 'price', 'else', ':', 'position', '=', 'self', '.', 'position_tracker', '.', 'positions', '[', 'asset', ']', 'amount', '=', 'position', '.', 'amount', 'price', '=', 'transaction', '.', 'price', 'self', '.', '_cash_flow', '(', 'self', '.', '_calculate_payout', '(', 'asset', '.', 'price_multiplier', ',', 'amount', ',', 'old_price', ',', 'price', ',', ')', ',', ')', 'if', 'amount', '+', 'transaction', '.', 'amount', '==', '0', ':', 'del', 'self', '.', '_payout_last_sale_prices', '[', 'asset', ']', 'else', ':', 'self', '.', '_payout_last_sale_prices', '[', 'asset', ']', '=', 'price', 'else', ':', 'self', '.', '_cash_flow', '(', '-', '(', 'transaction', '.', 'price', '*', 'transaction', '.', 'amount', ')', ')', 'self', '.', 'position_tracker', '.', 'execute_transaction', '(', 'transaction', ')', '# we only ever want the dict form from now on', 'transaction_dict', '=', 'transaction', '.', 'to_dict', '(', ')', 'try', ':', 'self', '.', '_processed_transactions', '[', 'transaction', '.', 'dt', ']', '.', 'append', '(', 'transaction_dict', ',', ')', 'except', 'KeyError', ':', 'self', '.', '_processed_transactions', '[', 'transaction', '.', 'dt', ']', '=', '[', 'transaction_dict', ']']
Add a transaction to ledger, updating the current state as needed.

Parameters
----------
transaction : zp.Transaction
    The transaction to execute.
['Add', 'a', 'transaction', 'to', 'ledger', 'updating', 'the', 'current', 'state', 'as', 'needed', '.']
train
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L479-L523
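The futures branch books a mark-to-market cash flow between the last recorded sale price and the new transaction price. Assuming _calculate_payout scales the price move by contract multiplier and position size (an inference from the call site above, not zipline's verbatim body):

def calculate_payout(multiplier, amount, old_price, price):
    # Contracts held, times the price move, times the contract multiplier.
    return (price - old_price) * multiplier * amount

# Long 5 contracts with a 50x multiplier, price moves 102.0 -> 103.5:
print(calculate_payout(50, 5, 102.0, 103.5))  # 375.0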
8,293
twisted/axiom
axiom/dependency.py
installedOn
def installedOn(self):
    """
    If this item is installed on another item, return the install
    target. Otherwise return None.
    """
    try:
        return self.store.findUnique(_DependencyConnector,
                                     _DependencyConnector.installee == self
                                     ).target
    except ItemNotFound:
        return None
python
def installedOn(self):
    """
    If this item is installed on another item, return the install
    target. Otherwise return None.
    """
    try:
        return self.store.findUnique(_DependencyConnector,
                                     _DependencyConnector.installee == self
                                     ).target
    except ItemNotFound:
        return None
['def', 'installedOn', '(', 'self', ')', ':', 'try', ':', 'return', 'self', '.', 'store', '.', 'findUnique', '(', '_DependencyConnector', ',', '_DependencyConnector', '.', 'installee', '==', 'self', ')', '.', 'target', 'except', 'ItemNotFound', ':', 'return', 'None']
If this item is installed on another item, return the install target. Otherwise return None.
['If', 'this', 'item', 'is', 'installed', 'on', 'another', 'item', 'return', 'the', 'install', 'target', '.', 'Otherwise', 'return', 'None', '.']
train
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/dependency.py#L174-L184
8,294
dnanexus/dx-toolkit
src/python/dxpy/api.py
global_workflow_run
def global_workflow_run(name_or_id, alias=None, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /globalworkflow-xxxx/run API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Global-Workflows#API-method:-/globalworkflow-xxxx%5B/yyyy%5D/run
    """
    input_params_cp = Nonce.update_nonce(input_params)
    fully_qualified_version = name_or_id + (('/' + alias) if alias else '')
    return DXHTTPRequest('/%s/run' % fully_qualified_version, input_params_cp, always_retry=always_retry, **kwargs)
python
def global_workflow_run(name_or_id, alias=None, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /globalworkflow-xxxx/run API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Global-Workflows#API-method:-/globalworkflow-xxxx%5B/yyyy%5D/run
    """
    input_params_cp = Nonce.update_nonce(input_params)
    fully_qualified_version = name_or_id + (('/' + alias) if alias else '')
    return DXHTTPRequest('/%s/run' % fully_qualified_version, input_params_cp, always_retry=always_retry, **kwargs)
['def', 'global_workflow_run', '(', 'name_or_id', ',', 'alias', '=', 'None', ',', 'input_params', '=', '{', '}', ',', 'always_retry', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'input_params_cp', '=', 'Nonce', '.', 'update_nonce', '(', 'input_params', ')', 'fully_qualified_version', '=', 'name_or_id', '+', '(', '(', "'/'", '+', 'alias', ')', 'if', 'alias', 'else', "''", ')', 'return', 'DXHTTPRequest', '(', "'/%s/run'", '%', 'fully_qualified_version', ',', 'input_params_cp', ',', 'always_retry', '=', 'always_retry', ',', '*', '*', 'kwargs', ')']
Invokes the /globalworkflow-xxxx/run API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Global-Workflows#API-method:-/globalworkflow-xxxx%5B/yyyy%5D/run
['Invokes', 'the', '/', 'globalworkflow', '-', 'xxxx', '/', 'run', 'API', 'method', '.']
train
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L762-L770
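A hedged call sketch; the workflow ID, project, and input mapping are placeholders, not real objects, and the exact shape of input_params follows the platform's run-route conventions rather than anything stated in this record:

import dxpy.api

result = dxpy.api.global_workflow_run(
    'globalworkflow-xxxx',          # placeholder global workflow ID
    alias='1.0.0',                  # resolves to 'globalworkflow-xxxx/1.0.0'
    input_params={'project': 'project-xxxx', 'input': {}},
)

Note the mutable default input_params={} in the recorded signature is shared across calls; the _cp suffix suggests update_nonce returns a copy before adding the nonce, which is what keeps that safe.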
8,295
jldbc/pybaseball
pybaseball/statcast_batter.py
statcast_batter
def statcast_batter(start_dt=None, end_dt=None, player_id=None):
    """
    Pulls statcast pitch-level data from Baseball Savant for a given batter.

    ARGUMENTS
        start_dt : YYYY-MM-DD : the first date for which you want a player's
            statcast data
        end_dt : YYYY-MM-DD : the final date for which you want data
        player_id : INT : the player's MLBAM ID. Find this by calling
            pybaseball.playerid_lookup(last_name, first_name), finding the
            correct player, and selecting their key_mlbam.
    """
    start_dt, end_dt, player_id = sanitize_input(start_dt, end_dt, player_id)

    # inputs are valid if either both or zero dates are supplied. Not valid of only one given.
    if start_dt and end_dt:
        url = 'https://baseballsavant.mlb.com/statcast_search/csv?all=true&hfPT=&hfAB=&hfBBT=&hfPR=&hfZ=&stadium=&hfBBL=&hfNewZones=&hfGT=R%7CPO%7CS%7C=&hfSea=&hfSit=&player_type=batter&hfOuts=&opponent=&pitcher_throws=&batter_stands=&hfSA=&game_date_gt={}&game_date_lt={}&batters_lookup%5B%5D={}&team=&position=&hfRO=&home_road=&hfFlag=&metric_1=&hfInn=&min_pitches=0&min_results=0&group_by=name&sort_col=pitches&player_event_sort=h_launch_speed&sort_order=desc&min_abs=0&type=details&'
        df = split_request(start_dt, end_dt, player_id, url)
        return df
python
def statcast_batter(start_dt=None, end_dt=None, player_id=None):
    """
    Pulls statcast pitch-level data from Baseball Savant for a given batter.

    ARGUMENTS
        start_dt : YYYY-MM-DD : the first date for which you want a player's
            statcast data
        end_dt : YYYY-MM-DD : the final date for which you want data
        player_id : INT : the player's MLBAM ID. Find this by calling
            pybaseball.playerid_lookup(last_name, first_name), finding the
            correct player, and selecting their key_mlbam.
    """
    start_dt, end_dt, player_id = sanitize_input(start_dt, end_dt, player_id)

    # inputs are valid if either both or zero dates are supplied. Not valid of only one given.
    if start_dt and end_dt:
        url = 'https://baseballsavant.mlb.com/statcast_search/csv?all=true&hfPT=&hfAB=&hfBBT=&hfPR=&hfZ=&stadium=&hfBBL=&hfNewZones=&hfGT=R%7CPO%7CS%7C=&hfSea=&hfSit=&player_type=batter&hfOuts=&opponent=&pitcher_throws=&batter_stands=&hfSA=&game_date_gt={}&game_date_lt={}&batters_lookup%5B%5D={}&team=&position=&hfRO=&home_road=&hfFlag=&metric_1=&hfInn=&min_pitches=0&min_results=0&group_by=name&sort_col=pitches&player_event_sort=h_launch_speed&sort_order=desc&min_abs=0&type=details&'
        df = split_request(start_dt, end_dt, player_id, url)
        return df
['def', 'statcast_batter', '(', 'start_dt', '=', 'None', ',', 'end_dt', '=', 'None', ',', 'player_id', '=', 'None', ')', ':', 'start_dt', ',', 'end_dt', ',', 'player_id', '=', 'sanitize_input', '(', 'start_dt', ',', 'end_dt', ',', 'player_id', ')', '# inputs are valid if either both or zero dates are supplied. Not valid of only one given.', 'if', 'start_dt', 'and', 'end_dt', ':', 'url', '=', "'https://baseballsavant.mlb.com/statcast_search/csv?all=true&hfPT=&hfAB=&hfBBT=&hfPR=&hfZ=&stadium=&hfBBL=&hfNewZones=&hfGT=R%7CPO%7CS%7C=&hfSea=&hfSit=&player_type=batter&hfOuts=&opponent=&pitcher_throws=&batter_stands=&hfSA=&game_date_gt={}&game_date_lt={}&batters_lookup%5B%5D={}&team=&position=&hfRO=&home_road=&hfFlag=&metric_1=&hfInn=&min_pitches=0&min_results=0&group_by=name&sort_col=pitches&player_event_sort=h_launch_speed&sort_order=desc&min_abs=0&type=details&'", 'df', '=', 'split_request', '(', 'start_dt', ',', 'end_dt', ',', 'player_id', ',', 'url', ')', 'return', 'df']
Pulls statcast pitch-level data from Baseball Savant for a given batter.

ARGUMENTS
    start_dt : YYYY-MM-DD : the first date for which you want a player's
        statcast data
    end_dt : YYYY-MM-DD : the final date for which you want data
    player_id : INT : the player's MLBAM ID. Find this by calling
        pybaseball.playerid_lookup(last_name, first_name), finding the
        correct player, and selecting their key_mlbam.
['Pulls', 'statcast', 'pitch', '-', 'level', 'data', 'from', 'Baseball', 'Savant', 'for', 'a', 'given', 'batter', '.']
train
https://github.com/jldbc/pybaseball/blob/085ea26bfd1b5f5926d79d4fac985c88278115f2/pybaseball/statcast_batter.py#L4-L18
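A usage sketch following the docstring's own lookup advice (player and dates are examples):

from pybaseball import playerid_lookup, statcast_batter

ids = playerid_lookup('trout', 'mike')    # example player
mlbam = ids['key_mlbam'].iloc[0]          # MLBAM ID column, per the docstring
df = statcast_batter('2017-06-15', '2017-07-15', player_id=mlbam)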
8,296
markovmodel/PyEMMA
devtools/ci/jenkins/update_versions_json.py
main
def main(argv=None):
    '''Command line options.'''

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    parser = ArgumentParser()
    parser.add_argument('-u', '--url', dest='url', required=True,
                        help="base url (has to contain versions json)")
    parser.add_argument('-o', '--output', dest='output')
    parser.add_argument('-a', '--add_version', dest='version')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
    parser.add_argument('-l', '--latest-version', dest='latest', action='store_true')
    args = parser.parse_args()
    URL = args.url

    # get dict
    versions = json.load(urlopen(URL + '/versions.json'))

    # add new version
    if args.version:
        versions.append(make_version_dict(URL, args.version))

    # create Version objects to compare them
    version_objs = [parse(s['version']) for s in versions]

    # unify and sort
    version_objs = set(version_objs)
    version_objs = sorted(list(version_objs))

    versions = [make_version_dict(URL, str(v)) for v in version_objs if v != 'devel']

    # last element should be the highest version
    versions[-1]['latest'] = True
    versions.append(make_version_dict(URL, 'devel', '', False))

    if args.verbose:
        print("new versions json:")
        json.dump(versions, sys.stdout, indent=1)
        print()

    if args.latest:
        print(find_latest(versions)['version'])
        return 0

    if args.output:
        with open(args.output, 'w') as v:
            json.dump(versions, v, indent=1)
            v.flush()
python
def main(argv=None):
    '''Command line options.'''

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    parser = ArgumentParser()
    parser.add_argument('-u', '--url', dest='url', required=True,
                        help="base url (has to contain versions json)")
    parser.add_argument('-o', '--output', dest='output')
    parser.add_argument('-a', '--add_version', dest='version')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
    parser.add_argument('-l', '--latest-version', dest='latest', action='store_true')
    args = parser.parse_args()
    URL = args.url

    # get dict
    versions = json.load(urlopen(URL + '/versions.json'))

    # add new version
    if args.version:
        versions.append(make_version_dict(URL, args.version))

    # create Version objects to compare them
    version_objs = [parse(s['version']) for s in versions]

    # unify and sort
    version_objs = set(version_objs)
    version_objs = sorted(list(version_objs))

    versions = [make_version_dict(URL, str(v)) for v in version_objs if v != 'devel']

    # last element should be the highest version
    versions[-1]['latest'] = True
    versions.append(make_version_dict(URL, 'devel', '', False))

    if args.verbose:
        print("new versions json:")
        json.dump(versions, sys.stdout, indent=1)
        print()

    if args.latest:
        print(find_latest(versions)['version'])
        return 0

    if args.output:
        with open(args.output, 'w') as v:
            json.dump(versions, v, indent=1)
            v.flush()
['def', 'main', '(', 'argv', '=', 'None', ')', ':', 'if', 'argv', 'is', 'None', ':', 'argv', '=', 'sys', '.', 'argv', 'else', ':', 'sys', '.', 'argv', '.', 'extend', '(', 'argv', ')', 'parser', '=', 'ArgumentParser', '(', ')', 'parser', '.', 'add_argument', '(', "'-u'", ',', "'--url'", ',', 'dest', '=', "'url'", ',', 'required', '=', 'True', ',', 'help', '=', '"base url (has to contain versions json)"', ')', 'parser', '.', 'add_argument', '(', "'-o'", ',', "'--output'", ',', 'dest', '=', "'output'", ')', 'parser', '.', 'add_argument', '(', "'-a'", ',', "'--add_version'", ',', 'dest', '=', "'version'", ')', 'parser', '.', 'add_argument', '(', "'-v'", ',', "'--verbose'", ',', 'dest', '=', "'verbose'", ',', 'action', '=', "'store_true'", ')', 'parser', '.', 'add_argument', '(', "'-l'", ',', "'--latest-version'", ',', 'dest', '=', "'latest'", ',', 'action', '=', "'store_true'", ')', 'args', '=', 'parser', '.', 'parse_args', '(', ')', 'URL', '=', 'args', '.', 'url', '# get dict', 'versions', '=', 'json', '.', 'load', '(', 'urlopen', '(', 'URL', '+', "'/versions.json'", ')', ')', '# add new version', 'if', 'args', '.', 'version', ':', 'versions', '.', 'append', '(', 'make_version_dict', '(', 'URL', ',', 'args', '.', 'version', ')', ')', '# create Version objects to compare them', 'version_objs', '=', '[', 'parse', '(', 's', '[', "'version'", ']', ')', 'for', 's', 'in', 'versions', ']', '# unify and sort', 'version_objs', '=', 'set', '(', 'version_objs', ')', 'version_objs', '=', 'sorted', '(', 'list', '(', 'version_objs', ')', ')', 'versions', '=', '[', 'make_version_dict', '(', 'URL', ',', 'str', '(', 'v', ')', ')', 'for', 'v', 'in', 'version_objs', 'if', 'v', '!=', "'devel'", ']', '# last element should be the highest version', 'versions', '[', '-', '1', ']', '[', "'latest'", ']', '=', 'True', 'versions', '.', 'append', '(', 'make_version_dict', '(', 'URL', ',', "'devel'", ',', "''", ',', 'False', ')', ')', 'if', 'args', '.', 'verbose', ':', 'print', '(', '"new versions json:"', ')', 'json', '.', 'dump', '(', 'versions', ',', 'sys', '.', 'stdout', ',', 'indent', '=', '1', ')', 'print', '(', ')', 'if', 'args', '.', 'latest', ':', 'print', '(', 'find_latest', '(', 'versions', ')', '[', "'version'", ']', ')', 'return', '0', 'if', 'args', '.', 'output', ':', 'with', 'open', '(', 'args', '.', 'output', ',', "'w'", ')', 'as', 'v', ':', 'json', '.', 'dump', '(', 'versions', ',', 'v', ',', 'indent', '=', '1', ')', 'v', '.', 'flush', '(', ')']
Command line options.
['Command', 'line', 'options', '.']
train
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/devtools/ci/jenkins/update_versions_json.py#L33-L82
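The sort step leans on parsed version objects rather than string order; a minimal sketch of why that matters (version strings are examples):

from packaging.version import parse

tags = ['2.4', '2.5.1', '2.5', '2.10']
ordered = sorted(set(parse(t) for t in tags))
# Lexicographic sort would place '2.10' before '2.5'; parsed versions do not:
print([str(v) for v in ordered])  # ['2.4', '2.5', '2.5.1', '2.10']

One caveat in the recorded code: v != 'devel' compares a parsed version object against a string, which for packaging-style Version objects is always true, so any filtering of a literal 'devel' entry would have to happen before parsing.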
8,297
gwastro/pycbc
pycbc/types/frequencyseries.py
FrequencySeries.save
def save(self, path, group=None, ifo='P1'):
    """
    Save frequency series to a Numpy .npy, hdf, or text file. The first column
    contains the sample frequencies, the second contains the values.
    In the case of a complex frequency series saved as text, the imaginary
    part is written as a third column. When using hdf format, the data is
    stored as a single vector, along with relevant attributes.

    Parameters
    ----------
    path: string
        Destination file path. Must end with either .hdf, .npy or .txt.
    group: string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ------
    ValueError
        If path does not end in .npy or .txt.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        output = _numpy.vstack((self.sample_frequencies.numpy(),
                                self.numpy())).T
        _numpy.save(path, output)
    elif ext == '.txt':
        if self.kind == 'real':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy())).T
        elif self.kind == 'complex':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy().real,
                                    self.numpy().imag)).T
        _numpy.savetxt(path, output)
    elif ext == '.xml' or path.endswith('.xml.gz'):
        from pycbc.io.live import make_psd_xmldoc
        from glue.ligolw import utils

        if self.kind != 'real':
            raise ValueError('XML only supports real frequency series')
        output = self.lal()
        # When writing in this format we must *not* have the 0 values at
        # frequencies less than flow. To resolve this we set the first
        # non-zero value < flow.
        data_lal = output.data.data
        first_idx = _numpy.argmax(data_lal > 0)
        if not first_idx == 0:
            data_lal[:first_idx] = data_lal[first_idx]
        psddict = {ifo: output}
        utils.write_filename(make_psd_xmldoc(psddict), path,
                             gz=path.endswith(".gz"))
    elif ext == '.hdf':
        key = 'data' if group is None else group
        f = h5py.File(path)
        ds = f.create_dataset(key, data=self.numpy(),
                              compression='gzip',
                              compression_opts=9, shuffle=True)
        ds.attrs['epoch'] = float(self.epoch)
        ds.attrs['delta_f'] = float(self.delta_f)
    else:
        raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz '
                         'or .hdf')
python
def save(self, path, group=None, ifo='P1'):
    """
    Save frequency series to a Numpy .npy, hdf, or text file. The first column
    contains the sample frequencies, the second contains the values.
    In the case of a complex frequency series saved as text, the imaginary
    part is written as a third column. When using hdf format, the data is
    stored as a single vector, along with relevant attributes.

    Parameters
    ----------
    path: string
        Destination file path. Must end with either .hdf, .npy or .txt.
    group: string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ------
    ValueError
        If path does not end in .npy or .txt.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        output = _numpy.vstack((self.sample_frequencies.numpy(),
                                self.numpy())).T
        _numpy.save(path, output)
    elif ext == '.txt':
        if self.kind == 'real':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy())).T
        elif self.kind == 'complex':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy().real,
                                    self.numpy().imag)).T
        _numpy.savetxt(path, output)
    elif ext == '.xml' or path.endswith('.xml.gz'):
        from pycbc.io.live import make_psd_xmldoc
        from glue.ligolw import utils

        if self.kind != 'real':
            raise ValueError('XML only supports real frequency series')
        output = self.lal()
        # When writing in this format we must *not* have the 0 values at
        # frequencies less than flow. To resolve this we set the first
        # non-zero value < flow.
        data_lal = output.data.data
        first_idx = _numpy.argmax(data_lal > 0)
        if not first_idx == 0:
            data_lal[:first_idx] = data_lal[first_idx]
        psddict = {ifo: output}
        utils.write_filename(make_psd_xmldoc(psddict), path,
                             gz=path.endswith(".gz"))
    elif ext == '.hdf':
        key = 'data' if group is None else group
        f = h5py.File(path)
        ds = f.create_dataset(key, data=self.numpy(),
                              compression='gzip',
                              compression_opts=9, shuffle=True)
        ds.attrs['epoch'] = float(self.epoch)
        ds.attrs['delta_f'] = float(self.delta_f)
    else:
        raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz '
                         'or .hdf')
['def', 'save', '(', 'self', ',', 'path', ',', 'group', '=', 'None', ',', 'ifo', '=', "'P1'", ')', ':', 'ext', '=', '_os', '.', 'path', '.', 'splitext', '(', 'path', ')', '[', '1', ']', 'if', 'ext', '==', "'.npy'", ':', 'output', '=', '_numpy', '.', 'vstack', '(', '(', 'self', '.', 'sample_frequencies', '.', 'numpy', '(', ')', ',', 'self', '.', 'numpy', '(', ')', ')', ')', '.', 'T', '_numpy', '.', 'save', '(', 'path', ',', 'output', ')', 'elif', 'ext', '==', "'.txt'", ':', 'if', 'self', '.', 'kind', '==', "'real'", ':', 'output', '=', '_numpy', '.', 'vstack', '(', '(', 'self', '.', 'sample_frequencies', '.', 'numpy', '(', ')', ',', 'self', '.', 'numpy', '(', ')', ')', ')', '.', 'T', 'elif', 'self', '.', 'kind', '==', "'complex'", ':', 'output', '=', '_numpy', '.', 'vstack', '(', '(', 'self', '.', 'sample_frequencies', '.', 'numpy', '(', ')', ',', 'self', '.', 'numpy', '(', ')', '.', 'real', ',', 'self', '.', 'numpy', '(', ')', '.', 'imag', ')', ')', '.', 'T', '_numpy', '.', 'savetxt', '(', 'path', ',', 'output', ')', 'elif', 'ext', '==', "'.xml'", 'or', 'path', '.', 'endswith', '(', "'.xml.gz'", ')', ':', 'from', 'pycbc', '.', 'io', '.', 'live', 'import', 'make_psd_xmldoc', 'from', 'glue', '.', 'ligolw', 'import', 'utils', 'if', 'self', '.', 'kind', '!=', "'real'", ':', 'raise', 'ValueError', '(', "'XML only supports real frequency series'", ')', 'output', '=', 'self', '.', 'lal', '(', ')', '# When writing in this format we must *not* have the 0 values at', '# frequencies less than flow. To resolve this we set the first', '# non-zero value < flow.', 'data_lal', '=', 'output', '.', 'data', '.', 'data', 'first_idx', '=', '_numpy', '.', 'argmax', '(', 'data_lal', '>', '0', ')', 'if', 'not', 'first_idx', '==', '0', ':', 'data_lal', '[', ':', 'first_idx', ']', '=', 'data_lal', '[', 'first_idx', ']', 'psddict', '=', '{', 'ifo', ':', 'output', '}', 'utils', '.', 'write_filename', '(', 'make_psd_xmldoc', '(', 'psddict', ')', ',', 'path', ',', 'gz', '=', 'path', '.', 'endswith', '(', '".gz"', ')', ')', 'elif', 'ext', '==', "'.hdf'", ':', 'key', '=', "'data'", 'if', 'group', 'is', 'None', 'else', 'group', 'f', '=', 'h5py', '.', 'File', '(', 'path', ')', 'ds', '=', 'f', '.', 'create_dataset', '(', 'key', ',', 'data', '=', 'self', '.', 'numpy', '(', ')', ',', 'compression', '=', "'gzip'", ',', 'compression_opts', '=', '9', ',', 'shuffle', '=', 'True', ')', 'ds', '.', 'attrs', '[', "'epoch'", ']', '=', 'float', '(', 'self', '.', 'epoch', ')', 'ds', '.', 'attrs', '[', "'delta_f'", ']', '=', 'float', '(', 'self', '.', 'delta_f', ')', 'else', ':', 'raise', 'ValueError', '(', "'Path must end with .npy, .txt, .xml, .xml.gz '", "'or .hdf'", ')']
Save frequency series to a Numpy .npy, hdf, or text file. The first column
contains the sample frequencies, the second contains the values.
In the case of a complex frequency series saved as text, the imaginary
part is written as a third column. When using hdf format, the data is
stored as a single vector, along with relevant attributes.

Parameters
----------
path: string
    Destination file path. Must end with either .hdf, .npy or .txt.
group: string
    Additional name for internal storage use. Ex. hdf storage uses
    this as the key value.

Raises
------
ValueError
    If path does not end in .npy or .txt.
['Save', 'frequency', 'series', 'to', 'a', 'Numpy', '.', 'npy', 'hdf', 'or', 'text', 'file', '.', 'The', 'first', 'column', 'contains', 'the', 'sample', 'frequencies', 'the', 'second', 'contains', 'the', 'values', '.', 'In', 'the', 'case', 'of', 'a', 'complex', 'frequency', 'series', 'saved', 'as', 'text', 'the', 'imaginary', 'part', 'is', 'written', 'as', 'a', 'third', 'column', '.', 'When', 'using', 'hdf', 'format', 'the', 'data', 'is', 'stored', 'as', 'a', 'single', 'vector', 'along', 'with', 'relevant', 'attributes', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/frequencyseries.py#L366-L429
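A usage sketch, assuming pycbc (and its LAL dependency) is installed; the analytic PSD model is just a convenient way to obtain a real FrequencySeries:

from pycbc import psd

fs = psd.aLIGOZeroDetHighPower(length=1024, delta_f=0.25, low_freq_cutoff=10.0)
fs.save('psd.npy')  # two columns: sample frequency, value
fs.save('psd.txt')  # same layout; a complex series would add an imaginary column

One portability note: the .hdf branch opens h5py.File(path) with no mode, which recent h5py releases no longer default to append, so on current installs that line may need an explicit mode such as 'a'.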
8,298
spacetelescope/drizzlepac
drizzlepac/wcs_functions.py
WCSMap.forward
def forward(self,pixx,pixy):
    """ Transform the input pixx,pixy positions in the input frame
        to pixel positions in the output frame.

        This method gets passed to the drizzle algorithm.
    """
    # This matches WTRAXY results to better than 1e-4 pixels.
    skyx,skyy = self.input.all_pix2world(pixx,pixy,self.origin)
    result= self.output.wcs_world2pix(skyx,skyy,self.origin)
    return result
python
def forward(self,pixx,pixy):
    """ Transform the input pixx,pixy positions in the input frame
        to pixel positions in the output frame.

        This method gets passed to the drizzle algorithm.
    """
    # This matches WTRAXY results to better than 1e-4 pixels.
    skyx,skyy = self.input.all_pix2world(pixx,pixy,self.origin)
    result= self.output.wcs_world2pix(skyx,skyy,self.origin)
    return result
['def', 'forward', '(', 'self', ',', 'pixx', ',', 'pixy', ')', ':', '# This matches WTRAXY results to better than 1e-4 pixels.', 'skyx', ',', 'skyy', '=', 'self', '.', 'input', '.', 'all_pix2world', '(', 'pixx', ',', 'pixy', ',', 'self', '.', 'origin', ')', 'result', '=', 'self', '.', 'output', '.', 'wcs_world2pix', '(', 'skyx', ',', 'skyy', ',', 'self', '.', 'origin', ')', 'return', 'result']
Transform the input pixx,pixy positions in the input frame to pixel positions in the output frame. This method gets passed to the drizzle algorithm.
['Transform', 'the', 'input', 'pixx', 'pixy', 'positions', 'in', 'the', 'input', 'frame', 'to', 'pixel', 'positions', 'in', 'the', 'output', 'frame', '.']
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/wcs_functions.py#L53-L62
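The same pixel-to-sky-to-pixel chain can be sketched with two astropy.wcs.WCS objects; the header values below are made up for illustration:

import numpy as np
from astropy.wcs import WCS

def make_wcs(crval1, crval2):
    w = WCS(naxis=2)
    w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    w.wcs.crval = [crval1, crval2]            # illustrative reference sky position
    w.wcs.crpix = [512.0, 512.0]
    w.wcs.cdelt = np.array([-0.0001, 0.0001])
    return w

win, wout = make_wcs(150.0, 2.0), make_wcs(150.0, 2.0)
skyx, skyy = win.all_pix2world([100.0], [200.0], 1)   # origin=1: FITS convention
outx, outy = wout.wcs_world2pix(skyx, skyy, 1)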
8,299
browniebroke/deezer-python
deezer/client.py
Client.get_object
def get_object(
    self, object_t, object_id=None, relation=None, parent=None, **kwargs
):
    """
    Actually query the Deezer API to retrieve the object

    :returns: json dictionary
    """
    url = self.object_url(object_t, object_id, relation, **kwargs)
    response = self.session.get(url)
    return self._process_json(response.json(), parent)
python
def get_object(
    self, object_t, object_id=None, relation=None, parent=None, **kwargs
):
    """
    Actually query the Deezer API to retrieve the object

    :returns: json dictionary
    """
    url = self.object_url(object_t, object_id, relation, **kwargs)
    response = self.session.get(url)
    return self._process_json(response.json(), parent)
['def', 'get_object', '(', 'self', ',', 'object_t', ',', 'object_id', '=', 'None', ',', 'relation', '=', 'None', ',', 'parent', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'url', '=', 'self', '.', 'object_url', '(', 'object_t', ',', 'object_id', ',', 'relation', ',', '*', '*', 'kwargs', ')', 'response', '=', 'self', '.', 'session', '.', 'get', '(', 'url', ')', 'return', 'self', '.', '_process_json', '(', 'response', '.', 'json', '(', ')', ',', 'parent', ')']
Actually query the Deezer API to retrieve the object

:returns: json dictionary
['Actually', 'query', 'the', 'Deezer', 'API', 'to', 'retrieve', 'the', 'object']
train
https://github.com/browniebroke/deezer-python/blob/fb869c3617045b22e7124e4b783ec1a68d283ac3/deezer/client.py#L128-L138
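A usage sketch against the public Deezer API; the object id is an example, and the return value is whatever _process_json builds (resource objects rather than raw dicts in this client):

import deezer

client = deezer.Client()
artist = client.get_object('artist', 27)                      # example artist id
albums = client.get_object('artist', 27, relation='albums')   # relation endpoint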